modelfusion 0.6.0 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +27 -16
- package/composed-function/index.cjs +0 -3
- package/composed-function/index.d.ts +0 -3
- package/composed-function/index.js +0 -3
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +1 -1
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +1 -1
- package/index.cjs +1 -0
- package/index.d.ts +1 -0
- package/index.js +1 -0
- package/model-function/Model.d.ts +2 -2
- package/model-function/ModelCallEvent.d.ts +4 -6
- package/model-function/SuccessfulModelCall.cjs +6 -3
- package/model-function/SuccessfulModelCall.d.ts +3 -3
- package/model-function/SuccessfulModelCall.js +6 -3
- package/model-function/embed-text/embedText.cjs +16 -30
- package/model-function/embed-text/embedText.d.ts +14 -4
- package/model-function/embed-text/embedText.js +16 -30
- package/model-function/executeCall.cjs +6 -6
- package/model-function/executeCall.js +6 -6
- package/model-function/generate-image/generateImage.cjs +7 -20
- package/model-function/generate-image/generateImage.d.ts +7 -2
- package/model-function/generate-image/generateImage.js +7 -20
- package/model-function/generate-json/JsonGenerationEvent.d.ts +2 -2
- package/model-function/generate-json/generateJson.cjs +7 -5
- package/model-function/generate-json/generateJson.d.ts +6 -1
- package/model-function/generate-json/generateJson.js +7 -5
- package/model-function/generate-json/generateJsonOrText.cjs +11 -9
- package/model-function/generate-json/generateJsonOrText.d.ts +10 -1
- package/model-function/generate-json/generateJsonOrText.js +11 -9
- package/model-function/generate-text/generateText.cjs +7 -17
- package/model-function/generate-text/generateText.d.ts +7 -2
- package/model-function/generate-text/generateText.js +7 -17
- package/model-function/generate-text/streamText.cjs +13 -11
- package/model-function/generate-text/streamText.d.ts +9 -1
- package/model-function/generate-text/streamText.js +13 -11
- package/model-function/index.cjs +1 -1
- package/model-function/index.d.ts +1 -1
- package/model-function/index.js +1 -1
- package/model-function/transcribe-audio/transcribe.cjs +7 -19
- package/model-function/transcribe-audio/transcribe.d.ts +7 -2
- package/model-function/transcribe-audio/transcribe.js +7 -19
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.js +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.js +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.js +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.js +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.cjs +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.js +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.cjs +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatPrompt.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.cjs +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.js +1 -1
- package/package.json +1 -1
- package/run/ConsoleLogger.cjs +2 -2
- package/run/ConsoleLogger.d.ts +5 -5
- package/run/ConsoleLogger.js +2 -2
- package/run/DefaultRun.cjs +7 -7
- package/run/DefaultRun.d.ts +6 -6
- package/run/DefaultRun.js +7 -7
- package/run/Run.d.ts +2 -2
- package/run/RunFunction.d.ts +0 -4
- package/run/RunFunctionEvent.d.ts +12 -0
- package/{model-function/ModelCallEventSource.cjs → run/RunFunctionEventSource.cjs} +7 -7
- package/run/RunFunctionEventSource.d.ts +13 -0
- package/{model-function/ModelCallEventSource.js → run/RunFunctionEventSource.js} +5 -5
- package/run/RunFunctionObserver.cjs +2 -0
- package/run/RunFunctionObserver.d.ts +5 -0
- package/run/RunFunctionObserver.js +1 -0
- package/run/index.cjs +3 -0
- package/run/index.d.ts +3 -0
- package/run/index.js +3 -0
- package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.cjs +1 -1
- package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.js +1 -1
- package/text-chunk/upsertTextChunks.cjs +1 -1
- package/text-chunk/upsertTextChunks.js +1 -1
- package/tool/ExecuteToolEvent.cjs +2 -0
- package/tool/ExecuteToolEvent.d.ts +22 -0
- package/tool/ExecuteToolEvent.js +1 -0
- package/{composed-function/use-tool → tool}/Tool.cjs +7 -0
- package/{composed-function/use-tool → tool}/Tool.d.ts +5 -2
- package/{composed-function/use-tool → tool}/Tool.js +7 -0
- package/tool/ToolExecutionError.cjs +31 -0
- package/tool/ToolExecutionError.d.ts +11 -0
- package/tool/ToolExecutionError.js +27 -0
- package/tool/executeTool.cjs +79 -0
- package/tool/executeTool.d.ts +20 -0
- package/tool/executeTool.js +75 -0
- package/tool/index.cjs +22 -0
- package/tool/index.d.ts +6 -0
- package/tool/index.js +6 -0
- package/tool/useTool.cjs +33 -0
- package/tool/useTool.d.ts +15 -0
- package/tool/useTool.js +29 -0
- package/tool/useToolOrGenerateText.cjs +38 -0
- package/{composed-function/use-tool/useTool.d.ts → tool/useToolOrGenerateText.d.ts} +2 -15
- package/tool/useToolOrGenerateText.js +34 -0
- package/composed-function/use-tool/useTool.cjs +0 -59
- package/composed-function/use-tool/useTool.js +0 -54
- package/model-function/ModelCallEventSource.d.ts +0 -13
- package/model-function/ModelCallObserver.d.ts +0 -5
- /package/{model-function/ModelCallObserver.cjs → run/RunFunctionEvent.cjs} +0 -0
- /package/{model-function/ModelCallObserver.js → run/RunFunctionEvent.js} +0 -0
- /package/{composed-function/use-tool → tool}/NoSuchToolError.cjs +0 -0
- /package/{composed-function/use-tool → tool}/NoSuchToolError.d.ts +0 -0
- /package/{composed-function/use-tool → tool}/NoSuchToolError.js +0 -0
package/README.md
CHANGED
@@ -10,9 +10,8 @@
 
 [Introduction](#introduction) | [Quick Install](#quick-install) | [Usage](#usage-examples) | [Features](#features) | [Integrations](#integrations) | [Documentation](#documentation) | [Examples](#more-examples) | [modelfusion.dev](https://modelfusion.dev)
 
-
-
-ModelFusion is in its initial development phase. Until version 1.0 there may be breaking changes.
+> [!NOTE]
+> ModelFusion is in its initial development phase. Until version 1.0 there may be breaking changes, because I am still exploring the API design. Feedback and suggestions are welcome.
 
 ## Introduction
 
@@ -49,7 +48,7 @@ You can use [prompt mappings](https://modelfusion.dev/guide/function/generate-te
 #### generateText
 
 ```ts
-const { text } = await generateText(
+const text = await generateText(
   new OpenAITextGenerationModel({ model: "text-davinci-003" }),
   "Write a short story about a robot learning to love:\n\n"
 );
@@ -58,7 +57,7 @@ const { text } = await generateText(
 #### streamText
 
 ```ts
-const { textStream } = await streamText(
+const textStream = await streamText(
   new OpenAIChatModel({ model: "gpt-3.5-turbo", maxTokens: 1000 }),
   [
     OpenAIChatMessage.system("You are a story writer."),
@@ -76,7 +75,7 @@ for await (const textFragment of textStream) {
 [Prompt mapping](https://modelfusion.dev/guide/function/generate-text/prompt-mapping) lets you use higher level prompt structures (such as instruction or chat prompts) for different models.
 
 ```ts
-const { text } = await generateText(
+const text = await generateText(
   new LlamaCppTextGenerationModel({
     contextWindowSize: 4096, // Llama 2 context window size
     nPredict: 1000,
@@ -89,7 +88,7 @@ const { text } = await generateText(
 ```
 
 ```ts
-const { textStream } = await streamText(
+const textStream = await streamText(
   new OpenAIChatModel({
     model: "gpt-3.5-turbo",
   }).mapPrompt(ChatToOpenAIChatPromptMapping()),
@@ -104,15 +103,26 @@ const { textStream } = await streamText(
 
 #### Metadata and original responses
 
-
+ModelFusion model functions return rich results that include the original response and metadata when you set the `fullResponse` option to `true`.
 
 ```ts
-const { text, response, metadata } = await generateText(
+// access the full response and the metadata:
+// the response type is specific to the model that's being used
+const { response, metadata } = await generateText(
   new OpenAITextGenerationModel({
     model: "text-davinci-003",
+    maxTokens: 1000,
+    n: 2, // generate 2 completions
   }),
-  "Write a short story about a robot learning to love:\n\n"
+  "Write a short story about a robot learning to love:\n\n",
+  { fullResponse: true }
 );
+
+for (const choice of response.choices) {
+  console.log(choice.text);
+}
+
+console.log(`Duration: ${metadata.durationInMs}ms`);
 ```
 
 ### [Generate JSON](https://modelfusion.dev/guide/function/generate-json)
@@ -120,7 +130,7 @@ const { text, response, metadata } = await generateText(
 Generate JSON value that matches a schema.
 
 ```ts
-const { value } = await generateJson(
+const value = await generateJson(
   new OpenAIChatModel({
     model: "gpt-3.5-turbo",
     temperature: 0,
@@ -249,7 +259,7 @@ const { tool, parameters, result, text } = await useToolOrGenerateText(
 Turn audio (voice) into text.
 
 ```ts
-const { transcription } = await transcribe(
+const transcription = await transcribe(
   new OpenAITranscriptionModel({ model: "whisper-1" }),
   {
     type: "mp3",
@@ -263,7 +273,7 @@ const { transcription } = await transcribe(
 Generate a base64-encoded image from a prompt.
 
 ```ts
-const { image } = await generateImage(
+const image = await generateImage(
   new OpenAIImageGenerationModel({ size: "512x512" }),
   "the wicked witch of the west in the style of early 19th century painting"
 );
@@ -274,7 +284,7 @@ const { image } = await generateImage(
 Create embeddings for text. Embeddings are vectors that represent the meaning of the text.
 
 ```ts
-const { embeddings } = await embedTexts(
+const embeddings = await embedTexts(
   new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" }),
   [
     "At first, Nox didn't know what to do with the pup.",
@@ -399,6 +409,7 @@ Use higher level prompts that are mapped into model specific prompt formats.
 - [Examples & Tutorials](https://modelfusion.dev/tutorial)
 - [Integrations](https://modelfusion.dev/integration/model-provider)
 - [API Reference](https://modelfusion.dev/api/modules)
+- [Blog](https://modelfusion.dev/api/blog)
 
 ## More Examples
 
@@ -436,9 +447,9 @@ Record audio with push-to-talk and transcribe it using Whisper, implemented as a
 
 ### [BabyAGI Agent](https://github.com/lgrammel/modelfusion/tree/main/examples/babyagi-agent)
 
-> _terminal app_, _agent_, _BabyAGI_
+> _terminal app_, _agent_, _BabyAGI_
 
-TypeScript implementation of the
+TypeScript implementation of the BabyAGI classic and BabyBeeAGI.
 
 ### [Middle school math agent](https://github.com/lgrammel/modelfusion/tree/main/examples/middle-school-math-agent)
 
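Taken together, the README changes document one API change: model functions now return their output value directly, and the old `{ text, response, metadata }` result object moved behind the `fullResponse: true` option. A minimal before/after sketch (model choice and prompt taken from the README; top-level await for brevity):

```ts
import { OpenAITextGenerationModel, generateText } from "modelfusion";

const model = new OpenAITextGenerationModel({ model: "text-davinci-003" });
const prompt = "Write a short story about a robot learning to love:\n\n";

// 0.6.0: const { text } = await generateText(model, prompt);
// 0.8.0: the generated text is returned directly.
const text = await generateText(model, prompt);

// The raw provider response and call metadata remain available
// behind the new fullResponse option.
const { response, metadata } = await generateText(model, prompt, {
  fullResponse: true,
});
console.log(`Duration: ${metadata.durationInMs}ms`);
```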
package/composed-function/index.cjs
CHANGED
@@ -14,9 +14,6 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./use-tool/NoSuchToolError.cjs"), exports);
-__exportStar(require("./use-tool/Tool.cjs"), exports);
-__exportStar(require("./use-tool/useTool.cjs"), exports);
 __exportStar(require("./summarize/SummarizationFunction.cjs"), exports);
 __exportStar(require("./summarize/summarizeRecursively.cjs"), exports);
 __exportStar(require("./summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs"), exports);
package/composed-function/index.d.ts
CHANGED
@@ -1,6 +1,3 @@
-export * from "./use-tool/NoSuchToolError.js";
-export * from "./use-tool/Tool.js";
-export * from "./use-tool/useTool.js";
 export * from "./summarize/SummarizationFunction.js";
 export * from "./summarize/summarizeRecursively.js";
 export * from "./summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js";
package/composed-function/index.js
CHANGED
@@ -1,6 +1,3 @@
-export * from "./use-tool/NoSuchToolError.js";
-export * from "./use-tool/Tool.js";
-export * from "./use-tool/useTool.js";
 export * from "./summarize/SummarizationFunction.js";
 export * from "./summarize/summarizeRecursively.js";
 export * from "./summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js";
package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs
CHANGED
@@ -18,7 +18,7 @@ async function summarizeRecursivelyWithTextGenerationAndTokenSplitting({ text, m
         maxTokensPerChunk: tokenLimit - emptyPromptTokens,
     }),
     summarize: async (input) => {
-        const { text } = await (0, generateText_js_1.generateText)(model, await prompt(input), options);
+        const text = await (0, generateText_js_1.generateText)(model, await prompt(input), options);
         return text;
     },
     join,
package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js
CHANGED
@@ -15,7 +15,7 @@ export async function summarizeRecursivelyWithTextGenerationAndTokenSplitting({
         maxTokensPerChunk: tokenLimit - emptyPromptTokens,
     }),
     summarize: async (input) => {
-        const { text } = await generateText(model, await prompt(input), options);
+        const text = await generateText(model, await prompt(input), options);
         return text;
     },
     join,
package/index.cjs
CHANGED
@@ -21,5 +21,6 @@ __exportStar(require("./model-provider/index.cjs"), exports);
|
|
21
21
|
__exportStar(require("./prompt/index.cjs"), exports);
|
22
22
|
__exportStar(require("./run/index.cjs"), exports);
|
23
23
|
__exportStar(require("./text-chunk/index.cjs"), exports);
|
24
|
+
__exportStar(require("./tool/index.cjs"), exports);
|
24
25
|
__exportStar(require("./util/index.cjs"), exports);
|
25
26
|
__exportStar(require("./vector-index/index.cjs"), exports);
|
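Since `tool/index.cjs` is now re-exported from the package root, tools can be imported directly from `modelfusion`. A sketch of the imports implied by the file list above (the exact export list of `tool/index` is not shown in this diff):

```ts
// Implied by the moved/added files: Tool, useTool, and NoSuchToolError left
// composed-function/use-tool, and executeTool/useToolOrGenerateText are new.
import {
  Tool,
  executeTool,
  useTool,
  useToolOrGenerateText,
  NoSuchToolError,
} from "modelfusion";
```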
package/index.d.ts
CHANGED
package/index.js
CHANGED
package/model-function/Model.d.ts
CHANGED
@@ -1,7 +1,7 @@
 import { ModelInformation } from "./ModelInformation.js";
-import { ModelCallObserver } from "./ModelCallObserver.js";
+import { RunFunctionObserver } from "../run/RunFunctionObserver.js";
 export interface ModelSettings {
-    observers?: Array<ModelCallObserver>;
+    observers?: Array<RunFunctionObserver>;
 }
 export interface Model<SETTINGS> {
     modelInformation: ModelInformation;
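`ModelSettings.observers` now accepts the run-level `RunFunctionObserver` type instead of the deleted `ModelCallObserver`. A sketch of wiring an observer through model settings; the `onRunFunctionStarted`/`onRunFunctionFinished` method names are an assumption, since the body of `run/RunFunctionObserver.d.ts` (+5 lines) is not included in this diff:

```ts
import { OpenAITextGenerationModel, RunFunctionObserver } from "modelfusion";

// Assumed observer shape; the actual interface body is not shown above.
const loggingObserver: RunFunctionObserver = {
  onRunFunctionStarted(event) {
    console.log("started:", event.type);
  },
  onRunFunctionFinished(event) {
    console.log("finished:", event.type);
  },
};

// executeCall (further down in this diff) merges settings.observers
// with the observers of the active run.
const model = new OpenAITextGenerationModel({
  model: "text-davinci-003",
  observers: [loggingObserver],
});
```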
package/model-function/ModelCallEvent.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import {
+import { RunFunctionFinishedEventMetadata, RunFunctionStartedEventMetadata } from "../run/RunFunctionEvent.js";
 import { ModelInformation } from "./ModelInformation.js";
 import { TextEmbeddingFinishedEvent, TextEmbeddingStartedEvent } from "./embed-text/TextEmbeddingEvent.js";
 import { ImageGenerationFinishedEvent, ImageGenerationStartedEvent } from "./generate-image/ImageGenerationEvent.js";
@@ -6,13 +6,11 @@ import { JsonGenerationFinishedEvent, JsonGenerationStartedEvent } from "./gener
 import { TextGenerationFinishedEvent, TextGenerationStartedEvent } from "./generate-text/TextGenerationEvent.js";
 import { TextStreamingFinishedEvent, TextStreamingStartedEvent } from "./generate-text/TextStreamingEvent.js";
 import { TranscriptionFinishedEvent, TranscriptionStartedEvent } from "./transcribe-audio/TranscriptionEvent.js";
-export type
-export type ModelCallStartedEventMetadata = IdMetadata & {
+export type ModelCallStartedEventMetadata = RunFunctionStartedEventMetadata & {
     model: ModelInformation;
-    startEpochSeconds: number;
 };
 export type ModelCallStartedEvent = ImageGenerationStartedEvent | JsonGenerationStartedEvent | TextEmbeddingStartedEvent | TextGenerationStartedEvent | TextStreamingStartedEvent | TranscriptionStartedEvent;
-export type ModelCallFinishedEventMetadata =
-
+export type ModelCallFinishedEventMetadata = RunFunctionFinishedEventMetadata & {
+    model: ModelInformation;
 };
 export type ModelCallFinishedEvent = ImageGenerationFinishedEvent | JsonGenerationFinishedEvent | TextEmbeddingFinishedEvent | TextGenerationFinishedEvent | TextStreamingFinishedEvent | TranscriptionFinishedEvent;
package/model-function/SuccessfulModelCall.cjs
CHANGED
@@ -1,9 +1,11 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.extractSuccessfulModelCalls = void 0;
-function extractSuccessfulModelCalls(modelCallEvents) {
-    return modelCallEvents
-        .filter((event) =>
+function extractSuccessfulModelCalls(runFunctionEvents) {
+    return runFunctionEvents
+        .filter((event) => Object.keys(eventTypeToCostType).includes(event.type) &&
+        "status" in event &&
+        event.status === "success")
         .map((event) => ({
         model: event.metadata.model,
         settings: event.settings,
@@ -15,6 +17,7 @@ exports.extractSuccessfulModelCalls = extractSuccessfulModelCalls;
 const eventTypeToCostType = {
     "image-generation-finished": "image-generation",
     "json-generation-finished": "json-generation",
+    "json-or-text-generation-finished": "json-or-text-generation",
     "text-embedding-finished": "text-embedding",
     "text-generation-finished": "text-generation",
     "text-streaming-finished": "text-streaming",
package/model-function/SuccessfulModelCall.d.ts
CHANGED
@@ -1,9 +1,9 @@
-import { ModelCallEvent } from "./ModelCallEvent.js";
+import { RunFunctionEvent } from "../run/RunFunctionEvent.js";
 import { ModelInformation } from "./ModelInformation.js";
 export type SuccessfulModelCall = {
-    type: "image-generation" | "json-generation" | "text-embedding" | "text-generation" | "text-streaming" | "transcription";
+    type: "image-generation" | "json-generation" | "json-or-text-generation" | "text-embedding" | "text-generation" | "text-streaming" | "transcription";
     model: ModelInformation;
     settings: unknown;
     response: unknown;
 };
-export declare function extractSuccessfulModelCalls(
+export declare function extractSuccessfulModelCalls(runFunctionEvents: RunFunctionEvent[]): SuccessfulModelCall[];
package/model-function/SuccessfulModelCall.js
CHANGED
@@ -1,6 +1,8 @@
-export function extractSuccessfulModelCalls(modelCallEvents) {
-    return modelCallEvents
-        .filter((event) =>
+export function extractSuccessfulModelCalls(runFunctionEvents) {
+    return runFunctionEvents
+        .filter((event) => Object.keys(eventTypeToCostType).includes(event.type) &&
+        "status" in event &&
+        event.status === "success")
         .map((event) => ({
         model: event.metadata.model,
         settings: event.settings,
@@ -11,6 +13,7 @@ export function extractSuccessfulModelCalls(modelCallEvents) {
 const eventTypeToCostType = {
     "image-generation-finished": "image-generation",
     "json-generation-finished": "json-generation",
+    "json-or-text-generation-finished": "json-or-text-generation",
     "text-embedding-finished": "text-embedding",
     "text-generation-finished": "text-generation",
     "text-streaming-finished": "text-streaming",
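`extractSuccessfulModelCalls` now accepts generic run-function events: it keeps only events whose `type` is a key of `eventTypeToCostType` (which gains `json-or-text-generation-finished`) and whose `status` is `"success"`. A usage sketch; that `DefaultRun` exposes its recorded events as `run.events` is an assumption, since DefaultRun's diff is not expanded here:

```ts
import {
  DefaultRun,
  OpenAITextGenerationModel,
  extractSuccessfulModelCalls,
  generateText,
} from "modelfusion";

const run = new DefaultRun();
await generateText(
  new OpenAITextGenerationModel({ model: "text-davinci-003" }),
  "Write a short story about a robot learning to love:\n\n",
  { run }
);

// Aborted/failed calls and non-model events are filtered out; successful
// calls are mapped to { type, model, settings, response }.
const calls = extractSuccessfulModelCalls(run.events);
```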
package/model-function/embed-text/embedText.cjs
CHANGED
@@ -2,18 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.embedText = exports.embedTexts = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Generate embeddings for multiple texts.
- *
- * @example
- * const { embeddings } = await embedTexts(
- *   new OpenAITextEmbeddingModel(...),
- *   [
- *     "At first, Nox didn't know what to do with the pup.",
- *     "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
- *   ]
- * );
- */
 async function embedTexts(model, texts, options) {
     const result = await (0, executeCall_js_1.executeCall)({
         model,
@@ -65,26 +53,24 @@ async function embedTexts(model, texts, options) {
         generatedEmbeddings: output,
     }),
     });
-    return
-
-
-
+    return options?.fullResponse === true
+        ? {
+            embeddings: result.output,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.embedTexts = embedTexts;
-/**
- * Generate an embedding for a single text.
- *
- * @example
- * const { embedding } = await embedText(
- *   new OpenAITextEmbeddingModel(...),
- *   "At first, Nox didn't know what to do with the pup."
- * );
- */
 async function embedText(model, text, options) {
-    const result = await embedTexts(model, [text],
-
-
-
-
+    const result = await embedTexts(model, [text], {
+        ...(options ?? {}),
+        fullResponse: true,
+    });
+    return options?.fullResponse === true
+        ? {
+            embedding: result.embeddings[0],
+            metadata: result.metadata,
+        }
+        : result.embeddings[0];
 }
 exports.embedText = embedText;
@@ -6,7 +6,7 @@ import { TextEmbeddingModel, TextEmbeddingModelSettings } from "./TextEmbeddingM
|
|
6
6
|
* Generate embeddings for multiple texts.
|
7
7
|
*
|
8
8
|
* @example
|
9
|
-
* const
|
9
|
+
* const embeddings = await embedTexts(
|
10
10
|
* new OpenAITextEmbeddingModel(...),
|
11
11
|
* [
|
12
12
|
* "At first, Nox didn't know what to do with the pup.",
|
@@ -14,20 +14,30 @@ import { TextEmbeddingModel, TextEmbeddingModelSettings } from "./TextEmbeddingM
|
|
14
14
|
* ]
|
15
15
|
* );
|
16
16
|
*/
|
17
|
-
export declare function embedTexts<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, texts: string[], options
|
17
|
+
export declare function embedTexts<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, texts: string[], options: FunctionOptions<SETTINGS> & {
|
18
|
+
fullResponse: true;
|
19
|
+
}): Promise<{
|
18
20
|
embeddings: Array<Vector>;
|
19
21
|
metadata: CallMetadata<TextEmbeddingModel<RESPONSE, SETTINGS>>;
|
20
22
|
}>;
|
23
|
+
export declare function embedTexts<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, texts: string[], options?: FunctionOptions<SETTINGS> & {
|
24
|
+
fullResponse?: false;
|
25
|
+
}): Promise<Array<Vector>>;
|
21
26
|
/**
|
22
27
|
* Generate an embedding for a single text.
|
23
28
|
*
|
24
29
|
* @example
|
25
|
-
* const
|
30
|
+
* const embedding = await embedText(
|
26
31
|
* new OpenAITextEmbeddingModel(...),
|
27
32
|
* "At first, Nox didn't know what to do with the pup."
|
28
33
|
* );
|
29
34
|
*/
|
30
|
-
export declare function embedText<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, text: string, options
|
35
|
+
export declare function embedText<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, text: string, options: FunctionOptions<SETTINGS> & {
|
36
|
+
fullResponse: true;
|
37
|
+
}): Promise<{
|
31
38
|
embedding: Vector;
|
32
39
|
metadata: CallMetadata<TextEmbeddingModel<RESPONSE, SETTINGS>>;
|
33
40
|
}>;
|
41
|
+
export declare function embedText<RESPONSE, SETTINGS extends TextEmbeddingModelSettings>(model: TextEmbeddingModel<RESPONSE, SETTINGS>, text: string, options?: FunctionOptions<SETTINGS> & {
|
42
|
+
fullResponse?: false;
|
43
|
+
}): Promise<Vector>;
|
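The rewritten declarations use overloads keyed on the `fullResponse` literal type: passing `fullResponse: true` selects the signature that returns the result object with metadata, while omitting it (or passing `false`) keeps the plain `Vector` result. A sketch of how the overloads resolve:

```ts
import { OpenAITextEmbeddingModel, embedText } from "modelfusion";

const model = new OpenAITextEmbeddingModel({ model: "text-embedding-ada-002" });
const sentence = "At first, Nox didn't know what to do with the pup.";

// Matches the `fullResponse?: false` overload: Promise<Vector>.
const embedding = await embedText(model, sentence);

// Matches the `fullResponse: true` overload:
// Promise<{ embedding: Vector; metadata: CallMetadata<...> }>.
const { embedding: vector, metadata } = await embedText(model, sentence, {
  fullResponse: true,
});
console.log(metadata.durationInMs, vector.length);
```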
package/model-function/embed-text/embedText.js
CHANGED
@@ -1,16 +1,4 @@
 import { executeCall } from "../executeCall.js";
-/**
- * Generate embeddings for multiple texts.
- *
- * @example
- * const { embeddings } = await embedTexts(
- *   new OpenAITextEmbeddingModel(...),
- *   [
- *     "At first, Nox didn't know what to do with the pup.",
- *     "He keenly observed and absorbed everything around him, from the birds in the sky to the trees in the forest.",
- *   ]
- * );
- */
 export async function embedTexts(model, texts, options) {
     const result = await executeCall({
         model,
@@ -62,24 +50,22 @@ export async function embedTexts(model, texts, options) {
         generatedEmbeddings: output,
     }),
     });
-    return
-
-
-
+    return options?.fullResponse === true
+        ? {
+            embeddings: result.output,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
-/**
- * Generate an embedding for a single text.
- *
- * @example
- * const { embedding } = await embedText(
- *   new OpenAITextEmbeddingModel(...),
- *   "At first, Nox didn't know what to do with the pup."
- * );
- */
 export async function embedText(model, text, options) {
-    const result = await embedTexts(model, [text],
-
-
-
-
+    const result = await embedTexts(model, [text], {
+        ...(options ?? {}),
+        fullResponse: true,
+    });
+    return options?.fullResponse === true
+        ? {
+            embedding: result.embeddings[0],
+            metadata: result.metadata,
+        }
+        : result.embeddings[0];
 }
package/model-function/executeCall.cjs
CHANGED
@@ -2,10 +2,10 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.executeCall = void 0;
 const nanoid_1 = require("nanoid");
+const RunFunctionEventSource_js_1 = require("../run/RunFunctionEventSource.cjs");
 const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
 const AbortError_js_1 = require("../util/api/AbortError.cjs");
 const runSafe_js_1 = require("../util/runSafe.cjs");
-const ModelCallEventSource_js_1 = require("./ModelCallEventSource.cjs");
 async function executeCall({ model, options, getStartEvent, getAbortEvent, getFailureEvent, getSuccessEvent, generateResponse, extractOutputValue, }) {
     if (options?.settings != null) {
         model = model.withSettings(options.settings);
@@ -16,7 +16,7 @@ async function executeCall({ model, options, getStartEvent, getAbortEvent, getFa
     }
     const run = options?.run;
     const settings = model.settings;
-    const eventSource = new ModelCallEventSource_js_1.ModelCallEventSource({
+    const eventSource = new RunFunctionEventSource_js_1.RunFunctionEventSource({
         observers: [...(settings.observers ?? []), ...(run?.observers ?? [])],
         errorHandler: run?.errorHandler,
     });
@@ -30,7 +30,7 @@ async function executeCall({ model, options, getStartEvent, getAbortEvent, getFa
         model: model.modelInformation,
         startEpochSeconds: durationMeasurement.startEpochSeconds,
     };
-    eventSource.notifyModelCallStarted(getStartEvent(startMetadata, settings));
+    eventSource.notifyRunFunctionStarted(getStartEvent(startMetadata, settings));
     const result = await (0, runSafe_js_1.runSafe)(() => generateResponse({
         functionId: options?.functionId,
         settings,
@@ -42,15 +42,15 @@ async function executeCall({ model, options, getStartEvent, getAbortEvent, getFa
     };
     if (!result.ok) {
         if (result.isAborted) {
-            eventSource.notifyModelCallFinished(getAbortEvent(finishMetadata, settings));
+            eventSource.notifyRunFunctionFinished(getAbortEvent(finishMetadata, settings));
             throw new AbortError_js_1.AbortError();
         }
-        eventSource.notifyModelCallFinished(getFailureEvent(finishMetadata, settings, result.error));
+        eventSource.notifyRunFunctionFinished(getFailureEvent(finishMetadata, settings, result.error));
         throw result.error;
     }
     const response = result.output;
     const output = extractOutputValue(response);
-    eventSource.notifyModelCallFinished(getSuccessEvent(finishMetadata, settings, response, output));
+    eventSource.notifyRunFunctionFinished(getSuccessEvent(finishMetadata, settings, response, output));
     return {
         output,
         response,
package/model-function/executeCall.js
CHANGED
@@ -1,8 +1,8 @@
 import { nanoid as createId } from "nanoid";
+import { RunFunctionEventSource } from "../run/RunFunctionEventSource.js";
 import { startDurationMeasurement } from "../util/DurationMeasurement.js";
 import { AbortError } from "../util/api/AbortError.js";
 import { runSafe } from "../util/runSafe.js";
-import { ModelCallEventSource } from "./ModelCallEventSource.js";
 export async function executeCall({ model, options, getStartEvent, getAbortEvent, getFailureEvent, getSuccessEvent, generateResponse, extractOutputValue, }) {
     if (options?.settings != null) {
         model = model.withSettings(options.settings);
@@ -13,7 +13,7 @@ export async function executeCall({ model, options, getStartEvent, getAbortEvent
     }
     const run = options?.run;
     const settings = model.settings;
-    const eventSource = new ModelCallEventSource({
+    const eventSource = new RunFunctionEventSource({
         observers: [...(settings.observers ?? []), ...(run?.observers ?? [])],
         errorHandler: run?.errorHandler,
     });
@@ -27,7 +27,7 @@ export async function executeCall({ model, options, getStartEvent, getAbortEvent
         model: model.modelInformation,
         startEpochSeconds: durationMeasurement.startEpochSeconds,
     };
-    eventSource.notifyModelCallStarted(getStartEvent(startMetadata, settings));
+    eventSource.notifyRunFunctionStarted(getStartEvent(startMetadata, settings));
     const result = await runSafe(() => generateResponse({
         functionId: options?.functionId,
         settings,
@@ -39,15 +39,15 @@ export async function executeCall({ model, options, getStartEvent, getAbortEvent
     };
     if (!result.ok) {
         if (result.isAborted) {
-            eventSource.notifyModelCallFinished(getAbortEvent(finishMetadata, settings));
+            eventSource.notifyRunFunctionFinished(getAbortEvent(finishMetadata, settings));
             throw new AbortError();
         }
-        eventSource.notifyModelCallFinished(getFailureEvent(finishMetadata, settings, result.error));
+        eventSource.notifyRunFunctionFinished(getFailureEvent(finishMetadata, settings, result.error));
         throw result.error;
     }
     const response = result.output;
     const output = extractOutputValue(response);
-    eventSource.notifyModelCallFinished(getSuccessEvent(finishMetadata, settings, response, output));
+    eventSource.notifyRunFunctionFinished(getSuccessEvent(finishMetadata, settings, response, output));
     return {
         output,
         response,
package/model-function/generate-image/generateImage.cjs
CHANGED
@@ -2,21 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.generateImage = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Generates a base64-encoded image using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI image models expect a string prompt,
- * and Stability AI models expect an array of text prompts with optional weights.
- *
- * @example
- * const { image } = await generateImage(
- *   new StabilityImageGenerationModel(...),
- *   [
- *     { text: "the wicked witch of the west" },
- *     { text: "style of early 19th century painting", weight: 0.5 },
- *   ]
- * );
- */
 async function generateImage(model, prompt, options) {
     const result = await (0, executeCall_js_1.executeCall)({
         model,
@@ -54,10 +39,12 @@ async function generateImage(model, prompt, options) {
         generatedImage: output,
     }),
     });
-    return
-
-
-
-
+    return options?.fullResponse === true
+        ? {
+            image: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateImage = generateImage;
@@ -8,7 +8,7 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGener
|
|
8
8
|
* and Stability AI models expect an array of text prompts with optional weights.
|
9
9
|
*
|
10
10
|
* @example
|
11
|
-
* const
|
11
|
+
* const image = await generateImage(
|
12
12
|
* new StabilityImageGenerationModel(...),
|
13
13
|
* [
|
14
14
|
* { text: "the wicked witch of the west" },
|
@@ -16,8 +16,13 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGener
|
|
16
16
|
* ]
|
17
17
|
* );
|
18
18
|
*/
|
19
|
-
export declare function generateImage<PROMPT, RESPONSE, SETTINGS extends ImageGenerationModelSettings>(model: ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>, prompt: PROMPT, options
|
19
|
+
export declare function generateImage<PROMPT, RESPONSE, SETTINGS extends ImageGenerationModelSettings>(model: ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>, prompt: PROMPT, options: FunctionOptions<SETTINGS> & {
|
20
|
+
fullResponse: true;
|
21
|
+
}): Promise<{
|
20
22
|
image: string;
|
21
23
|
response: RESPONSE;
|
22
24
|
metadata: CallMetadata<ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>>;
|
23
25
|
}>;
|
26
|
+
export declare function generateImage<PROMPT, RESPONSE, SETTINGS extends ImageGenerationModelSettings>(model: ImageGenerationModel<PROMPT, RESPONSE, SETTINGS>, prompt: PROMPT, options?: FunctionOptions<SETTINGS> & {
|
27
|
+
fullResponse?: false;
|
28
|
+
}): Promise<string>;
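generateImage gets the same overload treatment; its `fullResponse: true` variant additionally exposes the raw provider response. A sketch based on the JSDoc example kept in the declaration file (the Stability settings object is elided here, as it is in the JSDoc):

```ts
import { StabilityImageGenerationModel, generateImage } from "modelfusion";

const model = new StabilityImageGenerationModel({ /* model settings */ });

// Default overload: just the base64-encoded image string.
const image = await generateImage(model, [
  { text: "the wicked witch of the west" },
  { text: "style of early 19th century painting", weight: 0.5 },
]);

// fullResponse overload: image plus raw response and call metadata.
const { image: base64Image, response, metadata } = await generateImage(
  model,
  [{ text: "the wicked witch of the west" }],
  { fullResponse: true }
);
```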
|