modelfusion 0.6.0 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +27 -16
- package/composed-function/index.cjs +0 -3
- package/composed-function/index.d.ts +0 -3
- package/composed-function/index.js +0 -3
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +1 -1
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +1 -1
- package/index.cjs +1 -0
- package/index.d.ts +1 -0
- package/index.js +1 -0
- package/model-function/Model.d.ts +2 -2
- package/model-function/ModelCallEvent.d.ts +4 -6
- package/model-function/SuccessfulModelCall.cjs +6 -3
- package/model-function/SuccessfulModelCall.d.ts +3 -3
- package/model-function/SuccessfulModelCall.js +6 -3
- package/model-function/embed-text/embedText.cjs +16 -30
- package/model-function/embed-text/embedText.d.ts +14 -4
- package/model-function/embed-text/embedText.js +16 -30
- package/model-function/executeCall.cjs +6 -6
- package/model-function/executeCall.js +6 -6
- package/model-function/generate-image/generateImage.cjs +7 -20
- package/model-function/generate-image/generateImage.d.ts +7 -2
- package/model-function/generate-image/generateImage.js +7 -20
- package/model-function/generate-json/JsonGenerationEvent.d.ts +2 -2
- package/model-function/generate-json/generateJson.cjs +7 -5
- package/model-function/generate-json/generateJson.d.ts +6 -1
- package/model-function/generate-json/generateJson.js +7 -5
- package/model-function/generate-json/generateJsonOrText.cjs +11 -9
- package/model-function/generate-json/generateJsonOrText.d.ts +10 -1
- package/model-function/generate-json/generateJsonOrText.js +11 -9
- package/model-function/generate-text/generateText.cjs +7 -17
- package/model-function/generate-text/generateText.d.ts +7 -2
- package/model-function/generate-text/generateText.js +7 -17
- package/model-function/generate-text/streamText.cjs +13 -11
- package/model-function/generate-text/streamText.d.ts +9 -1
- package/model-function/generate-text/streamText.js +13 -11
- package/model-function/index.cjs +1 -1
- package/model-function/index.d.ts +1 -1
- package/model-function/index.js +1 -1
- package/model-function/transcribe-audio/transcribe.cjs +7 -19
- package/model-function/transcribe-audio/transcribe.d.ts +7 -2
- package/model-function/transcribe-audio/transcribe.js +7 -19
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.js +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.js +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.js +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.js +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.cjs +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextGenerationModel.js +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.cjs +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatPrompt.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.cjs +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.js +1 -1
- package/package.json +1 -1
- package/run/ConsoleLogger.cjs +2 -2
- package/run/ConsoleLogger.d.ts +5 -5
- package/run/ConsoleLogger.js +2 -2
- package/run/DefaultRun.cjs +7 -7
- package/run/DefaultRun.d.ts +6 -6
- package/run/DefaultRun.js +7 -7
- package/run/Run.d.ts +2 -2
- package/run/RunFunction.d.ts +0 -4
- package/run/RunFunctionEvent.d.ts +12 -0
- package/{model-function/ModelCallEventSource.cjs → run/RunFunctionEventSource.cjs} +7 -7
- package/run/RunFunctionEventSource.d.ts +13 -0
- package/{model-function/ModelCallEventSource.js → run/RunFunctionEventSource.js} +5 -5
- package/run/RunFunctionObserver.cjs +2 -0
- package/run/RunFunctionObserver.d.ts +5 -0
- package/run/RunFunctionObserver.js +1 -0
- package/run/index.cjs +3 -0
- package/run/index.d.ts +3 -0
- package/run/index.js +3 -0
- package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.cjs +1 -1
- package/text-chunk/SimilarTextChunksFromVectorIndexRetriever.js +1 -1
- package/text-chunk/upsertTextChunks.cjs +1 -1
- package/text-chunk/upsertTextChunks.js +1 -1
- package/tool/ExecuteToolEvent.cjs +2 -0
- package/tool/ExecuteToolEvent.d.ts +22 -0
- package/tool/ExecuteToolEvent.js +1 -0
- package/{composed-function/use-tool → tool}/Tool.cjs +7 -0
- package/{composed-function/use-tool → tool}/Tool.d.ts +5 -2
- package/{composed-function/use-tool → tool}/Tool.js +7 -0
- package/tool/ToolExecutionError.cjs +31 -0
- package/tool/ToolExecutionError.d.ts +11 -0
- package/tool/ToolExecutionError.js +27 -0
- package/tool/executeTool.cjs +79 -0
- package/tool/executeTool.d.ts +20 -0
- package/tool/executeTool.js +75 -0
- package/tool/index.cjs +22 -0
- package/tool/index.d.ts +6 -0
- package/tool/index.js +6 -0
- package/tool/useTool.cjs +33 -0
- package/tool/useTool.d.ts +15 -0
- package/tool/useTool.js +29 -0
- package/tool/useToolOrGenerateText.cjs +38 -0
- package/{composed-function/use-tool/useTool.d.ts → tool/useToolOrGenerateText.d.ts} +2 -15
- package/tool/useToolOrGenerateText.js +34 -0
- package/composed-function/use-tool/useTool.cjs +0 -59
- package/composed-function/use-tool/useTool.js +0 -54
- package/model-function/ModelCallEventSource.d.ts +0 -13
- package/model-function/ModelCallObserver.d.ts +0 -5
- /package/{model-function/ModelCallObserver.cjs → run/RunFunctionEvent.cjs} +0 -0
- /package/{model-function/ModelCallObserver.js → run/RunFunctionEvent.js} +0 -0
- /package/{composed-function/use-tool → tool}/NoSuchToolError.cjs +0 -0
- /package/{composed-function/use-tool → tool}/NoSuchToolError.d.ts +0 -0
- /package/{composed-function/use-tool → tool}/NoSuchToolError.js +0 -0

package/model-function/generate-image/generateImage.js
CHANGED
@@ -1,19 +1,4 @@
 import { executeCall } from "../executeCall.js";
-/**
- * Generates a base64-encoded image using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI image models expect a string prompt,
- * and Stability AI models expect an array of text prompts with optional weights.
- *
- * @example
- * const { image } = await generateImage(
- *   new StabilityImageGenerationModel(...),
- *   [
- *     { text: "the wicked witch of the west" },
- *     { text: "style of early 19th century painting", weight: 0.5 },
- *   ]
- * );
- */
 export async function generateImage(model, prompt, options) {
     const result = await executeCall({
         model,
@@ -51,9 +36,11 @@ export async function generateImage(model, prompt, options) {
             generatedImage: output,
         }),
     });
-    return
-
-
-
-
+    return options?.fullResponse === true
+        ? {
+            image: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }

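A rough usage sketch of the new `fullResponse` option for `generateImage`, based on the hunk above and the removed JSDoc (the model construction is omitted and the Stability-style prompt is taken from the old example; treat names and settings as illustrative, not authoritative):

```ts
import { generateImage } from "modelfusion";

// Placeholder for e.g. a StabilityImageGenerationModel; construction is omitted here.
declare const model: any;

async function example() {
  // Default behavior in 0.8.0: resolves directly to the base64-encoded image.
  const image = await generateImage(model, [
    { text: "the wicked witch of the west" },
    { text: "style of early 19th century painting", weight: 0.5 },
  ]);

  // Opt into the full result to also get the raw provider response and call metadata.
  const { image: sameImage, response, metadata } = await generateImage(
    model,
    [{ text: "the wicked witch of the west" }],
    { fullResponse: true }
  );
}
```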
package/model-function/generate-json/JsonGenerationEvent.d.ts
CHANGED
@@ -1,12 +1,12 @@
 import { ModelCallFinishedEventMetadata, ModelCallStartedEventMetadata } from "../ModelCallEvent.js";
 export type JsonGenerationStartedEvent = {
-    type: "json-generation-started";
+    type: "json-generation-started" | "json-or-text-generation-started";
     metadata: ModelCallStartedEventMetadata;
     settings: unknown;
     prompt: unknown;
 };
 export type JsonGenerationFinishedEvent = {
-    type: "json-generation-finished";
+    type: "json-generation-finished" | "json-or-text-generation-finished";
     metadata: ModelCallFinishedEventMetadata;
     settings: unknown;
     prompt: unknown;

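The widened union means one event type now covers both `generateJson` and `generateJsonOrText` calls. A minimal sketch of narrowing on it, assuming the event type is re-exported from the package root like the other model-call events:

```ts
import type { JsonGenerationStartedEvent } from "modelfusion";

// An observer callback can distinguish the two call kinds via the `type` field.
function describeStart(event: JsonGenerationStartedEvent): string {
  return event.type === "json-or-text-generation-started"
    ? "generateJsonOrText call started"
    : "generateJson call started";
}
```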
package/model-function/generate-json/generateJson.cjs
CHANGED
@@ -52,10 +52,12 @@ async function generateJson(model, schemaDefinition, prompt, options) {
             generatedJson: output,
         }),
     });
-    return
-
-
-
-
+    return options?.fullResponse === true
+        ? {
+            value: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateJson = generateJson;

package/model-function/generate-json/generateJson.d.ts
CHANGED
@@ -2,8 +2,13 @@ import { FunctionOptions } from "../FunctionOptions.js";
 import { CallMetadata } from "../executeCall.js";
 import { GenerateJsonModel, GenerateJsonModelSettings, GenerateJsonPrompt } from "./GenerateJsonModel.js";
 import { SchemaDefinition } from "./SchemaDefinition.js";
-export declare function generateJson<STRUCTURE, PROMPT, RESPONSE, NAME extends string, SETTINGS extends GenerateJsonModelSettings>(model: GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinition: SchemaDefinition<NAME, STRUCTURE>, prompt: (schemaDefinition: SchemaDefinition<NAME, STRUCTURE>) => PROMPT & GenerateJsonPrompt<RESPONSE>, options
+export declare function generateJson<STRUCTURE, PROMPT, RESPONSE, NAME extends string, SETTINGS extends GenerateJsonModelSettings>(model: GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinition: SchemaDefinition<NAME, STRUCTURE>, prompt: (schemaDefinition: SchemaDefinition<NAME, STRUCTURE>) => PROMPT & GenerateJsonPrompt<RESPONSE>, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     value: STRUCTURE;
     response: RESPONSE;
     metadata: CallMetadata<GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>>;
 }>;
+export declare function generateJson<STRUCTURE, PROMPT, RESPONSE, NAME extends string, SETTINGS extends GenerateJsonModelSettings>(model: GenerateJsonModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinition: SchemaDefinition<NAME, STRUCTURE>, prompt: (schemaDefinition: SchemaDefinition<NAME, STRUCTURE>) => PROMPT & GenerateJsonPrompt<RESPONSE>, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<STRUCTURE>;

package/model-function/generate-json/generateJson.js
CHANGED
@@ -49,9 +49,11 @@ export async function generateJson(model, schemaDefinition, prompt, options) {
             generatedJson: output,
         }),
     });
-    return
-
-
-
-
+    return options?.fullResponse === true
+        ? {
+            value: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }

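Taken together, the `.d.ts` overloads and the implementation change mean `generateJson` now resolves to the parsed structure by default and only returns the wrapper object when `fullResponse: true` is passed. A loose sketch; the model, schema definition, and prompt builder are provider-specific and left as untyped placeholders here:

```ts
import { generateJson } from "modelfusion";

// Placeholders for provider-specific pieces (e.g. an OpenAI chat model with a
// function-call prompt builder); their construction is omitted in this sketch.
declare const model: any;
declare const sentimentSchemaDefinition: any;
declare const promptForSchema: (schemaDefinition: any) => any;

async function example() {
  // Default overload: Promise<STRUCTURE>, i.e. just the parsed value.
  const value = await generateJson(model, sentimentSchemaDefinition, promptForSchema);

  // fullResponse: true also exposes the raw provider response and call metadata.
  const full = await generateJson(model, sentimentSchemaDefinition, promptForSchema, {
    fullResponse: true,
  });
  console.log(full.value, full.metadata);
}
```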
package/model-function/generate-json/generateJsonOrText.cjs
CHANGED
@@ -35,20 +35,20 @@ async function generateJsonOrText(model, schemaDefinitions, prompt, options) {
             };
         },
         getStartEvent: (metadata, settings) => ({
-            type: "json-generation-started",
+            type: "json-or-text-generation-started",
             metadata,
             settings,
             prompt,
         }),
         getAbortEvent: (metadata, settings) => ({
-            type: "json-generation-finished",
+            type: "json-or-text-generation-finished",
             status: "abort",
             metadata,
             settings,
             prompt,
         }),
         getFailureEvent: (metadata, settings, error) => ({
-            type: "json-generation-finished",
+            type: "json-or-text-generation-finished",
             status: "failure",
             metadata,
             settings,
@@ -56,7 +56,7 @@ async function generateJsonOrText(model, schemaDefinitions, prompt, options) {
             error,
         }),
         getSuccessEvent: (metadata, settings, response, output) => ({
-            type: "json-generation-finished",
+            type: "json-or-text-generation-finished",
             status: "success",
             metadata,
             settings,
@@ -65,10 +65,12 @@ async function generateJsonOrText(model, schemaDefinitions, prompt, options) {
             generatedJson: output,
         }),
     });
-    return
-
-
-
-
+    return options?.fullResponse === true
+        ? {
+            ...result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateJsonOrText = generateJsonOrText;

package/model-function/generate-json/generateJsonOrText.d.ts
CHANGED
@@ -14,7 +14,9 @@ type ToSchemaUnion<T> = {
     } : never;
 }[keyof T];
 type ToOutputValue<SCHEMAS extends SchemaDefinitionArray<SchemaDefinition<any, any>[]>> = ToSchemaUnion<ToSchemaDefinitionsMap<SCHEMAS>>;
-export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any, any>[], PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings>(model: GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinitions: SCHEMAS, prompt: (schemaDefinitions: SCHEMAS) => PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options
+export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any, any>[], PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings>(model: GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinitions: SCHEMAS, prompt: (schemaDefinitions: SCHEMAS) => PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<({
     schema: null;
     value: null;
     text: string;
@@ -22,4 +24,11 @@ export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any,
     response: RESPONSE;
     metadata: CallMetadata<GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>>;
 }>;
+export declare function generateJsonOrText<SCHEMAS extends SchemaDefinition<any, any>[], PROMPT, RESPONSE, SETTINGS extends GenerateJsonOrTextModelSettings>(model: GenerateJsonOrTextModel<PROMPT, RESPONSE, SETTINGS>, schemaDefinitions: SCHEMAS, prompt: (schemaDefinitions: SCHEMAS) => PROMPT & GenerateJsonOrTextPrompt<RESPONSE>, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<{
+    schema: null;
+    value: null;
+    text: string;
+} | ToOutputValue<SCHEMAS>>;
 export {};

package/model-function/generate-json/generateJsonOrText.js
CHANGED
@@ -32,20 +32,20 @@ export async function generateJsonOrText(model, schemaDefinitions, prompt, optio
             };
         },
         getStartEvent: (metadata, settings) => ({
-            type: "json-generation-started",
+            type: "json-or-text-generation-started",
             metadata,
             settings,
             prompt,
         }),
         getAbortEvent: (metadata, settings) => ({
-            type: "json-generation-finished",
+            type: "json-or-text-generation-finished",
             status: "abort",
             metadata,
             settings,
             prompt,
         }),
         getFailureEvent: (metadata, settings, error) => ({
-            type: "json-generation-finished",
+            type: "json-or-text-generation-finished",
             status: "failure",
             metadata,
             settings,
@@ -53,7 +53,7 @@ export async function generateJsonOrText(model, schemaDefinitions, prompt, optio
             error,
         }),
         getSuccessEvent: (metadata, settings, response, output) => ({
-            type: "json-generation-finished",
+            type: "json-or-text-generation-finished",
             status: "success",
             metadata,
             settings,
@@ -62,9 +62,11 @@ export async function generateJsonOrText(model, schemaDefinitions, prompt, optio
             generatedJson: output,
         }),
     });
-    return
-
-
-
-
+    return options?.fullResponse === true
+        ? {
+            ...result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }

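The default `generateJsonOrText` result is now the union shown in the `.d.ts` above: either `{ schema: null, value: null, text }` for a plain-text answer or a matched schema value; `fullResponse: true` spreads the same fields and adds `response` and `metadata`. A loose sketch with untyped placeholders for the provider-specific pieces:

```ts
import { generateJsonOrText } from "modelfusion";

// Placeholders for a provider-specific model, schema definitions, and prompt builder.
declare const model: any;
declare const schemaDefinitions: any[];
declare const promptForSchemas: (schemas: any[]) => any;

async function example() {
  // Default overload: schema is null when the model answered with plain text.
  const result = await generateJsonOrText(model, schemaDefinitions, promptForSchemas);
  if (result.schema === null) {
    console.log("plain text answer:", result.text);
  } else {
    console.log("structured answer for", result.schema, result.value);
  }

  // fullResponse: true additionally returns the raw response and call metadata.
  const { response, metadata, ...output } = await generateJsonOrText(
    model,
    schemaDefinitions,
    promptForSchemas,
    { fullResponse: true }
  );
}
```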
package/model-function/generate-text/generateText.cjs
CHANGED
@@ -2,18 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.generateText = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Generates a text using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI text models expect a string prompt, and OpenAI chat models expect an array of chat messages.
- *
- * @example
- * const model = new OpenAITextGenerationModel(...);
- *
- * const { text } = await model.generateText(
- *   "Write a short story about a robot learning to love:\n\n"
- * );
- */
 async function generateText(
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 model, prompt, options) {
@@ -58,10 +46,12 @@ model, prompt, options) {
             generatedText: output,
         }),
     });
-    return
-
-
-
-
+    return options?.fullResponse === true
+        ? {
+            text: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.generateText = generateText;

package/model-function/generate-text/generateText.d.ts
CHANGED
@@ -9,12 +9,17 @@ import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerati
  * @example
  * const model = new OpenAITextGenerationModel(...);
  *
- * const
+ * const text = await model.generateText(
  *   "Write a short story about a robot learning to love:\n\n"
  * );
  */
-export declare function generateText<PROMPT, RESPONSE, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, RESPONSE, any, SETTINGS>, prompt: PROMPT, options
+export declare function generateText<PROMPT, RESPONSE, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, RESPONSE, any, SETTINGS>, prompt: PROMPT, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     text: string;
     response: RESPONSE;
     metadata: CallMetadata<TextGenerationModel<PROMPT, RESPONSE, unknown, SETTINGS>>;
 }>;
+export declare function generateText<PROMPT, RESPONSE, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, RESPONSE, any, SETTINGS>, prompt: PROMPT, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<string>;

package/model-function/generate-text/generateText.js
CHANGED
@@ -1,16 +1,4 @@
 import { executeCall } from "../executeCall.js";
-/**
- * Generates a text using a prompt.
- * The prompt format depends on the model.
- * For example, OpenAI text models expect a string prompt, and OpenAI chat models expect an array of chat messages.
- *
- * @example
- * const model = new OpenAITextGenerationModel(...);
- *
- * const { text } = await model.generateText(
- *   "Write a short story about a robot learning to love:\n\n"
- * );
- */
 export async function generateText(
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 model, prompt, options) {
@@ -55,9 +43,11 @@ model, prompt, options) {
             generatedText: output,
         }),
     });
-    return
-
-
-
-
+    return options?.fullResponse === true
+        ? {
+            text: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }

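A short usage sketch of the two `generateText` overloads from the `.d.ts` above. The model name and constructor settings are assumptions for illustration (any text generation model works, and an OpenAI API key is expected via the usual environment configuration):

```ts
import { generateText, OpenAITextGenerationModel } from "modelfusion";

async function example() {
  // Assumed settings; adjust model name and options for your setup.
  const model = new OpenAITextGenerationModel({ model: "text-davinci-003" });

  // Default overload: Promise<string>.
  const text = await generateText(
    model,
    "Write a short story about a robot learning to love:\n\n"
  );

  // fullResponse: true additionally returns the raw response and call metadata.
  const { text: sameText, response, metadata } = await generateText(
    model,
    "Write a short story about a robot learning to love:\n\n",
    { fullResponse: true }
  );
}
```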
package/model-function/generate-text/streamText.cjs
CHANGED
@@ -2,10 +2,10 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.streamText = void 0;
 const nanoid_1 = require("nanoid");
+const RunFunctionEventSource_js_1 = require("../../run/RunFunctionEventSource.cjs");
 const DurationMeasurement_js_1 = require("../../util/DurationMeasurement.cjs");
 const AbortError_js_1 = require("../../util/api/AbortError.cjs");
 const runSafe_js_1 = require("../../util/runSafe.cjs");
-const ModelCallEventSource_js_1 = require("../ModelCallEventSource.cjs");
 const extractTextDeltas_js_1 = require("./extractTextDeltas.cjs");
 async function streamText(model, prompt, options) {
     if (options?.settings != null) {
@@ -17,7 +17,7 @@ async function streamText(model, prompt, options) {
     }
     const run = options?.run;
     const settings = model.settings;
-    const eventSource = new
+    const eventSource = new RunFunctionEventSource_js_1.RunFunctionEventSource({
         observers: [...(settings.observers ?? []), ...(run?.observers ?? [])],
         errorHandler: run?.errorHandler,
     });
@@ -31,7 +31,7 @@ async function streamText(model, prompt, options) {
         model: model.modelInformation,
         startEpochSeconds: durationMeasurement.startEpochSeconds,
     };
-    eventSource.
+    eventSource.notifyRunFunctionStarted({
         type: "text-streaming-started",
         metadata: startMetadata,
         settings,
@@ -49,7 +49,7 @@ async function streamText(model, prompt, options) {
             ...startMetadata,
             durationInMs: durationMeasurement.durationInMs,
         };
-        eventSource.
+        eventSource.notifyRunFunctionFinished({
             type: "text-streaming-finished",
             status: "success",
             metadata: finishMetadata,
@@ -64,7 +64,7 @@ async function streamText(model, prompt, options) {
             ...startMetadata,
             durationInMs: durationMeasurement.durationInMs,
         };
-        eventSource.
+        eventSource.notifyRunFunctionFinished(error instanceof AbortError_js_1.AbortError
             ? {
                 type: "text-streaming-finished",
                 status: "abort",
@@ -88,7 +88,7 @@ async function streamText(model, prompt, options) {
        durationInMs: durationMeasurement.durationInMs,
    };
    if (result.isAborted) {
-        eventSource.
+        eventSource.notifyRunFunctionFinished({
            type: "text-streaming-finished",
            status: "abort",
            metadata: finishMetadata,
@@ -97,7 +97,7 @@ async function streamText(model, prompt, options) {
        });
        throw new AbortError_js_1.AbortError();
    }
-    eventSource.
+    eventSource.notifyRunFunctionFinished({
        type: "text-streaming-finished",
        status: "failure",
        metadata: finishMetadata,
@@ -107,9 +107,11 @@ async function streamText(model, prompt, options) {
    });
    throw result.error;
    }
-    return
-
-
-
+    return options?.fullResponse === true
+        ? {
+            textStream: result.output,
+            metadata: startMetadata,
+        }
+        : result.output;
 }
 exports.streamText = streamText;

package/model-function/generate-text/streamText.d.ts
CHANGED
@@ -5,7 +5,15 @@ import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerati
 export declare function streamText<PROMPT, FULL_DELTA, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, unknown, FULL_DELTA, SETTINGS> & {
     generateDeltaStreamResponse: (prompt: PROMPT, options: FunctionOptions<SETTINGS>) => PromiseLike<AsyncIterable<DeltaEvent<FULL_DELTA>>>;
     extractTextDelta: (fullDelta: FULL_DELTA) => string | undefined;
-}, prompt: PROMPT, options?: FunctionOptions<SETTINGS>
+}, prompt: PROMPT, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<AsyncIterable<string>>;
+export declare function streamText<PROMPT, FULL_DELTA, SETTINGS extends TextGenerationModelSettings>(model: TextGenerationModel<PROMPT, unknown, FULL_DELTA, SETTINGS> & {
+    generateDeltaStreamResponse: (prompt: PROMPT, options: FunctionOptions<SETTINGS>) => PromiseLike<AsyncIterable<DeltaEvent<FULL_DELTA>>>;
+    extractTextDelta: (fullDelta: FULL_DELTA) => string | undefined;
+}, prompt: PROMPT, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     textStream: AsyncIterable<string>;
     metadata: Omit<CallMetadata<TextGenerationModel<PROMPT, unknown, FULL_DELTA, SETTINGS>>, "durationInMs">;
 }>;

package/model-function/generate-text/streamText.js
CHANGED
@@ -1,8 +1,8 @@
 import { nanoid as createId } from "nanoid";
+import { RunFunctionEventSource } from "../../run/RunFunctionEventSource.js";
 import { startDurationMeasurement } from "../../util/DurationMeasurement.js";
 import { AbortError } from "../../util/api/AbortError.js";
 import { runSafe } from "../../util/runSafe.js";
-import { ModelCallEventSource } from "../ModelCallEventSource.js";
 import { extractTextDeltas } from "./extractTextDeltas.js";
 export async function streamText(model, prompt, options) {
     if (options?.settings != null) {
@@ -14,7 +14,7 @@ export async function streamText(model, prompt, options) {
     }
     const run = options?.run;
     const settings = model.settings;
-    const eventSource = new
+    const eventSource = new RunFunctionEventSource({
         observers: [...(settings.observers ?? []), ...(run?.observers ?? [])],
         errorHandler: run?.errorHandler,
     });
@@ -28,7 +28,7 @@ export async function streamText(model, prompt, options) {
         model: model.modelInformation,
         startEpochSeconds: durationMeasurement.startEpochSeconds,
     };
-    eventSource.
+    eventSource.notifyRunFunctionStarted({
         type: "text-streaming-started",
         metadata: startMetadata,
         settings,
@@ -46,7 +46,7 @@ export async function streamText(model, prompt, options) {
             ...startMetadata,
             durationInMs: durationMeasurement.durationInMs,
         };
-        eventSource.
+        eventSource.notifyRunFunctionFinished({
             type: "text-streaming-finished",
             status: "success",
             metadata: finishMetadata,
@@ -61,7 +61,7 @@ export async function streamText(model, prompt, options) {
             ...startMetadata,
             durationInMs: durationMeasurement.durationInMs,
         };
-        eventSource.
+        eventSource.notifyRunFunctionFinished(error instanceof AbortError
             ? {
                 type: "text-streaming-finished",
                 status: "abort",
@@ -85,7 +85,7 @@ export async function streamText(model, prompt, options) {
        durationInMs: durationMeasurement.durationInMs,
    };
    if (result.isAborted) {
-        eventSource.
+        eventSource.notifyRunFunctionFinished({
            type: "text-streaming-finished",
            status: "abort",
            metadata: finishMetadata,
@@ -94,7 +94,7 @@ export async function streamText(model, prompt, options) {
        });
        throw new AbortError();
    }
-    eventSource.
+    eventSource.notifyRunFunctionFinished({
        type: "text-streaming-finished",
        status: "failure",
        metadata: finishMetadata,
@@ -104,8 +104,10 @@ export async function streamText(model, prompt, options) {
    });
    throw result.error;
    }
-    return
-
-
-
+    return options?.fullResponse === true
+        ? {
+            textStream: result.output,
+            metadata: startMetadata,
+        }
+        : result.output;
 }

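A minimal sketch of the corresponding `streamText` overloads. The model is left as a placeholder because it must support delta streaming (`generateDeltaStreamResponse` / `extractTextDelta`), and constructing one is provider-specific:

```ts
import { streamText } from "modelfusion";

// Placeholder for a delta-streaming text generation model; construction is omitted.
declare const model: any;

async function example() {
  // Default overload: Promise<AsyncIterable<string>> of text deltas.
  const textStream = await streamText(model, "Tell me a story:\n\n");
  for await (const delta of textStream) {
    process.stdout.write(delta);
  }

  // fullResponse: true returns the stream together with the start metadata.
  const { textStream: stream, metadata } = await streamText(model, "Tell me a story:\n\n", {
    fullResponse: true,
  });
}
```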
package/model-function/index.cjs
CHANGED
@@ -17,7 +17,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 __exportStar(require("./FunctionOptions.cjs"), exports);
 __exportStar(require("./Model.cjs"), exports);
 __exportStar(require("./ModelCallEvent.cjs"), exports);
-__exportStar(require("
+__exportStar(require("../run/RunFunctionObserver.cjs"), exports);
 __exportStar(require("./ModelInformation.cjs"), exports);
 __exportStar(require("./SuccessfulModelCall.cjs"), exports);
 __exportStar(require("./embed-text/TextEmbeddingEvent.cjs"), exports);

package/model-function/index.d.ts
CHANGED
@@ -1,7 +1,7 @@
 export * from "./FunctionOptions.js";
 export * from "./Model.js";
 export * from "./ModelCallEvent.js";
-export * from "
+export * from "../run/RunFunctionObserver.js";
 export * from "./ModelInformation.js";
 export * from "./SuccessfulModelCall.js";
 export * from "./embed-text/TextEmbeddingEvent.js";

package/model-function/index.js
CHANGED
@@ -1,7 +1,7 @@
 export * from "./FunctionOptions.js";
 export * from "./Model.js";
 export * from "./ModelCallEvent.js";
-export * from "
+export * from "../run/RunFunctionObserver.js";
 export * from "./ModelInformation.js";
 export * from "./SuccessfulModelCall.js";
 export * from "./embed-text/TextEmbeddingEvent.js";

package/model-function/transcribe-audio/transcribe.cjs
CHANGED
@@ -2,20 +2,6 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.transcribe = void 0;
 const executeCall_js_1 = require("../executeCall.cjs");
-/**
- * Transcribe audio data into text.
- *
- * @example
- * const data = await fs.promises.readFile("data/test.mp3");
- *
- * const { transcription } = await transcribe(
- *   new OpenAITranscriptionModel({ model: "whisper-1" }),
- *   {
- *     type: "mp3",
- *     data,
- *   }
- * );
- */
 async function transcribe(model, data, options) {
     const result = await (0, executeCall_js_1.executeCall)({
         model,
@@ -53,10 +39,12 @@ async function transcribe(model, data, options) {
             transcription: output,
         }),
     });
-    return
-
-
-
-
+    return options?.fullResponse === true
+        ? {
+            transcription: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }
 exports.transcribe = transcribe;

package/model-function/transcribe-audio/transcribe.d.ts
CHANGED
@@ -7,7 +7,7 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
  * @example
  * const data = await fs.promises.readFile("data/test.mp3");
  *
- * const
+ * const transcription = await transcribe(
  *   new OpenAITranscriptionModel({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
@@ -15,8 +15,13 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
  *   }
  * );
  */
-export declare function transcribe<DATA, RESPONSE, SETTINGS extends TranscriptionModelSettings>(model: TranscriptionModel<DATA, RESPONSE, SETTINGS>, data: DATA, options
+export declare function transcribe<DATA, RESPONSE, SETTINGS extends TranscriptionModelSettings>(model: TranscriptionModel<DATA, RESPONSE, SETTINGS>, data: DATA, options: FunctionOptions<SETTINGS> & {
+    fullResponse: true;
+}): Promise<{
     transcription: string;
     response: RESPONSE;
     metadata: CallMetadata<TranscriptionModel<DATA, RESPONSE, SETTINGS>>;
 }>;
+export declare function transcribe<DATA, RESPONSE, SETTINGS extends TranscriptionModelSettings>(model: TranscriptionModel<DATA, RESPONSE, SETTINGS>, data: DATA, options?: FunctionOptions<SETTINGS> & {
+    fullResponse?: false;
+}): Promise<string>;

package/model-function/transcribe-audio/transcribe.js
CHANGED
@@ -1,18 +1,4 @@
 import { executeCall } from "../executeCall.js";
-/**
- * Transcribe audio data into text.
- *
- * @example
- * const data = await fs.promises.readFile("data/test.mp3");
- *
- * const { transcription } = await transcribe(
- *   new OpenAITranscriptionModel({ model: "whisper-1" }),
- *   {
- *     type: "mp3",
- *     data,
- *   }
- * );
- */
 export async function transcribe(model, data, options) {
     const result = await executeCall({
         model,
@@ -50,9 +36,11 @@ export async function transcribe(model, data, options) {
             transcription: output,
         }),
     });
-    return
-
-
-
-
+    return options?.fullResponse === true
+        ? {
+            transcription: result.output,
+            response: result.response,
+            metadata: result.metadata,
+        }
+        : result.output;
 }

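A usage sketch matching the updated `transcribe` JSDoc and overloads above. The Whisper model name comes from the diff itself; file path and API key handling are assumptions:

```ts
import * as fs from "node:fs";
import { transcribe, OpenAITranscriptionModel } from "modelfusion";

async function example() {
  const data = await fs.promises.readFile("data/test.mp3");

  // Default overload: Promise<string> with just the transcription text.
  const transcription = await transcribe(
    new OpenAITranscriptionModel({ model: "whisper-1" }),
    { type: "mp3", data }
  );

  // fullResponse: true also returns the raw provider response and call metadata.
  const { transcription: sameText, response, metadata } = await transcribe(
    new OpenAITranscriptionModel({ model: "whisper-1" }),
    { type: "mp3", data },
    { fullResponse: true }
  );
}
```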
package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts
CHANGED
@@ -15,8 +15,8 @@ export declare class Automatic1111ImageGenerationModel extends AbstractModel<Aut
     get modelName(): string;
     callAPI(input: A111ImageGenerationPrompt, options?: FunctionOptions<Automatic1111ImageGenerationModelSettings>): Promise<Automatic1111ImageGenerationResponse>;
     generateImageResponse(prompt: A111ImageGenerationPrompt, options?: FunctionOptions<Automatic1111ImageGenerationModelSettings>): Promise<{
-        parameters: {};
         images: string[];
+        parameters: {};
         info: string;
     }>;
     extractBase64Image(response: Automatic1111ImageGenerationResponse): string;
@@ -37,12 +37,12 @@ declare const Automatic1111ImageGenerationResponseSchema: z.ZodObject<{
     parameters: z.ZodObject<{}, "strip", z.ZodTypeAny, {}, {}>;
     info: z.ZodString;
 }, "strip", z.ZodTypeAny, {
-    parameters: {};
     images: string[];
+    parameters: {};
     info: string;
 }, {
-    parameters: {};
     images: string[];
+    parameters: {};
     info: string;
 }>;
 export type Automatic1111ImageGenerationResponse = z.infer<typeof Automatic1111ImageGenerationResponseSchema>;