modelfusion 0.112.0 → 0.114.0
This diff shows the changes between publicly released versions of the package, as they appear in its public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +105 -0
- package/README.md +108 -212
- package/core/FunctionOptions.d.ts +14 -0
- package/core/api/AbstractApiConfiguration.cjs +16 -1
- package/core/api/AbstractApiConfiguration.d.ts +7 -3
- package/core/api/AbstractApiConfiguration.js +16 -1
- package/core/api/ApiConfiguration.d.ts +10 -1
- package/core/api/BaseUrlApiConfiguration.cjs +9 -5
- package/core/api/BaseUrlApiConfiguration.d.ts +7 -7
- package/core/api/BaseUrlApiConfiguration.js +9 -5
- package/core/api/CustomHeaderProvider.cjs +2 -0
- package/core/api/CustomHeaderProvider.d.ts +2 -0
- package/core/api/CustomHeaderProvider.js +1 -0
- package/core/api/index.cjs +1 -0
- package/core/api/index.d.ts +1 -0
- package/core/api/index.js +1 -0
- package/core/cache/Cache.cjs +2 -0
- package/core/cache/Cache.d.ts +12 -0
- package/core/cache/Cache.js +1 -0
- package/core/cache/MemoryCache.cjs +23 -0
- package/core/cache/MemoryCache.d.ts +15 -0
- package/core/cache/MemoryCache.js +19 -0
- package/core/cache/index.cjs +18 -0
- package/core/cache/index.d.ts +2 -0
- package/core/cache/index.js +2 -0
- package/core/index.cjs +1 -0
- package/core/index.d.ts +1 -0
- package/core/index.js +1 -0
- package/core/schema/TypeValidationError.cjs +36 -0
- package/core/schema/TypeValidationError.d.ts +15 -0
- package/core/schema/TypeValidationError.js +32 -0
- package/core/schema/index.cjs +2 -0
- package/core/schema/index.d.ts +2 -0
- package/core/schema/index.js +2 -0
- package/core/schema/parseJSON.cjs +6 -14
- package/core/schema/parseJSON.d.ts +3 -2
- package/core/schema/parseJSON.js +6 -14
- package/core/schema/validateTypes.cjs +65 -0
- package/core/schema/validateTypes.d.ts +34 -0
- package/core/schema/validateTypes.js +60 -0
- package/model-function/embed/EmbeddingModel.d.ts +2 -2
- package/model-function/executeStandardCall.cjs +3 -1
- package/model-function/executeStandardCall.d.ts +2 -2
- package/model-function/executeStandardCall.js +3 -1
- package/model-function/executeStreamCall.cjs +2 -1
- package/model-function/executeStreamCall.d.ts +2 -2
- package/model-function/executeStreamCall.js +2 -1
- package/model-function/generate-image/ImageGenerationModel.d.ts +2 -2
- package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +2 -2
- package/model-function/generate-speech/SpeechGenerationModel.d.ts +3 -3
- package/model-function/generate-structure/StructureFromTextPromptTemplate.d.ts +13 -0
- package/model-function/generate-structure/generateStructure.cjs +4 -1
- package/model-function/generate-structure/generateStructure.js +4 -1
- package/model-function/generate-structure/jsonStructurePrompt.cjs +12 -0
- package/model-function/generate-structure/jsonStructurePrompt.d.ts +3 -3
- package/model-function/generate-structure/jsonStructurePrompt.js +12 -0
- package/model-function/generate-structure/streamStructure.cjs +4 -1
- package/model-function/generate-structure/streamStructure.js +4 -1
- package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +3 -0
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +11 -2
- package/model-function/generate-text/PromptTemplateTextGenerationModel.js +3 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -2
- package/model-function/generate-text/TextGenerationModel.d.ts +16 -3
- package/model-function/generate-text/generateText.cjs +43 -1
- package/model-function/generate-text/generateText.js +43 -1
- package/model-function/generate-transcription/TranscriptionModel.d.ts +2 -2
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +20 -8
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +27 -5
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +20 -8
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +8 -3
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +3 -3
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +8 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +8 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.js +8 -3
- package/model-provider/cohere/CohereTextGenerationModel.cjs +20 -8
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +45 -5
- package/model-provider/cohere/CohereTextGenerationModel.js +20 -8
- package/model-provider/cohere/CohereTokenizer.cjs +16 -6
- package/model-provider/cohere/CohereTokenizer.d.ts +3 -3
- package/model-provider/cohere/CohereTokenizer.js +16 -6
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.cjs +1 -1
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.js +1 -1
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +8 -3
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +2 -2
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +8 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +8 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +3 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +8 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +18 -4
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +21 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +18 -4
- package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +20 -8
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +125 -5
- package/model-provider/llamacpp/LlamaCppCompletionModel.js +20 -8
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +8 -3
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +3 -3
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +8 -3
- package/model-provider/llamacpp/LlamaCppTokenizer.cjs +8 -3
- package/model-provider/llamacpp/LlamaCppTokenizer.d.ts +2 -2
- package/model-provider/llamacpp/LlamaCppTokenizer.js +8 -3
- package/model-provider/lmnt/LmntSpeechModel.cjs +8 -3
- package/model-provider/lmnt/LmntSpeechModel.d.ts +2 -2
- package/model-provider/lmnt/LmntSpeechModel.js +8 -3
- package/model-provider/mistral/MistralChatModel.cjs +20 -8
- package/model-provider/mistral/MistralChatModel.d.ts +55 -5
- package/model-provider/mistral/MistralChatModel.js +20 -8
- package/model-provider/mistral/MistralTextEmbeddingModel.cjs +8 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +3 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.js +8 -3
- package/model-provider/ollama/OllamaChatModel.cjs +35 -8
- package/model-provider/ollama/OllamaChatModel.d.ts +31 -5
- package/model-provider/ollama/OllamaChatModel.js +35 -8
- package/model-provider/ollama/OllamaCompletionModel.cjs +20 -7
- package/model-provider/ollama/OllamaCompletionModel.d.ts +43 -5
- package/model-provider/ollama/OllamaCompletionModel.js +20 -7
- package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +8 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +3 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.js +8 -3
- package/model-provider/openai/AbstractOpenAIChatModel.cjs +23 -13
- package/model-provider/openai/AbstractOpenAIChatModel.d.ts +94 -7
- package/model-provider/openai/AbstractOpenAIChatModel.js +23 -13
- package/model-provider/openai/AbstractOpenAICompletionModel.cjs +21 -9
- package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +35 -5
- package/model-provider/openai/AbstractOpenAICompletionModel.js +21 -9
- package/model-provider/openai/AzureOpenAIApiConfiguration.cjs +5 -2
- package/model-provider/openai/AzureOpenAIApiConfiguration.d.ts +2 -1
- package/model-provider/openai/AzureOpenAIApiConfiguration.js +5 -2
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs +12 -6
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +89 -5
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js +12 -6
- package/model-provider/openai/OpenAIChatModel.cjs +12 -4
- package/model-provider/openai/OpenAIChatModel.d.ts +3 -2
- package/model-provider/openai/OpenAIChatModel.js +12 -4
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +10 -6
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +4 -4
- package/model-provider/openai/OpenAIImageGenerationModel.js +10 -6
- package/model-provider/openai/OpenAISpeechModel.cjs +9 -4
- package/model-provider/openai/OpenAISpeechModel.d.ts +3 -3
- package/model-provider/openai/OpenAISpeechModel.js +9 -4
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +11 -6
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +3 -3
- package/model-provider/openai/OpenAITextEmbeddingModel.js +11 -6
- package/model-provider/openai/OpenAITranscriptionModel.cjs +9 -6
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +4 -4
- package/model-provider/openai/OpenAITranscriptionModel.js +9 -6
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +12 -4
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +3 -2
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +12 -4
- package/model-provider/stability/StabilityImageGenerationModel.cjs +10 -5
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +3 -3
- package/model-provider/stability/StabilityImageGenerationModel.js +10 -5
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +9 -7
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +3 -3
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +9 -7
- package/observability/helicone/HeliconeOpenAIApiConfiguration.cjs +2 -1
- package/observability/helicone/HeliconeOpenAIApiConfiguration.d.ts +3 -1
- package/observability/helicone/HeliconeOpenAIApiConfiguration.js +2 -1
- package/package.json +2 -2
package/model-function/executeStandardCall.cjs

```diff
@@ -43,11 +43,13 @@ async function executeStandardCall({ model, options, input, functionType, genera
         ...startMetadata,
     });
     const result = await (0, runSafe_js_1.runSafe)(() => generateResponse({
+        functionType: startMetadata.functionType,
         functionId: options?.functionId,
+        callId: startMetadata.callId,
         logging: options?.logging,
         observers: options?.observers,
+        cache: options?.cache,
         run,
-        parentCallId: startMetadata.callId,
    }));
    const finishMetadata = {
        eventType: "finished",
```
package/model-function/executeStandardCall.d.ts

```diff
@@ -1,4 +1,4 @@
-import { FunctionOptions } from "../core/FunctionOptions.js";
+import { FunctionCallOptions, FunctionOptions } from "../core/FunctionOptions.js";
 import { Model, ModelSettings } from "./Model.js";
 import { ModelCallStartedEvent } from "./ModelCallEvent.js";
 import { ModelCallMetadata } from "./ModelCallMetadata.js";
@@ -7,7 +7,7 @@ export declare function executeStandardCall<VALUE, MODEL extends Model<ModelSett
     options?: FunctionOptions;
     input: unknown;
     functionType: ModelCallStartedEvent["functionType"];
-    generateResponse: (options
+    generateResponse: (options: FunctionCallOptions) => PromiseLike<{
         response: unknown;
         extractedValue: VALUE;
         usage?: unknown;
```
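The `generateResponse` callback now receives a `FunctionCallOptions` object instead of plain `FunctionOptions`. The type itself lives in `package/core/FunctionOptions.d.ts` (+14 lines, not shown in this diff); judging from the fields threaded through `executeStandardCall`, it plausibly extends `FunctionOptions` with per-call metadata. A sketch, not the actual declaration:

```ts
// Sketch only: inferred from the fields passed to generateResponse above.
// The real declarations are in package/core/FunctionOptions.d.ts.
interface FunctionOptions {
  functionId?: string;
  logging?: unknown;
  observers?: unknown[];
  cache?: unknown; // new in this release; see package/core/cache
  run?: unknown;
}

interface FunctionCallOptions extends FunctionOptions {
  functionType: string; // copied from startMetadata.functionType
  callId: string; // id of the current call (previously passed as parentCallId)
}
```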
package/model-function/executeStandardCall.js

```diff
@@ -40,11 +40,13 @@ export async function executeStandardCall({ model, options, input, functionType,
         ...startMetadata,
     });
     const result = await runSafe(() => generateResponse({
+        functionType: startMetadata.functionType,
         functionId: options?.functionId,
+        callId: startMetadata.callId,
         logging: options?.logging,
         observers: options?.observers,
+        cache: options?.cache,
         run,
-        parentCallId: startMetadata.callId,
    }));
    const finishMetadata = {
        eventType: "finished",
```
package/model-function/executeStreamCall.cjs

```diff
@@ -45,11 +45,12 @@ async function executeStreamCall({ model, options, input, functionType, startStr
     });
     const result = await (0, runSafe_js_1.runSafe)(async () => {
         const deltaIterable = await startStream({
+            functionType: startMetadata.functionType,
             functionId: options?.functionId,
+            callId: startMetadata.callId,
             logging: options?.logging,
             observers: options?.observers,
             run,
-            parentCallId: startMetadata.callId,
         });
         // Return a queue that can be iterated over several times:
         const responseQueue = new AsyncQueue_js_1.AsyncQueue();
```
package/model-function/executeStreamCall.d.ts

```diff
@@ -1,4 +1,4 @@
-import { FunctionOptions } from "../core/FunctionOptions.js";
+import { FunctionCallOptions, FunctionOptions } from "../core/FunctionOptions.js";
 import { Delta } from "./Delta.js";
 import { Model, ModelSettings } from "./Model.js";
 import { ModelCallStartedEvent } from "./ModelCallEvent.js";
@@ -8,7 +8,7 @@ export declare function executeStreamCall<DELTA_VALUE, VALUE, MODEL extends Mode
     options?: FunctionOptions;
     input: unknown;
     functionType: ModelCallStartedEvent["functionType"];
-    startStream: (options
+    startStream: (options: FunctionCallOptions) => PromiseLike<AsyncIterable<Delta<DELTA_VALUE>>>;
     processDelta: (delta: Delta<DELTA_VALUE> & {
         type: "delta";
     }) => VALUE | undefined;
```
package/model-function/executeStreamCall.js

```diff
@@ -42,11 +42,12 @@ export async function executeStreamCall({ model, options, input, functionType, s
     });
     const result = await runSafe(async () => {
         const deltaIterable = await startStream({
+            functionType: startMetadata.functionType,
             functionId: options?.functionId,
+            callId: startMetadata.callId,
             logging: options?.logging,
             observers: options?.observers,
             run,
-            parentCallId: startMetadata.callId,
         });
         // Return a queue that can be iterated over several times:
         const responseQueue = new AsyncQueue();
```
package/model-function/generate-image/ImageGenerationModel.d.ts

```diff
@@ -1,4 +1,4 @@
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { Model, ModelSettings } from "../Model.js";
 import { PromptTemplate } from "../PromptTemplate.js";
 export interface ImageGenerationModelSettings extends ModelSettings {
@@ -14,7 +14,7 @@ export interface ImageGenerationModelSettings extends ModelSettings {
     numberOfGenerations?: number;
 }
 export interface ImageGenerationModel<PROMPT, SETTINGS extends ImageGenerationModelSettings = ImageGenerationModelSettings> extends Model<SETTINGS> {
-    doGenerateImages(prompt: PROMPT, options
+    doGenerateImages(prompt: PROMPT, options: FunctionCallOptions): PromiseLike<{
         response: unknown;
         base64Images: string[];
     }>;
```
package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts

```diff
@@ -1,4 +1,4 @@
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { PromptTemplate } from "../PromptTemplate.js";
 import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGenerationModel.js";
 export declare class PromptTemplateImageGenerationModel<PROMPT, MODEL_PROMPT, SETTINGS extends ImageGenerationModelSettings, MODEL extends ImageGenerationModel<MODEL_PROMPT, SETTINGS>> implements ImageGenerationModel<PROMPT, SETTINGS> {
@@ -10,7 +10,7 @@ export declare class PromptTemplateImageGenerationModel<PROMPT, MODEL_PROMPT, SE
     });
     get modelInformation(): import("../ModelInformation.js").ModelInformation;
     get settings(): SETTINGS;
-    doGenerateImages(prompt: PROMPT, options
+    doGenerateImages(prompt: PROMPT, options: FunctionCallOptions): PromiseLike<{
         response: unknown;
         base64Images: string[];
     }>;
```
package/model-function/generate-speech/SpeechGenerationModel.d.ts

```diff
@@ -1,5 +1,5 @@
 /// <reference types="node" />
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { Delta } from "../Delta.js";
 import { Model, ModelSettings } from "../Model.js";
 export interface SpeechGenerationModelSettings extends ModelSettings {
@@ -8,8 +8,8 @@ export interface SpeechGenerationModel<SETTINGS extends SpeechGenerationModelSet
     /**
      * Generates an mp3 audio buffer that contains the speech for the given text.
      */
-    doGenerateSpeechStandard(text: string, options
+    doGenerateSpeechStandard(text: string, options: FunctionCallOptions): PromiseLike<Buffer>;
 }
 export interface StreamingSpeechGenerationModel<SETTINGS extends SpeechGenerationModelSettings = SpeechGenerationModelSettings> extends SpeechGenerationModel<SETTINGS> {
-    doGenerateSpeechStreamDuplex(textStream: AsyncIterable<string>, options
+    doGenerateSpeechStreamDuplex(textStream: AsyncIterable<string>, options: FunctionCallOptions): PromiseLike<AsyncIterable<Delta<Buffer>>>;
 }
```
package/model-function/generate-structure/StructureFromTextPromptTemplate.d.ts

```diff
@@ -1,6 +1,19 @@
 import { JsonSchemaProducer } from "../../core/schema/JsonSchemaProducer.js";
 import { Schema } from "../../core/schema/Schema.js";
+import { TextStreamingModel } from "../generate-text/TextGenerationModel.js";
+import { ChatPrompt } from "../generate-text/prompt-template/ChatPrompt.js";
+import { InstructionPrompt } from "../generate-text/prompt-template/InstructionPrompt.js";
 export type StructureFromTextPromptTemplate<SOURCE_PROMPT, TARGET_PROMPT> = {
     createPrompt: (prompt: SOURCE_PROMPT, schema: Schema<unknown> & JsonSchemaProducer) => TARGET_PROMPT;
     extractStructure: (response: string) => unknown;
 };
+export type FlexibleStructureFromTextPromptTemplate<SOURCE_PROMPT, INTERMEDIATE_PROMPT> = {
+    createPrompt: (prompt: SOURCE_PROMPT, schema: Schema<unknown> & JsonSchemaProducer) => INTERMEDIATE_PROMPT;
+    extractStructure: (response: string) => unknown;
+    adaptModel: (model: TextStreamingModel<never> & {
+        withTextPrompt(): TextStreamingModel<string>;
+        withInstructionPrompt(): TextStreamingModel<InstructionPrompt>;
+        withChatPrompt(): TextStreamingModel<ChatPrompt>;
+        withJsonOutput?: () => typeof model;
+    }) => TextStreamingModel<INTERMEDIATE_PROMPT>;
+};
```
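The new `FlexibleStructureFromTextPromptTemplate` adds an `adaptModel` step, so a template can reshape the underlying text model (pick a prompt style, opt into JSON output) before it is used for structure generation. A minimal custom template written against the declaration above; `parseJSON` is an existing modelfusion export, and the type import assumes the new type is re-exported from the package root:

```ts
import { parseJSON } from "modelfusion";
import type { FlexibleStructureFromTextPromptTemplate } from "modelfusion";

// Sketch: a text-prompt template that asks for JSON matching the schema.
const jsonTextTemplate: FlexibleStructureFromTextPromptTemplate<string, string> = {
  createPrompt: (prompt, schema) =>
    `${prompt}\n\nReply with JSON that matches this schema:\n` +
    JSON.stringify(schema.getJsonSchema()),
  extractStructure: (response) => parseJSON({ text: response }),
  adaptModel: (model) => {
    // Prefer native JSON output when the model implements withJsonOutput:
    const adapted = model.withJsonOutput?.() ?? model;
    return adapted.withTextPrompt();
  },
};
```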
package/model-function/generate-structure/generateStructure.cjs

```diff
@@ -10,7 +10,10 @@ async function generateStructure(model, schema, prompt, options) {
         : prompt;
     const fullResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
         functionType: "generate-structure",
-        input:
+        input: {
+            schema,
+            prompt: expandedPrompt,
+        },
         model,
         options,
         generateResponse: async (options) => {
```
package/model-function/generate-structure/generateStructure.js

```diff
@@ -7,7 +7,10 @@ export async function generateStructure(model, schema, prompt, options) {
         : prompt;
     const fullResponse = await executeStandardCall({
         functionType: "generate-structure",
-        input:
+        input: {
+            schema,
+            prompt: expandedPrompt,
+        },
         model,
         options,
         generateResponse: async (options) => {
```
package/model-function/generate-structure/jsonStructurePrompt.cjs

```diff
@@ -15,6 +15,12 @@ exports.jsonStructurePrompt = {
                 instruction: prompt,
             }),
             extractStructure,
+            adaptModel: (model) => {
+                if (model.withJsonOutput != null) {
+                    model = model.withJsonOutput();
+                }
+                return model.withInstructionPrompt();
+            },
         };
     },
     instruction({ schemaPrefix, schemaSuffix, } = {}) {
@@ -29,6 +35,12 @@ exports.jsonStructurePrompt = {
                 instruction: prompt.instruction,
             }),
             extractStructure,
+            adaptModel: (model) => {
+                if (model.withJsonOutput != null) {
+                    model = model.withJsonOutput();
+                }
+                return model.withInstructionPrompt();
+            },
         };
     },
 };
```
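Downstream effect of `adaptModel`: `jsonStructurePrompt.text()` and `.instruction()` can now switch the model into JSON mode themselves before applying the instruction prompt. A usage sketch; the provider, model name, and prompt are illustrative, not taken from this diff:

```ts
import {
  generateStructure,
  jsonStructurePrompt,
  ollama,
  zodSchema,
} from "modelfusion";
import { z } from "zod";

// Sketch: the template calls withJsonOutput() on the model (when implemented)
// and then withInstructionPrompt(), exactly as in the adaptModel code above.
const sentiment = await generateStructure(
  ollama
    .ChatTextGenerator({ model: "openhermes2.5-mistral", temperature: 0 })
    .asStructureGenerationModel(jsonStructurePrompt.instruction()),
  zodSchema(
    z.object({ sentiment: z.enum(["positive", "neutral", "negative"]) })
  ),
  { instruction: "Classify the sentiment of: 'The food was great!'" }
);
```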
package/model-function/generate-structure/jsonStructurePrompt.d.ts

```diff
@@ -1,15 +1,15 @@
 import { JsonSchemaProducer } from "../../core/schema/JsonSchemaProducer.js";
 import { Schema } from "../../core/schema/Schema.js";
 import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
-import { StructureFromTextPromptTemplate } from "./StructureFromTextPromptTemplate.js";
+import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "./StructureFromTextPromptTemplate.js";
 export declare const jsonStructurePrompt: {
     custom<SOURCE_PROMPT, TARGET_PROMPT>(createPrompt: (prompt: SOURCE_PROMPT, schema: Schema<unknown> & JsonSchemaProducer) => TARGET_PROMPT): StructureFromTextPromptTemplate<SOURCE_PROMPT, TARGET_PROMPT>;
     text({ schemaPrefix, schemaSuffix, }?: {
         schemaPrefix?: string | undefined;
         schemaSuffix?: string | undefined;
-    }):
+    }): FlexibleStructureFromTextPromptTemplate<string, InstructionPrompt>;
     instruction({ schemaPrefix, schemaSuffix, }?: {
         schemaPrefix?: string | undefined;
         schemaSuffix?: string | undefined;
-    }):
+    }): FlexibleStructureFromTextPromptTemplate<InstructionPrompt, InstructionPrompt>;
 };
```
package/model-function/generate-structure/jsonStructurePrompt.js

```diff
@@ -12,6 +12,12 @@ export const jsonStructurePrompt = {
                 instruction: prompt,
             }),
             extractStructure,
+            adaptModel: (model) => {
+                if (model.withJsonOutput != null) {
+                    model = model.withJsonOutput();
+                }
+                return model.withInstructionPrompt();
+            },
         };
     },
     instruction({ schemaPrefix, schemaSuffix, } = {}) {
@@ -26,6 +32,12 @@ export const jsonStructurePrompt = {
                 instruction: prompt.instruction,
             }),
             extractStructure,
+            adaptModel: (model) => {
+                if (model.withJsonOutput != null) {
+                    model = model.withJsonOutput();
+                }
+                return model.withInstructionPrompt();
+            },
         };
     },
 };
```
package/model-function/generate-structure/streamStructure.cjs

```diff
@@ -12,7 +12,10 @@ async function streamStructure(model, schema, prompt, options) {
     let lastStructure;
     const fullResponse = await (0, executeStreamCall_js_1.executeStreamCall)({
         functionType: "stream-structure",
-        input:
+        input: {
+            schema,
+            prompt: expandedPrompt,
+        },
         model,
         options,
         startStream: async (options) => model.doStreamStructure(schema, expandedPrompt, options),
```
package/model-function/generate-structure/streamStructure.js

```diff
@@ -9,7 +9,10 @@ export async function streamStructure(model, schema, prompt, options) {
     let lastStructure;
     const fullResponse = await executeStreamCall({
         functionType: "stream-structure",
-        input:
+        input: {
+            schema,
+            prompt: expandedPrompt,
+        },
         model,
         options,
         startStream: async (options) => model.doStreamStructure(schema, expandedPrompt, options),
```
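Like `generateStructure`, `streamStructure` now reports `{ schema, prompt }` as the call input for observability, while partial structures arrive as the model streams. A consumption sketch, reusing the illustrative model setup from above:

```ts
import {
  streamStructure,
  jsonStructurePrompt,
  ollama,
  zodSchema,
} from "modelfusion";
import { z } from "zod";

// Sketch: partial structures are emitted while the model streams tokens.
const structureStream = await streamStructure(
  ollama
    .ChatTextGenerator({ model: "openhermes2.5-mistral" })
    .asStructureGenerationModel(jsonStructurePrompt.instruction()),
  zodSchema(z.object({ names: z.array(z.string()) })),
  { instruction: "Invent three fantasy character names." }
);

for await (const partialStructure of structureStream) {
  console.log(partialStructure); // grows as more of the JSON is parsed
}
```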
package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs

```diff
@@ -44,6 +44,9 @@ class PromptTemplateTextGenerationModel {
         const mappedPrompt = this.promptTemplate.format(prompt);
         return this.model.doGenerateTexts(mappedPrompt, options);
     }
+    restoreGeneratedTexts(rawResponse) {
+        return this.model.restoreGeneratedTexts(rawResponse);
+    }
     get settingsForEvent() {
         return this.model.settingsForEvent;
     }
```
package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts

```diff
@@ -1,4 +1,4 @@
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { TextGenerationToolCallModel, ToolCallPromptTemplate } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
 import { TextGenerationToolCallsModel } from "../../tool/generate-tool-calls/TextGenerationToolCallsModel.js";
 import { ToolCallsPromptTemplate } from "../../tool/generate-tool-calls/ToolCallsPromptTemplate.js";
@@ -18,7 +18,7 @@ export declare class PromptTemplateTextGenerationModel<PROMPT, MODEL_PROMPT, SET
     get tokenizer(): MODEL["tokenizer"];
     get contextWindowSize(): MODEL["contextWindowSize"];
     get countPromptTokens(): MODEL["countPromptTokens"] extends undefined ? undefined : (prompt: PROMPT) => PromiseLike<number>;
-    doGenerateTexts(prompt: PROMPT, options?:
+    doGenerateTexts(prompt: PROMPT, options?: FunctionCallOptions): PromiseLike<{
         response: unknown;
         textGenerationResults: import("./TextGenerationResult.js").TextGenerationResult[];
         usage?: {
@@ -27,6 +27,15 @@ export declare class PromptTemplateTextGenerationModel<PROMPT, MODEL_PROMPT, SET
             totalTokens: number;
         } | undefined;
     }>;
+    restoreGeneratedTexts(rawResponse: unknown): {
+        response: unknown;
+        textGenerationResults: import("./TextGenerationResult.js").TextGenerationResult[];
+        usage?: {
+            promptTokens: number;
+            completionTokens: number;
+            totalTokens: number;
+        } | undefined;
+    };
     get settingsForEvent(): Partial<SETTINGS>;
     asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationToolCallModel<INPUT_PROMPT, PROMPT, this>;
     asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationToolCallsModel<INPUT_PROMPT, PROMPT, this>;
```
package/model-function/generate-text/PromptTemplateTextGenerationModel.js

```diff
@@ -41,6 +41,9 @@ export class PromptTemplateTextGenerationModel {
         const mappedPrompt = this.promptTemplate.format(prompt);
         return this.model.doGenerateTexts(mappedPrompt, options);
     }
+    restoreGeneratedTexts(rawResponse) {
+        return this.model.restoreGeneratedTexts(rawResponse);
+    }
     get settingsForEvent() {
         return this.model.settingsForEvent;
     }
```
package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts

```diff
@@ -1,4 +1,4 @@
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { StructureFromTextPromptTemplate } from "../generate-structure/StructureFromTextPromptTemplate.js";
 import { StructureFromTextStreamingModel } from "../generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextGenerationModel } from "./PromptTemplateTextGenerationModel.js";
@@ -9,7 +9,7 @@ export declare class PromptTemplateTextStreamingModel<PROMPT, MODEL_PROMPT, SETT
         model: MODEL;
         promptTemplate: TextGenerationPromptTemplate<PROMPT, MODEL_PROMPT>;
     });
-    doStreamText(prompt: PROMPT, options?:
+    doStreamText(prompt: PROMPT, options?: FunctionCallOptions): PromiseLike<AsyncIterable<import("../Delta.js").Delta<unknown>>>;
     extractTextDelta(delta: unknown): string | undefined;
     asStructureGenerationModel<INPUT_PROMPT>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, PROMPT>): StructureFromTextStreamingModel<INPUT_PROMPT, PROMPT, this>;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateTextStreamingModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
```
package/model-function/generate-text/TextGenerationModel.d.ts

```diff
@@ -1,4 +1,4 @@
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { Delta } from "../Delta.js";
 import { Model, ModelSettings } from "../Model.js";
 import { BasicTokenizer, FullTokenizer } from "../tokenize-text/Tokenizer.js";
@@ -62,7 +62,7 @@ export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationMode
      * Optional. Implement if you have a tokenizer and want to count the number of tokens in a prompt.
      */
     readonly countPromptTokens: ((prompt: PROMPT) => PromiseLike<number>) | undefined;
-    doGenerateTexts(prompt: PROMPT, options?:
+    doGenerateTexts(prompt: PROMPT, options?: FunctionCallOptions): PromiseLike<{
         response: unknown;
         textGenerationResults: TextGenerationResult[];
         usage?: {
@@ -71,10 +71,23 @@ export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationMode
             totalTokens: number;
         };
     }>;
+    restoreGeneratedTexts(rawResponse: unknown): {
+        response: unknown;
+        textGenerationResults: TextGenerationResult[];
+        usage?: {
+            promptTokens: number;
+            completionTokens: number;
+            totalTokens: number;
+        };
+    };
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationModel<INPUT_PROMPT, SETTINGS>;
+    /**
+     * Optional. When available, forces the model to return JSON as the text output.
+     */
+    withJsonOutput?(): this;
 }
 export interface TextStreamingModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends TextGenerationModel<PROMPT, SETTINGS> {
-    doStreamText(prompt: PROMPT, options?:
+    doStreamText(prompt: PROMPT, options?: FunctionCallOptions): PromiseLike<AsyncIterable<Delta<unknown>>>;
     extractTextDelta(delta: unknown): string | undefined;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextStreamingModel<INPUT_PROMPT, SETTINGS>;
 }
```
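The two new members formalize what the cache path needs: `restoreGeneratedTexts` rebuilds a full `doGenerateTexts`-shaped result from a stored raw response (with validation, as the Anthropic implementation further down shows), and the optional `withJsonOutput` is what `adaptModel` in `jsonStructurePrompt` probes for. A caller-side contract sketch:

```ts
// Sketch of the new contract. A cached raw response can be turned back into
// the doGenerateTexts result shape without making an API call.
declare const model: TextGenerationModel<string>;
declare const cachedRawResponse: unknown; // e.g. read from a Cache implementation

const restored = model.restoreGeneratedTexts(cachedRawResponse);
for (const result of restored.textGenerationResults) {
  console.log(result.text, result.finishReason);
}
```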
package/model-function/generate-text/generateText.cjs

```diff
@@ -9,7 +9,48 @@ async function generateText(model, prompt, options) {
         model,
         options,
         generateResponse: async (options) => {
-            const result = await model.doGenerateTexts(prompt, options);
+            async function getGeneratedTexts() {
+                if (options?.cache == null) {
+                    return {
+                        ...(await model.doGenerateTexts(prompt, options)),
+                        cache: undefined,
+                    };
+                }
+                let cacheErrors = undefined;
+                const cacheKey = {
+                    functionType: "generate-text",
+                    functionId: options?.functionId,
+                    input: {
+                        model,
+                        settings: model.settingsForEvent, // TODO should include full model information
+                        prompt,
+                    },
+                };
+                try {
+                    const cachedRawResponse = await options.cache.lookupValue(cacheKey);
+                    if (cachedRawResponse != null) {
+                        return {
+                            ...model.restoreGeneratedTexts(cachedRawResponse),
+                            cache: { status: "hit" },
+                        };
+                    }
+                }
+                catch (err) {
+                    cacheErrors = [err];
+                }
+                const result = await model.doGenerateTexts(prompt, options);
+                try {
+                    await options.cache.storeValue(cacheKey, result.response);
+                }
+                catch (err) {
+                    cacheErrors = [...(cacheErrors ?? []), err];
+                }
+                return {
+                    ...result,
+                    cache: { status: "miss", errors: cacheErrors },
+                };
+            }
+            const result = await getGeneratedTexts();
             const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
             const textGenerationResults = shouldTrimWhitespace
                 ? result.textGenerationResults.map((textGeneration) => ({
@@ -17,6 +58,7 @@ async function generateText(model, prompt, options) {
                     finishReason: textGeneration.finishReason,
                 }))
                 : result.textGenerationResults;
+            // TODO add cache information
             return {
                 response: result.response,
                 extractedValue: textGenerationResults,
```
package/model-function/generate-text/generateText.js

```diff
@@ -6,7 +6,48 @@ export async function generateText(model, prompt, options) {
         model,
         options,
         generateResponse: async (options) => {
-            const result = await model.doGenerateTexts(prompt, options);
+            async function getGeneratedTexts() {
+                if (options?.cache == null) {
+                    return {
+                        ...(await model.doGenerateTexts(prompt, options)),
+                        cache: undefined,
+                    };
+                }
+                let cacheErrors = undefined;
+                const cacheKey = {
+                    functionType: "generate-text",
+                    functionId: options?.functionId,
+                    input: {
+                        model,
+                        settings: model.settingsForEvent, // TODO should include full model information
+                        prompt,
+                    },
+                };
+                try {
+                    const cachedRawResponse = await options.cache.lookupValue(cacheKey);
+                    if (cachedRawResponse != null) {
+                        return {
+                            ...model.restoreGeneratedTexts(cachedRawResponse),
+                            cache: { status: "hit" },
+                        };
+                    }
+                }
+                catch (err) {
+                    cacheErrors = [err];
+                }
+                const result = await model.doGenerateTexts(prompt, options);
+                try {
+                    await options.cache.storeValue(cacheKey, result.response);
+                }
+                catch (err) {
+                    cacheErrors = [...(cacheErrors ?? []), err];
+                }
+                return {
+                    ...result,
+                    cache: { status: "miss", errors: cacheErrors },
+                };
+            }
+            const result = await getGeneratedTexts();
             const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
             const textGenerationResults = shouldTrimWhitespace
                 ? result.textGenerationResults.map((textGeneration) => ({
@@ -14,6 +55,7 @@ export async function generateText(model, prompt, options) {
                     finishReason: textGeneration.finishReason,
                 }))
                 : result.textGenerationResults;
+            // TODO add cache information
             return {
                 response: result.response,
                 extractedValue: textGenerationResults,
```
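This is the user-facing side of the change: pass a `cache` in the call options, and an identical second call is served from the cache via `restoreGeneratedTexts` instead of hitting the API. A usage sketch with the new `MemoryCache` from `package/core/cache`, assuming it is exported from the package root; the model choice is illustrative:

```ts
import { generateText, MemoryCache, openai } from "modelfusion";

const cache = new MemoryCache();
const model = openai.CompletionTextGenerator({
  model: "gpt-3.5-turbo-instruct",
});

// First call: cache miss. Calls the API, then stores result.response under a
// key derived from { functionType, functionId, model settings, prompt }.
const text1 = await generateText(model, "Write a haiku about caching.", { cache });

// Second call with the same settings and prompt: cache hit. The raw response
// is revived through model.restoreGeneratedTexts(), with no API call.
const text2 = await generateText(model, "Write a haiku about caching.", { cache });
```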
package/model-function/generate-transcription/TranscriptionModel.d.ts

```diff
@@ -1,9 +1,9 @@
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { Model, ModelSettings } from "../Model.js";
 export interface TranscriptionModelSettings extends ModelSettings {
 }
 export interface TranscriptionModel<DATA, SETTINGS extends TranscriptionModelSettings = TranscriptionModelSettings> extends Model<SETTINGS> {
-    doTranscribe: (data: DATA, options
+    doTranscribe: (data: DATA, options: FunctionCallOptions) => PromiseLike<{
         response: unknown;
         transcription: string;
     }>;
```
package/model-provider/anthropic/AnthropicTextGenerationModel.cjs

```diff
@@ -6,6 +6,7 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
+const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
 const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
@@ -69,17 +70,22 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     get modelName() {
         return this.settings.model;
     }
-    async callAPI(prompt, options) {
+    async callAPI(prompt, callOptions, options) {
         const api = this.settings.api ?? new AnthropicApiConfiguration_js_1.AnthropicApiConfiguration();
         const responseFormat = options.responseFormat;
-        const abortSignal = options.run?.abortSignal;
+        const abortSignal = callOptions.run?.abortSignal;
         const userId = this.settings.userId;
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
             retry: api.retry,
             throttle: api.throttle,
             call: async () => (0, postToApi_js_1.postJsonToApi)({
                 url: api.assembleUrl(`/complete`),
-                headers: api.headers,
+                headers: api.headers({
+                    functionType: callOptions.functionType,
+                    functionId: callOptions.functionId,
+                    run: callOptions.run,
+                    callId: callOptions.callId,
+                }),
                 body: {
                     model: this.settings.model,
                     prompt,
@@ -108,10 +114,17 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
     async doGenerateTexts(prompt, options) {
-        const response = await this.callAPI(prompt, {
-            ...options,
+        return this.processTextGenerationResponse(await this.callAPI(prompt, options, {
             responseFormat: exports.AnthropicTextGenerationResponseFormat.json,
-        });
+        }));
+    }
+    restoreGeneratedTexts(rawResponse) {
+        return this.processTextGenerationResponse((0, validateTypes_js_1.validateTypes)({
+            structure: rawResponse,
+            schema: (0, ZodSchema_js_1.zodSchema)(anthropicTextGenerationResponseSchema),
+        }));
+    }
+    processTextGenerationResponse(response) {
         return {
             response,
             textGenerationResults: [
@@ -133,8 +146,7 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
         }
     }
     doStreamText(prompt, options) {
-        return this.callAPI(prompt, {
-            ...options,
+        return this.callAPI(prompt, options, {
             responseFormat: exports.AnthropicTextGenerationResponseFormat.deltaIterable,
         });
    }
```
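Note that `api.headers` changed from a plain record to a function receiving per-call metadata (the same fields the new `CustomHeaderProvider` in `package/core/api` works with), so an API configuration can emit per-request tracing headers. A sketch; the header names are made up for illustration:

```ts
// Sketch: the shape of the metadata now passed to the headers function,
// as used in callAPI above. Header names below are hypothetical.
type HeaderParameters = {
  functionType: string;
  functionId?: string;
  callId: string;
  run?: unknown;
};

const tracingHeaders = (params: HeaderParameters): Record<string, string> => ({
  "X-Function-Type": params.functionType,
  "X-Call-Id": params.callId,
  ...(params.functionId != null ? { "X-Function-Id": params.functionId } : {}),
});
```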