modelfusion 0.112.0 → 0.114.0
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- package/CHANGELOG.md +105 -0
- package/README.md +108 -212
- package/core/FunctionOptions.d.ts +14 -0
- package/core/api/AbstractApiConfiguration.cjs +16 -1
- package/core/api/AbstractApiConfiguration.d.ts +7 -3
- package/core/api/AbstractApiConfiguration.js +16 -1
- package/core/api/ApiConfiguration.d.ts +10 -1
- package/core/api/BaseUrlApiConfiguration.cjs +9 -5
- package/core/api/BaseUrlApiConfiguration.d.ts +7 -7
- package/core/api/BaseUrlApiConfiguration.js +9 -5
- package/core/api/CustomHeaderProvider.cjs +2 -0
- package/core/api/CustomHeaderProvider.d.ts +2 -0
- package/core/api/CustomHeaderProvider.js +1 -0
- package/core/api/index.cjs +1 -0
- package/core/api/index.d.ts +1 -0
- package/core/api/index.js +1 -0
- package/core/cache/Cache.cjs +2 -0
- package/core/cache/Cache.d.ts +12 -0
- package/core/cache/Cache.js +1 -0
- package/core/cache/MemoryCache.cjs +23 -0
- package/core/cache/MemoryCache.d.ts +15 -0
- package/core/cache/MemoryCache.js +19 -0
- package/core/cache/index.cjs +18 -0
- package/core/cache/index.d.ts +2 -0
- package/core/cache/index.js +2 -0
- package/core/index.cjs +1 -0
- package/core/index.d.ts +1 -0
- package/core/index.js +1 -0
- package/core/schema/TypeValidationError.cjs +36 -0
- package/core/schema/TypeValidationError.d.ts +15 -0
- package/core/schema/TypeValidationError.js +32 -0
- package/core/schema/index.cjs +2 -0
- package/core/schema/index.d.ts +2 -0
- package/core/schema/index.js +2 -0
- package/core/schema/parseJSON.cjs +6 -14
- package/core/schema/parseJSON.d.ts +3 -2
- package/core/schema/parseJSON.js +6 -14
- package/core/schema/validateTypes.cjs +65 -0
- package/core/schema/validateTypes.d.ts +34 -0
- package/core/schema/validateTypes.js +60 -0
- package/model-function/embed/EmbeddingModel.d.ts +2 -2
- package/model-function/executeStandardCall.cjs +3 -1
- package/model-function/executeStandardCall.d.ts +2 -2
- package/model-function/executeStandardCall.js +3 -1
- package/model-function/executeStreamCall.cjs +2 -1
- package/model-function/executeStreamCall.d.ts +2 -2
- package/model-function/executeStreamCall.js +2 -1
- package/model-function/generate-image/ImageGenerationModel.d.ts +2 -2
- package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +2 -2
- package/model-function/generate-speech/SpeechGenerationModel.d.ts +3 -3
- package/model-function/generate-structure/StructureFromTextPromptTemplate.d.ts +13 -0
- package/model-function/generate-structure/generateStructure.cjs +4 -1
- package/model-function/generate-structure/generateStructure.js +4 -1
- package/model-function/generate-structure/jsonStructurePrompt.cjs +12 -0
- package/model-function/generate-structure/jsonStructurePrompt.d.ts +3 -3
- package/model-function/generate-structure/jsonStructurePrompt.js +12 -0
- package/model-function/generate-structure/streamStructure.cjs +4 -1
- package/model-function/generate-structure/streamStructure.js +4 -1
- package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +3 -0
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +11 -2
- package/model-function/generate-text/PromptTemplateTextGenerationModel.js +3 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -2
- package/model-function/generate-text/TextGenerationModel.d.ts +16 -3
- package/model-function/generate-text/generateText.cjs +43 -1
- package/model-function/generate-text/generateText.js +43 -1
- package/model-function/generate-transcription/TranscriptionModel.d.ts +2 -2
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +20 -8
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +27 -5
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +20 -8
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +8 -3
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +3 -3
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +8 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +8 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.js +8 -3
- package/model-provider/cohere/CohereTextGenerationModel.cjs +20 -8
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +45 -5
- package/model-provider/cohere/CohereTextGenerationModel.js +20 -8
- package/model-provider/cohere/CohereTokenizer.cjs +16 -6
- package/model-provider/cohere/CohereTokenizer.d.ts +3 -3
- package/model-provider/cohere/CohereTokenizer.js +16 -6
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.cjs +1 -1
- package/model-provider/elevenlabs/ElevenLabsApiConfiguration.js +1 -1
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +8 -3
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +2 -2
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +8 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +8 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +3 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +8 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +18 -4
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +21 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +18 -4
- package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +20 -8
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +125 -5
- package/model-provider/llamacpp/LlamaCppCompletionModel.js +20 -8
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +8 -3
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +3 -3
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +8 -3
- package/model-provider/llamacpp/LlamaCppTokenizer.cjs +8 -3
- package/model-provider/llamacpp/LlamaCppTokenizer.d.ts +2 -2
- package/model-provider/llamacpp/LlamaCppTokenizer.js +8 -3
- package/model-provider/lmnt/LmntSpeechModel.cjs +8 -3
- package/model-provider/lmnt/LmntSpeechModel.d.ts +2 -2
- package/model-provider/lmnt/LmntSpeechModel.js +8 -3
- package/model-provider/mistral/MistralChatModel.cjs +20 -8
- package/model-provider/mistral/MistralChatModel.d.ts +55 -5
- package/model-provider/mistral/MistralChatModel.js +20 -8
- package/model-provider/mistral/MistralTextEmbeddingModel.cjs +8 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +3 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.js +8 -3
- package/model-provider/ollama/OllamaChatModel.cjs +35 -8
- package/model-provider/ollama/OllamaChatModel.d.ts +31 -5
- package/model-provider/ollama/OllamaChatModel.js +35 -8
- package/model-provider/ollama/OllamaCompletionModel.cjs +20 -7
- package/model-provider/ollama/OllamaCompletionModel.d.ts +43 -5
- package/model-provider/ollama/OllamaCompletionModel.js +20 -7
- package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +8 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +3 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.js +8 -3
- package/model-provider/openai/AbstractOpenAIChatModel.cjs +23 -13
- package/model-provider/openai/AbstractOpenAIChatModel.d.ts +94 -7
- package/model-provider/openai/AbstractOpenAIChatModel.js +23 -13
- package/model-provider/openai/AbstractOpenAICompletionModel.cjs +21 -9
- package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +35 -5
- package/model-provider/openai/AbstractOpenAICompletionModel.js +21 -9
- package/model-provider/openai/AzureOpenAIApiConfiguration.cjs +5 -2
- package/model-provider/openai/AzureOpenAIApiConfiguration.d.ts +2 -1
- package/model-provider/openai/AzureOpenAIApiConfiguration.js +5 -2
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs +12 -6
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +89 -5
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js +12 -6
- package/model-provider/openai/OpenAIChatModel.cjs +12 -4
- package/model-provider/openai/OpenAIChatModel.d.ts +3 -2
- package/model-provider/openai/OpenAIChatModel.js +12 -4
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +10 -6
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +4 -4
- package/model-provider/openai/OpenAIImageGenerationModel.js +10 -6
- package/model-provider/openai/OpenAISpeechModel.cjs +9 -4
- package/model-provider/openai/OpenAISpeechModel.d.ts +3 -3
- package/model-provider/openai/OpenAISpeechModel.js +9 -4
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +11 -6
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +3 -3
- package/model-provider/openai/OpenAITextEmbeddingModel.js +11 -6
- package/model-provider/openai/OpenAITranscriptionModel.cjs +9 -6
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +4 -4
- package/model-provider/openai/OpenAITranscriptionModel.js +9 -6
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +12 -4
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +3 -2
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +12 -4
- package/model-provider/stability/StabilityImageGenerationModel.cjs +10 -5
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +3 -3
- package/model-provider/stability/StabilityImageGenerationModel.js +10 -5
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +9 -7
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +3 -3
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +9 -7
- package/observability/helicone/HeliconeOpenAIApiConfiguration.cjs +2 -1
- package/observability/helicone/HeliconeOpenAIApiConfiguration.d.ts +3 -1
- package/observability/helicone/HeliconeOpenAIApiConfiguration.js +2 -1
- package/package.json +2 -2
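Beyond the provider-level diffs below, the file list points at a new core/cache module (Cache, MemoryCache) together with a substantially extended generateText, which suggests opt-in response caching. A minimal sketch of how this might be used; it assumes the 0.11x positional generateText(model, prompt, options) signature and that the cache is passed through the function options, so treat the details as illustrative rather than the library's documented API:

```ts
// Hedged sketch: caching generateText results with the new MemoryCache.
// Assumes modelfusion exports MemoryCache and that FunctionOptions accepts
// a `cache` field (inferred from core/FunctionOptions.d.ts +14 and
// generate-text/generateText.cjs +43 in the file list above).
import { generateText, openai, MemoryCache } from "modelfusion";

const cache = new MemoryCache();
const model = openai.ChatTextGenerator({ model: "gpt-3.5-turbo", temperature: 1 });
const prompt = [openai.ChatMessage.user("Write a haiku about package diffs.")];

const text1 = await generateText(model, prompt, { cache }); // calls the API
const text2 = await generateText(model, prompt, { cache }); // can be answered from the cache
```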
package/model-provider/openai/OpenAISpeechModel.js
CHANGED
@@ -1,6 +1,6 @@
-import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
 import { createAudioMpegResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
 import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
 /**
@@ -41,15 +41,20 @@ export class OpenAISpeechModel extends AbstractModel {
     get modelName() {
         return this.settings.model;
     }
-    async callAPI(text,
+    async callAPI(text, callOptions) {
         const api = this.settings.api ?? new OpenAIApiConfiguration();
-        const abortSignal =
+        const abortSignal = callOptions.run?.abortSignal;
         return callWithRetryAndThrottle({
             retry: api.retry,
             throttle: api.throttle,
             call: async () => postJsonToApi({
                 url: api.assembleUrl(`/audio/speech`),
-                headers: api.headers
+                headers: api.headers({
+                    functionType: callOptions.functionType,
+                    functionId: callOptions.functionId,
+                    run: callOptions.run,
+                    callId: callOptions.callId,
+                }),
                 body: {
                     input: text,
                     voice: this.settings.voice,
package/model-provider/openai/OpenAITextEmbeddingModel.cjs
CHANGED
@@ -91,20 +91,25 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
     async countTokens(input) {
         return (0, countTokens_js_1.countTokens)(this.tokenizer, input);
     }
-    async callAPI(texts,
+    async callAPI(texts, callOptions) {
         const api = this.settings.api ?? new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration();
-        const abortSignal =
+        const abortSignal = callOptions.run?.abortSignal;
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
             retry: api.retry,
             throttle: api.throttle,
             call: async () => (0, postToApi_js_1.postJsonToApi)({
                 url: api.assembleUrl("/embeddings"),
-                headers: api.headers
+                headers: api.headers({
+                    functionType: callOptions.functionType,
+                    functionId: callOptions.functionId,
+                    run: callOptions.run,
+                    callId: callOptions.callId,
+                }),
                 body: {
                     model: this.modelName,
                     input: texts,
                     user: this.settings.isUserIdForwardingEnabled
-                        ?
+                        ? callOptions.run?.userId
                         : undefined,
                 },
                 failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
@@ -116,11 +121,11 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
     get settingsForEvent() {
         return {};
     }
-    async doEmbedValues(texts,
+    async doEmbedValues(texts, callOptions) {
         if (texts.length > this.maxValuesPerCall) {
             throw new Error(`The OpenAI embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
         }
-        const response = await this.callAPI(texts,
+        const response = await this.callAPI(texts, callOptions);
         return {
             response,
             embeddings: response.data.map((data) => data.embedding),
package/model-provider/openai/OpenAITextEmbeddingModel.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import { z } from "zod";
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { EmbeddingModel, EmbeddingModelSettings } from "../../model-function/embed/EmbeddingModel.js";
@@ -47,9 +47,9 @@ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEm
     readonly tokenizer: TikTokenTokenizer;
     readonly contextWindowSize: number;
     countTokens(input: string): Promise<number>;
-    callAPI(texts: Array<string>,
+    callAPI(texts: Array<string>, callOptions: FunctionCallOptions): Promise<OpenAITextEmbeddingResponse>;
     get settingsForEvent(): Partial<OpenAITextEmbeddingModelSettings>;
-    doEmbedValues(texts: string[],
+    doEmbedValues(texts: string[], callOptions: FunctionCallOptions): Promise<{
         response: {
             object: "list";
             data: {
package/model-provider/openai/OpenAITextEmbeddingModel.js
CHANGED
@@ -86,20 +86,25 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
     async countTokens(input) {
         return countTokens(this.tokenizer, input);
     }
-    async callAPI(texts,
+    async callAPI(texts, callOptions) {
         const api = this.settings.api ?? new OpenAIApiConfiguration();
-        const abortSignal =
+        const abortSignal = callOptions.run?.abortSignal;
         return callWithRetryAndThrottle({
             retry: api.retry,
             throttle: api.throttle,
             call: async () => postJsonToApi({
                 url: api.assembleUrl("/embeddings"),
-                headers: api.headers
+                headers: api.headers({
+                    functionType: callOptions.functionType,
+                    functionId: callOptions.functionId,
+                    run: callOptions.run,
+                    callId: callOptions.callId,
+                }),
                 body: {
                     model: this.modelName,
                     input: texts,
                     user: this.settings.isUserIdForwardingEnabled
-                        ?
+                        ? callOptions.run?.userId
                         : undefined,
                 },
                 failedResponseHandler: failedOpenAICallResponseHandler,
@@ -111,11 +116,11 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
     get settingsForEvent() {
         return {};
     }
-    async doEmbedValues(texts,
+    async doEmbedValues(texts, callOptions) {
         if (texts.length > this.maxValuesPerCall) {
             throw new Error(`The OpenAI embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
         }
-        const response = await this.callAPI(texts,
+        const response = await this.callAPI(texts, callOptions);
         return {
             response,
             embeddings: response.data.map((data) => data.embedding),
package/model-provider/openai/OpenAITranscriptionModel.cjs
CHANGED
@@ -55,19 +55,17 @@ class OpenAITranscriptionModel extends AbstractModel_js_1.AbstractModel {
         return this.settings.model;
     }
     async doTranscribe(data, options) {
-        const response = await this.callAPI(data, {
+        const response = await this.callAPI(data, options, {
             responseFormat: exports.OpenAITranscriptionResponseFormat.verboseJson,
-            functionId: options?.functionId,
-            run: options?.run,
         });
         return {
             response,
             transcription: response.text,
         };
     }
-    async callAPI(data, options) {
+    async callAPI(data, callOptions, options) {
         const api = this.settings.api ?? new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration();
-        const abortSignal =
+        const abortSignal = callOptions?.run?.abortSignal;
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
             retry: api.retry,
             throttle: api.throttle,
@@ -90,7 +88,12 @@ class OpenAITranscriptionModel extends AbstractModel_js_1.AbstractModel {
                 }
                 return (0, postToApi_js_1.postToApi)({
                     url: api.assembleUrl("/audio/transcriptions"),
-                    headers: api.headers
+                    headers: api.headers({
+                        functionType: callOptions.functionType,
+                        functionId: callOptions.functionId,
+                        run: callOptions.run,
+                        callId: callOptions.callId,
+                    }),
                     body: {
                         content: formData,
                         values: {
package/model-provider/openai/OpenAITranscriptionModel.d.ts
CHANGED
@@ -1,6 +1,6 @@
 /// <reference types="node" />
 import { z } from "zod";
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
@@ -65,7 +65,7 @@ export declare class OpenAITranscriptionModel extends AbstractModel<OpenAITransc
     constructor(settings: OpenAITranscriptionModelSettings);
     readonly provider: "openai";
     get modelName(): "whisper-1";
-    doTranscribe(data: OpenAITranscriptionInput, options
+    doTranscribe(data: OpenAITranscriptionInput, options: FunctionCallOptions): Promise<{
         response: {
             text: string;
             duration: number;
@@ -87,9 +87,9 @@ export declare class OpenAITranscriptionModel extends AbstractModel<OpenAITransc
         };
         transcription: string;
     }>;
-    callAPI<RESULT>(data: OpenAITranscriptionInput, options: {
+    callAPI<RESULT>(data: OpenAITranscriptionInput, callOptions: FunctionCallOptions, options: {
         responseFormat: OpenAITranscriptionResponseFormatType<RESULT>;
-    }
+    }): Promise<RESULT>;
     get settingsForEvent(): Partial<OpenAITranscriptionModelSettings>;
     withSettings(additionalSettings: OpenAITranscriptionModelSettings): this;
 }
|
@@ -51,19 +51,17 @@ export class OpenAITranscriptionModel extends AbstractModel {
|
|
51
51
|
return this.settings.model;
|
52
52
|
}
|
53
53
|
async doTranscribe(data, options) {
|
54
|
-
const response = await this.callAPI(data, {
|
54
|
+
const response = await this.callAPI(data, options, {
|
55
55
|
responseFormat: OpenAITranscriptionResponseFormat.verboseJson,
|
56
|
-
functionId: options?.functionId,
|
57
|
-
run: options?.run,
|
58
56
|
});
|
59
57
|
return {
|
60
58
|
response,
|
61
59
|
transcription: response.text,
|
62
60
|
};
|
63
61
|
}
|
64
|
-
async callAPI(data, options) {
|
62
|
+
async callAPI(data, callOptions, options) {
|
65
63
|
const api = this.settings.api ?? new OpenAIApiConfiguration();
|
66
|
-
const abortSignal =
|
64
|
+
const abortSignal = callOptions?.run?.abortSignal;
|
67
65
|
return callWithRetryAndThrottle({
|
68
66
|
retry: api.retry,
|
69
67
|
throttle: api.throttle,
|
@@ -86,7 +84,12 @@ export class OpenAITranscriptionModel extends AbstractModel {
|
|
86
84
|
}
|
87
85
|
return postToApi({
|
88
86
|
url: api.assembleUrl("/audio/transcriptions"),
|
89
|
-
headers: api.headers
|
87
|
+
headers: api.headers({
|
88
|
+
functionType: callOptions.functionType,
|
89
|
+
functionId: callOptions.functionId,
|
90
|
+
run: callOptions.run,
|
91
|
+
callId: callOptions.callId,
|
92
|
+
}),
|
90
93
|
body: {
|
91
94
|
content: formData,
|
92
95
|
values: {
|
package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs
CHANGED
@@ -58,10 +58,15 @@ class OpenAICompatibleChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpe
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
     asStructureGenerationModel(promptTemplate) {
-        return
-
-
-
+        return "adaptModel" in promptTemplate
+            ? new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+                model: promptTemplate.adaptModel(this),
+                template: promptTemplate,
+            })
+            : new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+                model: this,
+                template: promptTemplate,
+            });
     }
     /**
      * Returns this model with a text prompt template.
@@ -92,6 +97,9 @@ class OpenAICompatibleChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpe
             promptTemplate,
         });
     }
+    withJsonOutput() {
+        return this.withSettings({ responseFormat: { type: "json_object" } });
+    }
     withSettings(additionalSettings) {
         return new OpenAICompatibleChatModel(Object.assign({}, this.settings, additionalSettings));
     }
package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
+import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
 import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateFullTextModel } from "../../model-function/generate-text/PromptTemplateFullTextModel.js";
 import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
@@ -26,7 +26,7 @@ export declare class OpenAICompatibleChatModel extends AbstractOpenAIChatModel<O
     readonly tokenizer: undefined;
     readonly countPromptTokens: undefined;
     get settingsForEvent(): Partial<OpenAICompatibleChatSettings>;
-    asStructureGenerationModel<INPUT_PROMPT>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt>): StructureFromTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt,
+    asStructureGenerationModel<INPUT_PROMPT, OpenAIChatPrompt>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt> | FlexibleStructureFromTextPromptTemplate<INPUT_PROMPT, unknown>): StructureFromTextStreamingModel<INPUT_PROMPT, unknown, TextStreamingModel<unknown, import("../../model-function/generate-text/TextGenerationModel.js").TextGenerationModelSettings>> | StructureFromTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, TextStreamingModel<OpenAIChatPrompt, import("../../model-function/generate-text/TextGenerationModel.js").TextGenerationModelSettings>>;
     /**
     * Returns this model with a text prompt template.
     */
@@ -40,5 +40,6 @@ export declare class OpenAICompatibleChatModel extends AbstractOpenAIChatModel<O
     */
    withChatPrompt(): PromptTemplateFullTextModel<import("../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt>): PromptTemplateFullTextModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
+    withJsonOutput(): this;
    withSettings(additionalSettings: Partial<OpenAICompatibleChatSettings>): this;
 }
|
@@ -55,10 +55,15 @@ export class OpenAICompatibleChatModel extends AbstractOpenAIChatModel {
|
|
55
55
|
return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
|
56
56
|
}
|
57
57
|
asStructureGenerationModel(promptTemplate) {
|
58
|
-
return
|
59
|
-
|
60
|
-
|
61
|
-
|
58
|
+
return "adaptModel" in promptTemplate
|
59
|
+
? new StructureFromTextStreamingModel({
|
60
|
+
model: promptTemplate.adaptModel(this),
|
61
|
+
template: promptTemplate,
|
62
|
+
})
|
63
|
+
: new StructureFromTextStreamingModel({
|
64
|
+
model: this,
|
65
|
+
template: promptTemplate,
|
66
|
+
});
|
62
67
|
}
|
63
68
|
/**
|
64
69
|
* Returns this model with a text prompt template.
|
@@ -89,6 +94,9 @@ export class OpenAICompatibleChatModel extends AbstractOpenAIChatModel {
|
|
89
94
|
promptTemplate,
|
90
95
|
});
|
91
96
|
}
|
97
|
+
withJsonOutput() {
|
98
|
+
return this.withSettings({ responseFormat: { type: "json_object" } });
|
99
|
+
}
|
92
100
|
withSettings(additionalSettings) {
|
93
101
|
return new OpenAICompatibleChatModel(Object.assign({}, this.settings, additionalSettings));
|
94
102
|
}
|
package/model-provider/stability/StabilityImageGenerationModel.cjs
CHANGED
@@ -48,15 +48,20 @@ class StabilityImageGenerationModel extends AbstractModel_js_1.AbstractModel {
     get modelName() {
         return this.settings.model;
     }
-    async callAPI(input,
+    async callAPI(input, callOptions) {
         const api = this.settings.api ?? new StabilityApiConfiguration_js_1.StabilityApiConfiguration();
-        const abortSignal =
+        const abortSignal = callOptions.run?.abortSignal;
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
             retry: this.settings.api?.retry,
             throttle: this.settings.api?.throttle,
             call: async () => (0, postToApi_js_1.postJsonToApi)({
                 url: api.assembleUrl(`/generation/${this.settings.model}/text-to-image`),
-                headers: api.headers
+                headers: api.headers({
+                    functionType: callOptions.functionType,
+                    functionId: callOptions.functionId,
+                    run: callOptions.run,
+                    callId: callOptions.callId,
+                }),
                 body: {
                     height: this.settings.height,
                     width: this.settings.width,
@@ -88,8 +93,8 @@ class StabilityImageGenerationModel extends AbstractModel_js_1.AbstractModel {
             stylePreset: this.settings.stylePreset,
         };
     }
-    async doGenerateImages(prompt,
-        const response = await this.callAPI(prompt,
+    async doGenerateImages(prompt, callOptions) {
+        const response = await this.callAPI(prompt, callOptions);
         return {
             response,
             base64Images: response.artifacts.map((artifact) => artifact.base64),
package/model-provider/stability/StabilityImageGenerationModel.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import { z } from "zod";
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptTemplate } from "../../model-function/PromptTemplate.js";
@@ -64,9 +64,9 @@ export declare class StabilityImageGenerationModel extends AbstractModel<Stabili
     constructor(settings: StabilityImageGenerationSettings);
     readonly provider: "stability";
     get modelName(): StabilityImageGenerationModelType;
-    callAPI(input: StabilityImageGenerationPrompt,
+    callAPI(input: StabilityImageGenerationPrompt, callOptions: FunctionCallOptions): Promise<StabilityImageGenerationResponse>;
     get settingsForEvent(): Partial<StabilityImageGenerationSettings>;
-    doGenerateImages(prompt: StabilityImageGenerationPrompt,
+    doGenerateImages(prompt: StabilityImageGenerationPrompt, callOptions: FunctionCallOptions): Promise<{
         response: {
             artifacts: {
                 base64: string;
package/model-provider/stability/StabilityImageGenerationModel.js
CHANGED
@@ -45,15 +45,20 @@ export class StabilityImageGenerationModel extends AbstractModel {
     get modelName() {
         return this.settings.model;
     }
-    async callAPI(input,
+    async callAPI(input, callOptions) {
         const api = this.settings.api ?? new StabilityApiConfiguration();
-        const abortSignal =
+        const abortSignal = callOptions.run?.abortSignal;
         return callWithRetryAndThrottle({
             retry: this.settings.api?.retry,
             throttle: this.settings.api?.throttle,
             call: async () => postJsonToApi({
                 url: api.assembleUrl(`/generation/${this.settings.model}/text-to-image`),
-                headers: api.headers
+                headers: api.headers({
+                    functionType: callOptions.functionType,
+                    functionId: callOptions.functionId,
+                    run: callOptions.run,
+                    callId: callOptions.callId,
+                }),
                 body: {
                     height: this.settings.height,
                     width: this.settings.width,
@@ -85,8 +90,8 @@ export class StabilityImageGenerationModel extends AbstractModel {
             stylePreset: this.settings.stylePreset,
         };
     }
-    async doGenerateImages(prompt,
-        const response = await this.callAPI(prompt,
+    async doGenerateImages(prompt, callOptions) {
+        const response = await this.callAPI(prompt, callOptions);
         return {
             response,
             base64Images: response.artifacts.map((artifact) => artifact.base64),
package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs
CHANGED
@@ -26,19 +26,16 @@ class WhisperCppTranscriptionModel extends AbstractModel_js_1.AbstractModel {
         });
     }
     async doTranscribe(data, options) {
-        const response = await this.callAPI(data,
-            functionId: options?.functionId,
-            run: options?.run,
-        });
+        const response = await this.callAPI(data, options);
         return {
             response,
             transcription: response.text,
         };
     }
-    async callAPI(data,
+    async callAPI(data, callOptions) {
         const { temperature } = this.settings;
         const api = this.settings.api ?? new WhisperCppApiConfiguration_js_1.WhisperCppApiConfiguration();
-        const abortSignal =
+        const abortSignal = callOptions.run?.abortSignal;
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
             retry: api.retry,
             throttle: api.throttle,
@@ -51,7 +48,12 @@ class WhisperCppTranscriptionModel extends AbstractModel_js_1.AbstractModel {
                 }
                 return (0, postToApi_js_1.postToApi)({
                     url: api.assembleUrl("/inference"),
-                    headers: api.headers
+                    headers: api.headers({
+                        functionType: callOptions.functionType,
+                        functionId: callOptions.functionId,
+                        run: callOptions.run,
+                        callId: callOptions.callId,
+                    }),
                     body: {
                         content: formData,
                         values: { temperature },
package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts
CHANGED
@@ -1,5 +1,5 @@
 /// <reference types="node" />
-import {
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { TranscriptionModel, TranscriptionModelSettings } from "../../model-function/generate-transcription/TranscriptionModel.js";
@@ -15,13 +15,13 @@ export declare class WhisperCppTranscriptionModel extends AbstractModel<WhisperC
     constructor(settings: WhisperCppTranscriptionModelSettings);
     readonly provider: "whispercpp";
     readonly modelName: null;
-    doTranscribe(data: WhisperCppTranscriptionInput, options
+    doTranscribe(data: WhisperCppTranscriptionInput, options: FunctionCallOptions): Promise<{
         response: {
             text: string;
         };
         transcription: string;
     }>;
-    callAPI(data: WhisperCppTranscriptionInput,
+    callAPI(data: WhisperCppTranscriptionInput, callOptions: FunctionCallOptions): Promise<{
         text: string;
     }>;
     get settingsForEvent(): Partial<WhisperCppTranscriptionModelSettings>;
package/model-provider/whispercpp/WhisperCppTranscriptionModel.js
CHANGED
@@ -23,19 +23,16 @@ export class WhisperCppTranscriptionModel extends AbstractModel {
         });
     }
     async doTranscribe(data, options) {
-        const response = await this.callAPI(data,
-            functionId: options?.functionId,
-            run: options?.run,
-        });
+        const response = await this.callAPI(data, options);
         return {
             response,
             transcription: response.text,
         };
     }
-    async callAPI(data,
+    async callAPI(data, callOptions) {
         const { temperature } = this.settings;
         const api = this.settings.api ?? new WhisperCppApiConfiguration();
-        const abortSignal =
+        const abortSignal = callOptions.run?.abortSignal;
         return callWithRetryAndThrottle({
             retry: api.retry,
             throttle: api.throttle,
@@ -48,7 +45,12 @@ export class WhisperCppTranscriptionModel extends AbstractModel {
                 }
                 return postToApi({
                     url: api.assembleUrl("/inference"),
-                    headers: api.headers
+                    headers: api.headers({
+                        functionType: callOptions.functionType,
+                        functionId: callOptions.functionId,
+                        run: callOptions.run,
+                        callId: callOptions.callId,
+                    }),
                     body: {
                         content: formData,
                         values: { temperature },
package/observability/helicone/HeliconeOpenAIApiConfiguration.cjs
CHANGED
@@ -4,7 +4,7 @@ exports.HeliconeOpenAIApiConfiguration = void 0;
 const BaseUrlApiConfiguration_js_1 = require("../../core/api/BaseUrlApiConfiguration.cjs");
 const loadApiKey_js_1 = require("../../core/api/loadApiKey.cjs");
 class HeliconeOpenAIApiConfiguration extends BaseUrlApiConfiguration_js_1.BaseUrlApiConfiguration {
-    constructor({ baseUrl = "https://oai.hconeai.com/v1", openAIApiKey, heliconeApiKey, retry, throttle, } = {}) {
+    constructor({ baseUrl = "https://oai.hconeai.com/v1", openAIApiKey, heliconeApiKey, retry, throttle, customCallHeaders, } = {}) {
         super({
             baseUrl,
             headers: {
@@ -23,6 +23,7 @@ class HeliconeOpenAIApiConfiguration extends BaseUrlApiConfiguration_js_1.BaseUr
             },
             retry,
             throttle,
+            customCallHeaders,
         });
     }
 }
package/observability/helicone/HeliconeOpenAIApiConfiguration.d.ts
CHANGED
@@ -1,12 +1,14 @@
 import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
+import { CustomHeaderProvider } from "../../core/api/CustomHeaderProvider.js";
 import { RetryFunction } from "../../core/api/RetryFunction.js";
 import { ThrottleFunction } from "../../core/api/ThrottleFunction.js";
 export declare class HeliconeOpenAIApiConfiguration extends BaseUrlApiConfiguration {
-    constructor({ baseUrl, openAIApiKey, heliconeApiKey, retry, throttle, }?: {
+    constructor({ baseUrl, openAIApiKey, heliconeApiKey, retry, throttle, customCallHeaders, }?: {
         baseUrl?: string;
         openAIApiKey?: string;
         heliconeApiKey?: string;
         retry?: RetryFunction;
         throttle?: ThrottleFunction;
+        customCallHeaders?: CustomHeaderProvider;
     });
 }
package/observability/helicone/HeliconeOpenAIApiConfiguration.js
CHANGED
@@ -1,7 +1,7 @@
 import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
 import { loadApiKey } from "../../core/api/loadApiKey.js";
 export class HeliconeOpenAIApiConfiguration extends BaseUrlApiConfiguration {
-    constructor({ baseUrl = "https://oai.hconeai.com/v1", openAIApiKey, heliconeApiKey, retry, throttle, } = {}) {
+    constructor({ baseUrl = "https://oai.hconeai.com/v1", openAIApiKey, heliconeApiKey, retry, throttle, customCallHeaders, } = {}) {
         super({
             baseUrl,
             headers: {
@@ -20,6 +20,7 @@ export class HeliconeOpenAIApiConfiguration extends BaseUrlApiConfiguration {
             },
             retry,
             throttle,
+            customCallHeaders,
         });
     }
 }
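The customCallHeaders option threads the new CustomHeaderProvider (see core/api/CustomHeaderProvider.* in the file list) into BaseUrlApiConfiguration, so per-call context can be turned into extra HTTP headers, e.g. Helicone properties. A hedged sketch; the provider's parameter shape is inferred from the headers({ functionType, functionId, run, callId }) call sites above:

```ts
// Sketch: attaching per-call Helicone properties via customCallHeaders.
// Helicone-Property-* headers are a Helicone convention; the provider
// signature is an assumption based on this diff.
import { HeliconeOpenAIApiConfiguration } from "modelfusion";

const api = new HeliconeOpenAIApiConfiguration({
  customCallHeaders: ({ functionId, callId }) => ({
    "Helicone-Property-Function": functionId ?? "unknown",
    "Helicone-Property-Call": callId,
  }),
});
```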
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
-  "description": "The TypeScript library for building
-  "version": "0.112.0",
+  "description": "The TypeScript library for building AI applications.",
+  "version": "0.114.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [