modelfusion 0.92.1 → 0.93.1
This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
- package/README.md +19 -19
- package/model-function/{PromptFormat.d.ts → PromptTemplate.d.ts} +2 -2
- package/model-function/generate-image/ImageGenerationModel.d.ts +2 -2
- package/model-function/generate-image/{PromptFormatImageGenerationModel.cjs → PromptTemplateImageGenerationModel.cjs} +11 -11
- package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +20 -0
- package/model-function/generate-image/{PromptFormatImageGenerationModel.js → PromptTemplateImageGenerationModel.js} +9 -9
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +6 -6
- package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +4 -4
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +6 -6
- package/model-function/generate-structure/{StructureFromTextPromptFormat.d.ts → StructureFromTextPromptTemplate.d.ts} +1 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +4 -4
- package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +2 -2
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +4 -4
- package/model-function/generate-structure/index.cjs +1 -1
- package/model-function/generate-structure/index.d.ts +1 -1
- package/model-function/generate-structure/index.js +1 -1
- package/model-function/generate-structure/jsonStructurePrompt.d.ts +2 -2
- package/model-function/generate-text/{PromptFormatTextGenerationModel.cjs → PromptTemplateTextGenerationModel.cjs} +21 -21
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +35 -0
- package/model-function/generate-text/{PromptFormatTextGenerationModel.js → PromptTemplateTextGenerationModel.js} +19 -19
- package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +38 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +16 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.js +34 -0
- package/model-function/generate-text/TextGenerationModel.d.ts +3 -3
- package/model-function/generate-text/TextGenerationPromptTemplate.d.ts +11 -0
- package/model-function/generate-text/index.cjs +4 -4
- package/model-function/generate-text/index.d.ts +4 -4
- package/model-function/generate-text/index.js +4 -4
- package/model-function/generate-text/{prompt-format/AlpacaPromptFormat.cjs → prompt-template/AlpacaPromptTemplate.cjs} +5 -2
- package/model-function/generate-text/{prompt-format/AlpacaPromptFormat.d.ts → prompt-template/AlpacaPromptTemplate.d.ts} +5 -5
- package/model-function/generate-text/{prompt-format/AlpacaPromptFormat.js → prompt-template/AlpacaPromptTemplate.js} +5 -2
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.test.cjs +31 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.test.js +29 -0
- package/model-function/generate-text/{prompt-format/ChatMLPromptFormat.cjs → prompt-template/ChatMLPromptTemplate.cjs} +5 -5
- package/model-function/generate-text/{prompt-format/ChatMLPromptFormat.d.ts → prompt-template/ChatMLPromptTemplate.d.ts} +7 -7
- package/model-function/generate-text/{prompt-format/ChatMLPromptFormat.js → prompt-template/ChatMLPromptTemplate.js} +5 -5
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs +49 -0
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js +47 -0
- package/model-function/generate-text/{prompt-format → prompt-template}/ChatPrompt.d.ts +1 -1
- package/model-function/generate-text/prompt-template/Content.js +1 -0
- package/model-function/generate-text/{prompt-format → prompt-template}/InstructionPrompt.d.ts +7 -0
- package/model-function/generate-text/prompt-template/InstructionPrompt.js +1 -0
- package/model-function/generate-text/{prompt-format/Llama2PromptFormat.cjs → prompt-template/Llama2PromptTemplate.cjs} +8 -7
- package/model-function/generate-text/{prompt-format/Llama2PromptFormat.d.ts → prompt-template/Llama2PromptTemplate.d.ts} +7 -6
- package/model-function/generate-text/{prompt-format/Llama2PromptFormat.js → prompt-template/Llama2PromptTemplate.js} +8 -7
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs +49 -0
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.d.ts +1 -0
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js +47 -0
- package/model-function/generate-text/{prompt-format/TextPromptFormat.cjs → prompt-template/TextPromptTemplate.cjs} +3 -0
- package/model-function/generate-text/{prompt-format/TextPromptFormat.d.ts → prompt-template/TextPromptTemplate.d.ts} +4 -4
- package/model-function/generate-text/{prompt-format/TextPromptFormat.js → prompt-template/TextPromptTemplate.js} +3 -0
- package/model-function/generate-text/prompt-template/TextPromptTemplate.test.cjs +49 -0
- package/model-function/generate-text/prompt-template/TextPromptTemplate.test.d.ts +1 -0
- package/model-function/generate-text/prompt-template/TextPromptTemplate.test.js +47 -0
- package/model-function/generate-text/{prompt-format/VicunaPromptFormat.d.ts → prompt-template/VicunaPromptTemplate.d.ts} +2 -2
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +21 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.d.ts +1 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +19 -0
- package/model-function/generate-text/{prompt-format → prompt-template}/index.cjs +6 -6
- package/model-function/generate-text/prompt-template/index.d.ts +10 -0
- package/model-function/generate-text/prompt-template/index.js +10 -0
- package/model-function/index.cjs +2 -2
- package/model-function/index.d.ts +2 -2
- package/model-function/index.js +2 -2
- package/model-provider/anthropic/{AnthropicPromptFormat.cjs → AnthropicPromptTemplate.cjs} +15 -8
- package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +17 -0
- package/model-provider/anthropic/{AnthropicPromptFormat.js → AnthropicPromptTemplate.js} +15 -8
- package/model-provider/anthropic/AnthropicPromptTemplate.test.cjs +49 -0
- package/model-provider/anthropic/AnthropicPromptTemplate.test.d.ts +1 -0
- package/model-provider/anthropic/AnthropicPromptTemplate.test.js +47 -0
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +12 -12
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +9 -9
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +12 -12
- package/model-provider/anthropic/index.cjs +2 -2
- package/model-provider/anthropic/index.d.ts +1 -1
- package/model-provider/anthropic/index.js +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +5 -5
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +4 -4
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +5 -5
- package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.d.ts +2 -2
- package/model-provider/cohere/CohereTextGenerationModel.cjs +10 -10
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +7 -7
- package/model-provider/cohere/CohereTextGenerationModel.js +10 -10
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +4 -4
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +3 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +4 -4
- package/model-provider/llamacpp/{LlamaCppBakLLaVA1Format.cjs → LlamaCppBakLLaVA1PromptTemplate.cjs} +1 -1
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +11 -0
- package/model-provider/llamacpp/{LlamaCppBakLLaVA1Format.js → LlamaCppBakLLaVA1PromptTemplate.js} +1 -1
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +12 -12
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +7 -7
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +12 -12
- package/model-provider/llamacpp/index.cjs +2 -2
- package/model-provider/llamacpp/index.d.ts +1 -1
- package/model-provider/llamacpp/index.js +1 -1
- package/model-provider/ollama/OllamaTextGenerationModel.cjs +9 -9
- package/model-provider/ollama/OllamaTextGenerationModel.d.ts +7 -7
- package/model-provider/ollama/OllamaTextGenerationModel.js +9 -9
- package/model-provider/openai/OpenAICompletionModel.cjs +10 -10
- package/model-provider/openai/OpenAICompletionModel.d.ts +7 -7
- package/model-provider/openai/OpenAICompletionModel.js +10 -10
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +4 -4
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +3 -3
- package/model-provider/openai/OpenAIImageGenerationModel.js +4 -4
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.cjs +16 -16
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +14 -14
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.js +16 -16
- package/model-provider/openai/chat/OpenAIChatMessage.d.ts +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +15 -15
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +12 -12
- package/model-provider/openai/chat/OpenAIChatModel.js +15 -15
- package/model-provider/openai/chat/{OpenAIChatPromptFormat.cjs → OpenAIChatPromptTemplate.cjs} +1 -1
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +20 -0
- package/model-provider/openai/chat/{OpenAIChatPromptFormat.js → OpenAIChatPromptTemplate.js} +1 -1
- package/model-provider/openai/index.cjs +2 -2
- package/model-provider/openai/index.d.ts +1 -1
- package/model-provider/openai/index.js +1 -1
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +14 -14
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +11 -11
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +14 -14
- package/model-provider/stability/StabilityImageGenerationModel.cjs +5 -5
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +4 -4
- package/model-provider/stability/StabilityImageGenerationModel.js +5 -5
- package/model-provider/stability/StabilityImageGenerationPrompt.d.ts +2 -2
- package/package.json +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +2 -2
- package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs +6 -6
- package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.d.ts +4 -4
- package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js +6 -6
- package/model-function/generate-image/PromptFormatImageGenerationModel.d.ts +0 -20
- package/model-function/generate-text/PromptFormatTextGenerationModel.d.ts +0 -35
- package/model-function/generate-text/PromptFormatTextStreamingModel.cjs +0 -38
- package/model-function/generate-text/PromptFormatTextStreamingModel.d.ts +0 -16
- package/model-function/generate-text/PromptFormatTextStreamingModel.js +0 -34
- package/model-function/generate-text/TextGenerationPromptFormat.d.ts +0 -11
- package/model-function/generate-text/prompt-format/index.d.ts +0 -10
- package/model-function/generate-text/prompt-format/index.js +0 -10
- package/model-provider/anthropic/AnthropicPromptFormat.d.ts +0 -17
- package/model-provider/llamacpp/LlamaCppBakLLaVA1Format.d.ts +0 -11
- package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +0 -20
- /package/model-function/{PromptFormat.cjs → PromptTemplate.cjs} +0 -0
- /package/model-function/{PromptFormat.js → PromptTemplate.js} +0 -0
- /package/model-function/generate-structure/{StructureFromTextPromptFormat.cjs → StructureFromTextPromptTemplate.cjs} +0 -0
- /package/model-function/generate-structure/{StructureFromTextPromptFormat.js → StructureFromTextPromptTemplate.js} +0 -0
- /package/model-function/generate-text/{TextGenerationPromptFormat.cjs → TextGenerationPromptTemplate.cjs} +0 -0
- /package/model-function/generate-text/{TextGenerationPromptFormat.js → TextGenerationPromptTemplate.js} +0 -0
- /package/model-function/generate-text/{prompt-format/Content.js → prompt-template/AlpacaPromptTemplate.test.d.ts} +0 -0
- /package/model-function/generate-text/{prompt-format/InstructionPrompt.js → prompt-template/ChatMLPromptTemplate.test.d.ts} +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/ChatPrompt.cjs +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/ChatPrompt.js +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/Content.cjs +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/Content.d.ts +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/InstructionPrompt.cjs +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/InvalidPromptError.cjs +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/InvalidPromptError.d.ts +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/InvalidPromptError.js +0 -0
- /package/model-function/generate-text/{prompt-format/VicunaPromptFormat.cjs → prompt-template/VicunaPromptTemplate.cjs} +0 -0
- /package/model-function/generate-text/{prompt-format/VicunaPromptFormat.js → prompt-template/VicunaPromptTemplate.js} +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/trimChatPrompt.cjs +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/trimChatPrompt.d.ts +0 -0
- /package/model-function/generate-text/{prompt-format → prompt-template}/trimChatPrompt.js +0 -0
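Taken together, the hunks below implement one systematic rename: the "prompt format" abstraction of 0.92.1 becomes "prompt template" in 0.93.1. The `PromptFormat*` classes, `TextGenerationPromptFormat`, the `prompt-format/` directory, the `withPromptFormat`/`withTextPromptFormat` methods, and the `promptFormat` constructor option all turn into their `PromptTemplate` counterparts; behavior is unchanged. A minimal migration sketch follows. It is an illustration only: the `llamacpp.TextGenerator` facade call and the instruction syntax are assumptions, not part of this diff, while the template shape (a `format` function plus `stopSequences`) and the method names are taken from the hunks below.

```ts
import { llamacpp } from "modelfusion";

// A text-generation prompt template: a `format` function plus the stop
// sequences it requires (shape per the hunks below; content assumed).
const instructionTemplate = {
  format: (instruction: string) =>
    `### Instruction:\n${instruction}\n\n### Response:\n`,
  stopSequences: ["\n### Instruction:"],
};

const model = llamacpp
  .TextGenerator({ maxCompletionTokens: 512 }) // assumed facade signature
  // 0.92.1: .withTextPromptFormat(instructionTemplate)
  .withTextPromptTemplate(instructionTemplate); // 0.93.1 name
```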
package/model-provider/huggingface/HuggingFaceTextGenerationModel.js

@@ -2,7 +2,7 @@ import { z } from "zod";
 import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { PromptFormatTextGenerationModel } from "../../model-function/generate-text/PromptFormatTextGenerationModel.js";
+import { PromptTemplateTextGenerationModel } from "../../model-function/generate-text/PromptTemplateTextGenerationModel.js";
 import { HuggingFaceApiConfiguration } from "./HuggingFaceApiConfiguration.js";
 import { failedHuggingFaceCallResponseHandler } from "./HuggingFaceError.js";
 /**
@@ -92,10 +92,10 @@ export class HuggingFaceTextGenerationModel extends AbstractModel {
             text: response[0].generated_text,
         };
     }
-    withPromptFormat(promptFormat) {
-        return new PromptFormatTextGenerationModel({
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateTextGenerationModel({
             model: this,
-            promptFormat,
+            promptTemplate,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/llamacpp/{LlamaCppBakLLaVA1Format.cjs → LlamaCppBakLLaVA1PromptTemplate.cjs}
RENAMED
@@ -1,7 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.chat = exports.instruction = void 0;
-const ChatPrompt_js_1 = require("../../model-function/generate-text/prompt-format/ChatPrompt.cjs");
+const ChatPrompt_js_1 = require("../../model-function/generate-text/prompt-template/ChatPrompt.cjs");
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts
ADDED
@@ -0,0 +1,11 @@
+import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+import { MultiModalChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+import { MultiModalInstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
+import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
+/**
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
+ *
+ * @see https://github.com/SkunkworksAI/BakLLaVA
+ */
+export declare function instruction(): TextGenerationPromptTemplate<MultiModalInstructionPrompt, LlamaCppTextGenerationPrompt>;
+export declare function chat(): TextGenerationPromptTemplate<MultiModalChatPrompt, LlamaCppTextGenerationPrompt>;
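The declaration file above shows the two factories the renamed module exports. A usage sketch, assuming these provider exports are re-exported from the package root and using a TypeScript `declare` for the model instance, since model construction is not part of this diff:

```ts
import {
  LlamaCppBakLLaVA1Prompt,
  LlamaCppTextGenerationModel,
} from "modelfusion"; // assumed root re-exports

declare const model: LlamaCppTextGenerationModel<undefined>;

// Wrap the multi-modal Llama.cpp model so it accepts a
// MultiModalInstructionPrompt via the BakLLaVA 1 (Vicuna 1 style) template.
const bakllavaModel = model.withPromptTemplate(
  LlamaCppBakLLaVA1Prompt.instruction()
);
```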
package/model-provider/llamacpp/{LlamaCppBakLLaVA1Format.js → LlamaCppBakLLaVA1PromptTemplate.js}
RENAMED
@@ -1,4 +1,4 @@
-import { validateChatPrompt, } from "../../model-function/generate-text/prompt-format/ChatPrompt.js";
+import { validateChatPrompt, } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs

@@ -6,7 +6,7 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
-const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
+const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
 const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
@@ -101,7 +101,7 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
         });
     }
     withTextPrompt() {
-        return this.withPromptFormat({
+        return this.withPromptTemplate({
             format(prompt) {
                 return { text: prompt };
             },
@@ -109,31 +109,31 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
         });
     }
     /**
-     * Maps the prompt for a text version of the Llama.cpp prompt format (without image support).
+     * Maps the prompt for a text version of the Llama.cpp prompt template (without image support).
      */
-    withTextPromptFormat(promptFormat) {
-        return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
+    withTextPromptTemplate(promptTemplate) {
+        return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
             model: this.withTextPrompt().withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
-                    ...promptFormat.stopSequences,
+                    ...promptTemplate.stopSequences,
                 ],
             }),
-            promptFormat,
+            promptTemplate,
         });
     }
     /**
-     * Maps the prompt for the full Llama.cpp prompt format (incl. image support).
+     * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
      */
-    withPromptFormat(promptFormat) {
-        return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
             model: this.withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
-                    ...promptFormat.stopSequences,
+                    ...promptTemplate.stopSequences,
                 ],
             }),
-            promptFormat,
+            promptTemplate,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts

@@ -4,9 +4,9 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { Delta } from "../../model-function/Delta.js";
-import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
+import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
-import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
 import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
 export interface LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends number | undefined> extends TextGenerationModelSettings {
     api?: ApiConfiguration;
@@ -110,15 +110,15 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
         };
     }>;
     doStreamText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
-    withTextPrompt(): PromptFormatTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withTextPrompt(): PromptTemplateTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     /**
-     * Maps the prompt for a text version of the Llama.cpp prompt format (without image support).
+     * Maps the prompt for a text version of the Llama.cpp prompt template (without image support).
     */
-    withTextPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, PromptFormatTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>>;
+    withTextPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, PromptTemplateTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>>;
     /**
-     * Maps the prompt for the full Llama.cpp prompt format (incl. image support).
+     * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
     */
-    withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, LlamaCppTextGenerationPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, LlamaCppTextGenerationPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
 }
 declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js

@@ -3,7 +3,7 @@ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottl
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
 import { ZodSchema } from "../../core/schema/ZodSchema.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
+import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { parseJSON } from "../../core/schema/parseJSON.js";
 import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
@@ -98,7 +98,7 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
         });
     }
     withTextPrompt() {
-        return this.withPromptFormat({
+        return this.withPromptTemplate({
             format(prompt) {
                 return { text: prompt };
             },
@@ -106,31 +106,31 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
         });
     }
     /**
-     * Maps the prompt for a text version of the Llama.cpp prompt format (without image support).
+     * Maps the prompt for a text version of the Llama.cpp prompt template (without image support).
      */
-    withTextPromptFormat(promptFormat) {
-        return new PromptFormatTextStreamingModel({
+    withTextPromptTemplate(promptTemplate) {
+        return new PromptTemplateTextStreamingModel({
             model: this.withTextPrompt().withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
-                    ...promptFormat.stopSequences,
+                    ...promptTemplate.stopSequences,
                 ],
             }),
-            promptFormat,
+            promptTemplate,
        });
     }
     /**
-     * Maps the prompt for the full Llama.cpp prompt format (incl. image support).
+     * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
      */
-    withPromptFormat(promptFormat) {
-        return new PromptFormatTextStreamingModel({
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateTextStreamingModel({
             model: this.withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
-                    ...promptFormat.stopSequences,
+                    ...promptTemplate.stopSequences,
                 ],
             }),
-            promptFormat,
+            promptTemplate,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/llamacpp/index.cjs

@@ -26,9 +26,9 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.llamacpp = exports.LlamaCppError = exports.LlamaCppBakLLaVA1Format = void 0;
+exports.llamacpp = exports.LlamaCppError = exports.LlamaCppBakLLaVA1Prompt = void 0;
 __exportStar(require("./LlamaCppApiConfiguration.cjs"), exports);
-exports.LlamaCppBakLLaVA1Format = __importStar(require("./LlamaCppBakLLaVA1Format.cjs"));
+exports.LlamaCppBakLLaVA1Prompt = __importStar(require("./LlamaCppBakLLaVA1PromptTemplate.cjs"));
 var LlamaCppError_js_1 = require("./LlamaCppError.cjs");
 Object.defineProperty(exports, "LlamaCppError", { enumerable: true, get: function () { return LlamaCppError_js_1.LlamaCppError; } });
 exports.llamacpp = __importStar(require("./LlamaCppFacade.cjs"));
package/model-provider/llamacpp/index.d.ts

@@ -1,5 +1,5 @@
 export * from "./LlamaCppApiConfiguration.js";
-export * as LlamaCppBakLLaVA1Format from "./LlamaCppBakLLaVA1Format.js";
+export * as LlamaCppBakLLaVA1Prompt from "./LlamaCppBakLLaVA1PromptTemplate.js";
 export { LlamaCppError, LlamaCppErrorData } from "./LlamaCppError.js";
 export * as llamacpp from "./LlamaCppFacade.js";
 export * from "./LlamaCppTextEmbeddingModel.js";
package/model-provider/llamacpp/index.js

@@ -1,5 +1,5 @@
 export * from "./LlamaCppApiConfiguration.js";
-export * as LlamaCppBakLLaVA1Format from "./LlamaCppBakLLaVA1Format.js";
+export * as LlamaCppBakLLaVA1Prompt from "./LlamaCppBakLLaVA1PromptTemplate.js";
 export { LlamaCppError } from "./LlamaCppError.js";
 export * as llamacpp from "./LlamaCppFacade.js";
 export * from "./LlamaCppTextEmbeddingModel.js";
package/model-provider/ollama/OllamaTextGenerationModel.cjs

@@ -8,7 +8,7 @@ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
-const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
+const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
 const TextGenerationToolCallModel_js_1 = require("../../tool/generate-tool-call/TextGenerationToolCallModel.cjs");
 const TextGenerationToolCallsOrGenerateTextModel_js_1 = require("../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
@@ -98,27 +98,27 @@ class OllamaTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             responseFormat: exports.OllamaTextGenerationResponseFormat.deltaIterable,
         });
     }
-    asToolCallGenerationModel(promptFormat) {
+    asToolCallGenerationModel(promptTemplate) {
         return new TextGenerationToolCallModel_js_1.TextGenerationToolCallModel({
             model: this,
-            format: promptFormat,
+            format: promptTemplate,
         });
     }
-    asToolCallsOrTextGenerationModel(promptFormat) {
+    asToolCallsOrTextGenerationModel(promptTemplate) {
         return new TextGenerationToolCallsOrGenerateTextModel_js_1.TextGenerationToolCallsOrGenerateTextModel({
             model: this,
-            format: promptFormat,
+            template: promptTemplate,
         });
     }
-    withPromptFormat(promptFormat) {
-        return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
             model: this.withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
-                    ...promptFormat.stopSequences,
+                    ...promptTemplate.stopSequences,
                 ],
             }),
-            promptFormat,
+            promptTemplate,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/ollama/OllamaTextGenerationModel.d.ts

@@ -4,11 +4,11 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { Delta } from "../../model-function/Delta.js";
-import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
+import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
-import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
-import { TextGenerationToolCallModel, ToolCallPromptFormat } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
-import { TextGenerationToolCallsOrGenerateTextModel, ToolCallsOrGenerateTextPromptFormat } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
+import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+import { TextGenerationToolCallModel, ToolCallPromptTemplate } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
+import { TextGenerationToolCallsOrGenerateTextModel, ToolCallsOrGenerateTextPromptTemplate } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
 /**
  * @see https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion
  */
@@ -137,9 +137,9 @@ export declare class OllamaTextGenerationModel<CONTEXT_WINDOW_SIZE extends numbe
         text: string;
     }>;
     doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
-    asToolCallGenerationModel<INPUT_PROMPT>(promptFormat: ToolCallPromptFormat<INPUT_PROMPT, string>): TextGenerationToolCallModel<INPUT_PROMPT, string, this>;
-    asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptFormat: ToolCallsOrGenerateTextPromptFormat<INPUT_PROMPT, string>): TextGenerationToolCallsOrGenerateTextModel<INPUT_PROMPT, string, this>;
-    withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, string>): TextGenerationToolCallModel<INPUT_PROMPT, string, this>;
+    asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsOrGenerateTextPromptTemplate<INPUT_PROMPT, string>): TextGenerationToolCallsOrGenerateTextModel<INPUT_PROMPT, string, this>;
+    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withSettings(additionalSettings: Partial<OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
 }
 declare const ollamaTextGenerationResponseSchema: z.ZodObject<{
package/model-provider/ollama/OllamaTextGenerationModel.js

@@ -5,7 +5,7 @@ import { postJsonToApi } from "../../core/api/postToApi.js";
 import { ZodSchema } from "../../core/schema/ZodSchema.js";
 import { safeParseJSON } from "../../core/schema/parseJSON.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
+import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
 import { TextGenerationToolCallsOrGenerateTextModel, } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
@@ -95,27 +95,27 @@ export class OllamaTextGenerationModel extends AbstractModel {
             responseFormat: OllamaTextGenerationResponseFormat.deltaIterable,
         });
     }
-    asToolCallGenerationModel(promptFormat) {
+    asToolCallGenerationModel(promptTemplate) {
         return new TextGenerationToolCallModel({
             model: this,
-            format: promptFormat,
+            format: promptTemplate,
         });
     }
-    asToolCallsOrTextGenerationModel(promptFormat) {
+    asToolCallsOrTextGenerationModel(promptTemplate) {
         return new TextGenerationToolCallsOrGenerateTextModel({
             model: this,
-            format: promptFormat,
+            template: promptTemplate,
         });
     }
-    withPromptFormat(promptFormat) {
-        return new PromptFormatTextStreamingModel({
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateTextStreamingModel({
             model: this.withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
-                    ...promptFormat.stopSequences,
+                    ...promptTemplate.stopSequences,
                 ],
             }),
-            promptFormat,
+            promptTemplate,
         });
     }
     withSettings(additionalSettings) {
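One behavior worth noting in the `withPromptTemplate` bodies above: the template's stop sequences are appended to the model's existing `stopSequences` setting rather than replacing it. A sketch of the effect, using a TypeScript `declare` for the model instance (construction is not part of this diff) and an assumed Llama-2-style instruction syntax:

```ts
import { OllamaTextGenerationModel } from "modelfusion"; // assumed root re-export

declare const ollamaModel: OllamaTextGenerationModel<undefined>;

// Hypothetical template that brings its own stop sequence.
const wrapped = ollamaModel.withPromptTemplate({
  format: (prompt: string) => `[INST] ${prompt} [/INST]`,
  stopSequences: ["[INST]"],
});
// Per the hunks above, the wrapped model stops on the base model's
// stopSequences *and* on "[INST]": the arrays are concatenated, not replaced.
```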
package/model-provider/openai/OpenAICompletionModel.cjs

@@ -7,8 +7,8 @@ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
-const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
-const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
+const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+const TextPromptTemplate_js_1 = require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
 const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
@@ -239,26 +239,26 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
         });
     }
     /**
-     * Returns this model with an instruction prompt format.
+     * Returns this model with an instruction prompt template.
      */
     withInstructionPrompt() {
-        return this.withPromptFormat((0, TextPromptFormat_js_1.instruction)());
+        return this.withPromptTemplate((0, TextPromptTemplate_js_1.instruction)());
     }
     /**
-     * Returns this model with a chat prompt format.
+     * Returns this model with a chat prompt template.
      */
     withChatPrompt(options) {
-        return this.withPromptFormat((0, TextPromptFormat_js_1.chat)(options));
+        return this.withPromptTemplate((0, TextPromptTemplate_js_1.chat)(options));
     }
-    withPromptFormat(promptFormat) {
-        return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
             model: this.withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
-                    ...promptFormat.stopSequences,
+                    ...promptTemplate.stopSequences,
                 ],
             }),
-            promptFormat,
+            promptTemplate,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/openai/OpenAICompletionModel.d.ts

@@ -4,9 +4,9 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { Delta } from "../../model-function/Delta.js";
-import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
+import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
-import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
 /**
  * @see https://platform.openai.com/docs/models/
@@ -174,17 +174,17 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
     }>;
     doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
     /**
-     * Returns this model with an instruction prompt format.
+     * Returns this model with an instruction prompt template.
     */
-    withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").TextInstructionPrompt, string, OpenAICompletionModelSettings, this>;
+    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextInstructionPrompt, string, OpenAICompletionModelSettings, this>;
     /**
-     * Returns this model with a chat prompt format.
+     * Returns this model with a chat prompt template.
     */
     withChatPrompt(options?: {
         user?: string;
         assistant?: string;
-    }): PromptFormatTextStreamingModel<import("../../index.js").TextChatPrompt, string, OpenAICompletionModelSettings, this>;
-    withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
+    }): PromptTemplateTextStreamingModel<import("../../index.js").TextChatPrompt, string, OpenAICompletionModelSettings, this>;
+    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
     withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
 }
 declare const OpenAICompletionResponseSchema: z.ZodObject<{
package/model-provider/openai/OpenAICompletionModel.js

@@ -4,8 +4,8 @@ import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postTo
 import { ZodSchema } from "../../core/schema/ZodSchema.js";
 import { parseJSON } from "../../core/schema/parseJSON.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
-import { chat, instruction, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
+import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
@@ -233,26 +233,26 @@ export class OpenAICompletionModel extends AbstractModel {
         });
     }
     /**
-     * Returns this model with an instruction prompt format.
+     * Returns this model with an instruction prompt template.
      */
     withInstructionPrompt() {
-        return this.withPromptFormat(instruction());
+        return this.withPromptTemplate(instruction());
     }
     /**
-     * Returns this model with a chat prompt format.
+     * Returns this model with a chat prompt template.
      */
     withChatPrompt(options) {
-        return this.withPromptFormat(chat(options));
+        return this.withPromptTemplate(chat(options));
     }
-    withPromptFormat(promptFormat) {
-        return new PromptFormatTextStreamingModel({
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateTextStreamingModel({
             model: this.withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
-                    ...promptFormat.stopSequences,
+                    ...promptTemplate.stopSequences,
                 ],
             }),
-            promptFormat,
+            promptTemplate,
         });
     }
     withSettings(additionalSettings) {
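As the hunks above show, `withInstructionPrompt()` and `withChatPrompt(options)` are now thin wrappers over `withPromptTemplate` with the stock text templates. A usage sketch; the `openai.CompletionTextGenerator` facade call and model name are assumptions, while the wrapper methods come from this diff:

```ts
import { openai } from "modelfusion";

const chatModel = openai
  .CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }) // assumed
  .withChatPrompt(); // shorthand for .withPromptTemplate(chat())

// chatModel now accepts a TextChatPrompt instead of a raw completion string.
```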
package/model-provider/openai/OpenAIImageGenerationModel.cjs

@@ -5,7 +5,7 @@ const zod_1 = require("zod");
 const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
-const PromptFormatImageGenerationModel_js_1 = require("../../model-function/generate-image/PromptFormatImageGenerationModel.cjs");
+const PromptTemplateImageGenerationModel_js_1 = require("../../model-function/generate-image/PromptTemplateImageGenerationModel.cjs");
 const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
 const OpenAIError_js_1 = require("./OpenAIError.cjs");
 exports.OPENAI_IMAGE_MODELS = {
@@ -122,10 +122,10 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
             base64Image: response.data[0].b64_json,
         };
     }
-    withPromptFormat(promptFormat) {
-        return new PromptFormatImageGenerationModel_js_1.PromptFormatImageGenerationModel({
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateImageGenerationModel_js_1.PromptTemplateImageGenerationModel({
             model: this,
-            promptFormat,
+            promptTemplate,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/openai/OpenAIImageGenerationModel.d.ts

@@ -3,9 +3,9 @@ import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { PromptFormat } from "../../model-function/PromptFormat.js";
+import { PromptTemplate } from "../../model-function/PromptTemplate.js";
 import { ImageGenerationModel, ImageGenerationModelSettings } from "../../model-function/generate-image/ImageGenerationModel.js";
-import { PromptFormatImageGenerationModel } from "../../model-function/generate-image/PromptFormatImageGenerationModel.js";
+import { PromptTemplateImageGenerationModel } from "../../model-function/generate-image/PromptTemplateImageGenerationModel.js";
 export declare const OPENAI_IMAGE_MODELS: {
     "dall-e-2": {
         getCost(settings: OpenAIImageGenerationSettings): 2000 | 1800 | 1600 | null;
@@ -61,7 +61,7 @@ export declare class OpenAIImageGenerationModel extends AbstractModel<OpenAIImag
         };
         base64Image: string;
     }>;
-    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatImageGenerationModel<INPUT_PROMPT, string, OpenAIImageGenerationSettings, this>;
+    withPromptTemplate<INPUT_PROMPT>(promptTemplate: PromptTemplate<INPUT_PROMPT, string>): PromptTemplateImageGenerationModel<INPUT_PROMPT, string, OpenAIImageGenerationSettings, this>;
     withSettings(additionalSettings: Partial<OpenAIImageGenerationSettings>): this;
 }
 export type OpenAIImageGenerationResponseFormatType<T> = {
package/model-provider/openai/OpenAIImageGenerationModel.js

@@ -2,7 +2,7 @@ import { z } from "zod";
 import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { PromptFormatImageGenerationModel } from "../../model-function/generate-image/PromptFormatImageGenerationModel.js";
+import { PromptTemplateImageGenerationModel } from "../../model-function/generate-image/PromptTemplateImageGenerationModel.js";
 import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
 import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
 export const OPENAI_IMAGE_MODELS = {
@@ -118,10 +118,10 @@ export class OpenAIImageGenerationModel extends AbstractModel {
             base64Image: response.data[0].b64_json,
         };
     }
-    withPromptFormat(promptFormat) {
-        return new PromptFormatImageGenerationModel({
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateImageGenerationModel({
             model: this,
-            promptFormat,
+            promptTemplate,
        });
     }
     withSettings(additionalSettings) {
|