modelfusion 0.117.0 → 0.119.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +60 -0
- package/README.md +10 -9
- package/core/getFunctionCallLogger.cjs +6 -6
- package/core/getFunctionCallLogger.js +6 -6
- package/model-function/ModelCallEvent.d.ts +1 -1
- package/model-function/embed/EmbeddingEvent.d.ts +1 -1
- package/model-function/embed/EmbeddingModel.d.ts +1 -1
- package/model-function/embed/embed.cjs +5 -5
- package/model-function/embed/embed.d.ts +2 -2
- package/model-function/embed/embed.js +5 -5
- package/model-function/executeStandardCall.cjs +3 -3
- package/model-function/executeStandardCall.d.ts +2 -2
- package/model-function/executeStandardCall.js +3 -3
- package/model-function/generate-image/ImageGenerationEvent.d.ts +1 -1
- package/model-function/generate-image/ImageGenerationModel.d.ts +1 -1
- package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +1 -1
- package/model-function/generate-image/generateImage.cjs +2 -2
- package/model-function/generate-image/generateImage.d.ts +1 -1
- package/model-function/generate-image/generateImage.js +2 -2
- package/model-function/generate-speech/SpeechGenerationEvent.d.ts +1 -1
- package/model-function/generate-speech/generateSpeech.cjs +2 -2
- package/model-function/generate-speech/generateSpeech.d.ts +1 -1
- package/model-function/generate-speech/generateSpeech.js +2 -2
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +10 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +1 -0
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +10 -1
- package/model-function/generate-structure/StructureFromTextPromptTemplate.d.ts +12 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +1 -22
- package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +0 -5
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +1 -22
- package/model-function/generate-structure/StructureGenerationEvent.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.cjs +2 -2
- package/model-function/generate-structure/generateStructure.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.js +2 -2
- package/model-function/generate-structure/jsonStructurePrompt.cjs +4 -12
- package/model-function/generate-structure/jsonStructurePrompt.js +4 -12
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +2 -2
- package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +6 -0
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +5 -2
- package/model-function/generate-text/PromptTemplateTextGenerationModel.js +6 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +6 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +3 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.js +6 -0
- package/model-function/generate-text/TextGenerationEvent.d.ts +1 -1
- package/model-function/generate-text/TextGenerationModel.d.ts +7 -4
- package/model-function/generate-text/generateText.cjs +3 -3
- package/model-function/generate-text/generateText.d.ts +1 -1
- package/model-function/generate-text/generateText.js +3 -3
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +8 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +5 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +6 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.cjs +2 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.d.ts +8 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.js +1 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +34 -1
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +9 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +31 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +28 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +29 -1
- package/model-function/generate-text/prompt-template/index.cjs +1 -0
- package/model-function/generate-text/prompt-template/index.d.ts +1 -0
- package/model-function/generate-text/prompt-template/index.js +1 -0
- package/model-function/generate-transcription/TranscriptionEvent.d.ts +1 -1
- package/model-function/generate-transcription/TranscriptionModel.d.ts +1 -1
- package/model-function/generate-transcription/generateTranscription.cjs +1 -1
- package/model-function/generate-transcription/generateTranscription.d.ts +1 -1
- package/model-function/generate-transcription/generateTranscription.js +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +3 -3
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.js +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -3
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +5 -4
- package/model-provider/cohere/CohereTextGenerationModel.js +6 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +3 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +3 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +6 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +5 -4
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +6 -3
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +15 -1
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -0
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +13 -0
- package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +40 -33
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +20 -9
- package/model-provider/llamacpp/LlamaCppCompletionModel.js +40 -33
- package/model-provider/llamacpp/LlamaCppFacade.cjs +4 -3
- package/model-provider/llamacpp/LlamaCppFacade.d.ts +2 -1
- package/model-provider/llamacpp/LlamaCppFacade.js +2 -1
- package/model-provider/llamacpp/LlamaCppGrammars.cjs +3 -1
- package/model-provider/llamacpp/LlamaCppGrammars.d.ts +1 -0
- package/model-provider/llamacpp/LlamaCppGrammars.js +1 -0
- package/model-provider/llamacpp/LlamaCppPrompt.cjs +59 -0
- package/model-provider/llamacpp/LlamaCppPrompt.d.ts +14 -0
- package/model-provider/llamacpp/LlamaCppPrompt.js +31 -0
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +3 -3
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +1 -1
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +3 -3
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.cjs +113 -0
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.d.ts +7 -0
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.js +109 -0
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.test.cjs +150 -0
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.test.d.ts +1 -0
- package/model-provider/llamacpp/convertJsonSchemaToGBNF.test.js +148 -0
- package/model-provider/llamacpp/index.cjs +2 -3
- package/model-provider/llamacpp/index.d.ts +1 -2
- package/model-provider/llamacpp/index.js +1 -2
- package/model-provider/mistral/MistralChatModel.cjs +6 -3
- package/model-provider/mistral/MistralChatModel.d.ts +5 -4
- package/model-provider/mistral/MistralChatModel.js +6 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.cjs +3 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +1 -1
- package/model-provider/mistral/MistralTextEmbeddingModel.js +3 -3
- package/model-provider/ollama/OllamaChatModel.cjs +3 -3
- package/model-provider/ollama/OllamaChatModel.d.ts +2 -2
- package/model-provider/ollama/OllamaChatModel.js +3 -3
- package/model-provider/ollama/OllamaCompletionModel.cjs +6 -3
- package/model-provider/ollama/OllamaCompletionModel.d.ts +15 -14
- package/model-provider/ollama/OllamaCompletionModel.js +6 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +3 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +1 -1
- package/model-provider/ollama/OllamaTextEmbeddingModel.js +3 -3
- package/model-provider/openai/AbstractOpenAIChatModel.cjs +12 -12
- package/model-provider/openai/AbstractOpenAIChatModel.d.ts +6 -6
- package/model-provider/openai/AbstractOpenAIChatModel.js +12 -12
- package/model-provider/openai/AbstractOpenAICompletionModel.cjs +9 -6
- package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +3 -2
- package/model-provider/openai/AbstractOpenAICompletionModel.js +9 -6
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +3 -3
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.js +3 -3
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -3
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.cjs +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.js +3 -3
- package/model-provider/stability/StabilityImageGenerationModel.cjs +3 -3
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.js +3 -3
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +3 -3
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +1 -1
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +3 -3
- package/package.json +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +2 -2
- package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +2 -2
- package/tool/generate-tool-call/ToolCallGenerationEvent.d.ts +1 -1
- package/tool/generate-tool-call/ToolCallGenerationModel.d.ts +1 -1
- package/tool/generate-tool-call/generateToolCall.cjs +2 -2
- package/tool/generate-tool-call/generateToolCall.js +2 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +2 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.d.ts +1 -1
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +2 -2
- package/tool/generate-tool-calls/ToolCallsGenerationEvent.d.ts +1 -1
- package/tool/generate-tool-calls/ToolCallsGenerationModel.d.ts +1 -1
- package/tool/generate-tool-calls/generateToolCalls.cjs +2 -2
- package/tool/generate-tool-calls/generateToolCalls.d.ts +1 -1
- package/tool/generate-tool-calls/generateToolCalls.js +2 -2
@@ -7,6 +7,7 @@ export * from "./InvalidPromptError.js";
 export * as Llama2Prompt from "./Llama2PromptTemplate.js";
 export * as MistralInstructPrompt from "./MistralInstructPromptTemplate.js";
 export * as NeuralChatPrompt from "./NeuralChatPromptTemplate.js";
+export * from "./PromptTemplateProvider.js";
 export * as TextPrompt from "./TextPromptTemplate.js";
 export * as VicunaPrompt from "./VicunaPromptTemplate.js";
 export * from "./trimChatPrompt.js";
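The `PromptTemplateProvider` declaration itself is not visible in this diff view. Based on the `+8` lines in `PromptTemplateProvider.d.ts` and the `promptTemplateProvider.text()` / `.instruction()` / `.chat()` calls in the LlamaCpp hunks further down, its shape is plausibly along these lines (a sketch, not the verbatim declaration):

```ts
import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
import { ChatPrompt } from "./ChatPrompt.js";
import { InstructionPrompt } from "./InstructionPrompt.js";

// A provider bundles the text/instruction/chat templates of one prompt
// dialect so that models can resolve them uniformly.
export interface TextGenerationPromptTemplateProvider<TARGET_PROMPT> {
  text(): TextGenerationPromptTemplate<string, TARGET_PROMPT>;
  instruction(): TextGenerationPromptTemplate<InstructionPrompt, TARGET_PROMPT>;
  chat(): TextGenerationPromptTemplate<ChatPrompt, TARGET_PROMPT>;
}
```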
@@ -4,7 +4,7 @@ export interface TranscriptionModelSettings extends ModelSettings {
 }
 export interface TranscriptionModel<DATA, SETTINGS extends TranscriptionModelSettings = TranscriptionModelSettings> extends Model<SETTINGS> {
     doTranscribe: (data: DATA, options: FunctionCallOptions) => PromiseLike<{
-        response: unknown;
+        rawResponse: unknown;
         transcription: string;
     }>;
 }
@@ -11,7 +11,7 @@ async function generateTranscription(model, data, options) {
         generateResponse: async (options) => {
             const result = await model.doTranscribe(data, options);
             return {
-                response: result.response,
+                rawResponse: result.rawResponse,
                 extractedValue: result.transcription,
             };
         },
@@ -8,7 +8,7 @@ export async function generateTranscription(model, data, options) {
         generateResponse: async (options) => {
             const result = await model.doTranscribe(data, options);
             return {
-                response: result.response,
+                rawResponse: result.rawResponse,
                 extractedValue: result.transcription,
             };
         },
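The two `generateTranscription` hunks above are part of the package-wide rename of `response` to `rawResponse` in model call results. For callers the rename surfaces through the `fullResponse: true` option; a minimal sketch, assuming the OpenAI transcriber facade of this release (file handling is illustrative, not from this diff):

```ts
import fs from "node:fs/promises";
import { generateTranscription, openai } from "modelfusion";

// The raw provider response is now returned as `rawResponse`
// (previously `response`) when fullResponse is requested.
const { transcription, rawResponse } = await generateTranscription(
  openai.Transcriber({ model: "whisper-1" }),
  { type: "mp3", data: await fs.readFile("audio.mp3") },
  { fullResponse: true }
);
```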
@@ -74,10 +74,10 @@ class Automatic1111ImageGenerationModel extends AbstractModel_js_1.AbstractModel
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
     async doGenerateImages(prompt, options) {
-        const response = await this.callAPI(prompt, options);
+        const rawResponse = await this.callAPI(prompt, options);
         return {
-            response,
-            base64Images: response.images,
+            rawResponse,
+            base64Images: rawResponse.images,
         };
     }
     withTextPrompt() {
@@ -40,7 +40,7 @@ export declare class Automatic1111ImageGenerationModel extends AbstractModel<Aut
     callAPI(input: Automatic1111ImageGenerationPrompt, callOptions: FunctionCallOptions): Promise<Automatic1111ImageGenerationResponse>;
     get settingsForEvent(): Partial<Automatic1111ImageGenerationSettings>;
     doGenerateImages(prompt: Automatic1111ImageGenerationPrompt, options: FunctionCallOptions): Promise<{
-        response: {
+        rawResponse: {
             images: string[];
             parameters: {};
             info: string;
@@ -71,10 +71,10 @@ export class Automatic1111ImageGenerationModel extends AbstractModel {
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
     async doGenerateImages(prompt, options) {
-        const response = await this.callAPI(prompt, options);
+        const rawResponse = await this.callAPI(prompt, options);
         return {
-            response,
-            base64Images: response.images,
+            rawResponse,
+            base64Images: rawResponse.images,
         };
     }
     withTextPrompt() {
@@ -148,10 +148,10 @@ class CohereTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
         };
     }
     async doEmbedValues(texts, options) {
-        const response = await this.callAPI(texts, options);
+        const rawResponse = await this.callAPI(texts, options);
         return {
-            response,
-            embeddings: response.embeddings,
+            rawResponse,
+            embeddings: rawResponse.embeddings,
         };
     }
     withSettings(additionalSettings) {
@@ -73,7 +73,7 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
     callAPI(texts: Array<string>, callOptions: FunctionCallOptions): Promise<CohereTextEmbeddingResponse>;
     get settingsForEvent(): Partial<CohereTextEmbeddingModelSettings>;
     doEmbedValues(texts: string[], options: FunctionCallOptions): Promise<{
-        response: {
+        rawResponse: {
             embeddings: number[][];
             texts: string[];
             id: string;
@@ -145,10 +145,10 @@ export class CohereTextEmbeddingModel extends AbstractModel {
         };
     }
     async doEmbedValues(texts, options) {
-        const response = await this.callAPI(texts, options);
+        const rawResponse = await this.callAPI(texts, options);
         return {
-            response,
-            embeddings: response.embeddings,
+            rawResponse,
+            embeddings: rawResponse.embeddings,
         };
     }
     withSettings(additionalSettings) {
@@ -138,10 +138,10 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             schema: (0, ZodSchema_js_1.zodSchema)(cohereTextGenerationResponseSchema),
         }));
     }
-    processTextGenerationResponse(response) {
+    processTextGenerationResponse(rawResponse) {
         return {
-            response,
-            textGenerationResults: response.generations.map((generation) => ({
+            rawResponse,
+            textGenerationResults: rawResponse.generations.map((generation) => ({
                 text: generation.text,
                 finishReason: this.translateFinishReason(generation.finish_reason),
             })),
@@ -182,6 +182,9 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     withChatPrompt(options) {
         return this.withPromptTemplate((0, TextPromptTemplate_js_1.chat)(options));
     }
+    withJsonOutput() {
+        return this;
+    }
     withPromptTemplate(promptTemplate) {
         return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
             model: this.withSettings({
@@ -59,7 +59,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
     }): Promise<RESPONSE>;
     get settingsForEvent(): Partial<CohereTextGenerationModelSettings>;
     doGenerateTexts(prompt: string, options: FunctionCallOptions): Promise<{
-        response: {
+        rawResponse: {
             id: string;
             prompt: string;
             generations: {
@@ -79,7 +79,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
         }[];
     }>;
     restoreGeneratedTexts(rawResponse: unknown): {
-        response: {
+        rawResponse: {
             id: string;
             prompt: string;
             generations: {
@@ -98,8 +98,8 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
             finishReason: TextGenerationFinishReason;
         }[];
     };
-    processTextGenerationResponse(response: CohereTextGenerationResponse): {
-        response: {
+    processTextGenerationResponse(rawResponse: CohereTextGenerationResponse): {
+        rawResponse: {
             id: string;
             prompt: string;
             generations: {
@@ -152,6 +152,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
         user?: string;
         assistant?: string;
     }): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, string, CohereTextGenerationModelSettings, this>;
+    withJsonOutput(): this;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, CohereTextGenerationModelSettings, this>;
     withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
 }
@@ -135,10 +135,10 @@ export class CohereTextGenerationModel extends AbstractModel {
             schema: zodSchema(cohereTextGenerationResponseSchema),
         }));
     }
-    processTextGenerationResponse(response) {
+    processTextGenerationResponse(rawResponse) {
         return {
-            response,
-            textGenerationResults: response.generations.map((generation) => ({
+            rawResponse,
+            textGenerationResults: rawResponse.generations.map((generation) => ({
                 text: generation.text,
                 finishReason: this.translateFinishReason(generation.finish_reason),
             })),
@@ -179,6 +179,9 @@ export class CohereTextGenerationModel extends AbstractModel {
     withChatPrompt(options) {
         return this.withPromptTemplate(chat(options));
     }
+    withJsonOutput() {
+        return this;
+    }
     withPromptTemplate(promptTemplate) {
         return new PromptTemplateTextStreamingModel({
             model: this.withSettings({
@@ -117,10 +117,10 @@ class HuggingFaceTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
         };
     }
     async doEmbedValues(texts, options) {
-        const response = await this.callAPI(texts, options);
+        const rawResponse = await this.callAPI(texts, options);
         return {
-            response,
-            embeddings: response,
+            rawResponse,
+            embeddings: rawResponse,
         };
     }
     withSettings(additionalSettings) {
@@ -46,7 +46,7 @@ export declare class HuggingFaceTextEmbeddingModel extends AbstractModel<Hugging
     get settingsForEvent(): Partial<HuggingFaceTextEmbeddingModelSettings>;
     readonly countPromptTokens: undefined;
     doEmbedValues(texts: string[], options: FunctionCallOptions): Promise<{
-        response: number[][];
+        rawResponse: number[][];
         embeddings: number[][];
     }>;
     withSettings(additionalSettings: Partial<HuggingFaceTextEmbeddingModelSettings>): this;
@@ -114,10 +114,10 @@ export class HuggingFaceTextEmbeddingModel extends AbstractModel {
         };
     }
     async doEmbedValues(texts, options) {
-        const response = await this.callAPI(texts, options);
+        const rawResponse = await this.callAPI(texts, options);
         return {
-            response,
-            embeddings: response,
+            rawResponse,
+            embeddings: rawResponse,
         };
     }
     withSettings(additionalSettings) {
@@ -116,15 +116,18 @@ class HuggingFaceTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             schema: (0, ZodSchema_js_1.zodSchema)(huggingFaceTextGenerationResponseSchema),
         }));
     }
-    processTextGenerationResponse(response) {
+    processTextGenerationResponse(rawResponse) {
         return {
-            response,
-            textGenerationResults: response.map((response) => ({
+            rawResponse,
+            textGenerationResults: rawResponse.map((response) => ({
                 text: response.generated_text,
                 finishReason: "unknown",
             })),
         };
     }
+    withJsonOutput() {
+        return this;
+    }
     withPromptTemplate(promptTemplate) {
         return new PromptTemplateTextGenerationModel_js_1.PromptTemplateTextGenerationModel({
             model: this, // stop tokens are not supported by this model
@@ -43,7 +43,7 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
     callAPI(prompt: string, callOptions: FunctionCallOptions): Promise<HuggingFaceTextGenerationResponse>;
     get settingsForEvent(): Partial<HuggingFaceTextGenerationModelSettings>;
     doGenerateTexts(prompt: string, options: FunctionCallOptions): Promise<{
-        response: {
+        rawResponse: {
             generated_text: string;
         }[];
         textGenerationResults: {
@@ -52,7 +52,7 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
         }[];
     }>;
     restoreGeneratedTexts(rawResponse: unknown): {
-        response: {
+        rawResponse: {
             generated_text: string;
         }[];
         textGenerationResults: {
@@ -60,8 +60,8 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
             finishReason: "unknown";
         }[];
     };
-    processTextGenerationResponse(response: HuggingFaceTextGenerationResponse): {
-        response: {
+    processTextGenerationResponse(rawResponse: HuggingFaceTextGenerationResponse): {
+        rawResponse: {
             generated_text: string;
         }[];
         textGenerationResults: {
@@ -69,6 +69,7 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
             finishReason: "unknown";
         }[];
     };
+    withJsonOutput(): this;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextGenerationModel<INPUT_PROMPT, string, HuggingFaceTextGenerationModelSettings, this>;
     withSettings(additionalSettings: Partial<HuggingFaceTextGenerationModelSettings>): this;
 }
@@ -113,15 +113,18 @@ export class HuggingFaceTextGenerationModel extends AbstractModel {
             schema: zodSchema(huggingFaceTextGenerationResponseSchema),
         }));
     }
-    processTextGenerationResponse(response) {
+    processTextGenerationResponse(rawResponse) {
         return {
-            response,
-            textGenerationResults: response.map((response) => ({
+            rawResponse,
+            textGenerationResults: rawResponse.map((response) => ({
                 text: response.generated_text,
                 finishReason: "unknown",
             })),
         };
     }
+    withJsonOutput() {
+        return this;
+    }
     withPromptTemplate(promptTemplate) {
         return new PromptTemplateTextGenerationModel({
             model: this, // stop tokens are not supported by this model
@@ -1,11 +1,25 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.chat = exports.instruction = void 0;
+exports.chat = exports.instruction = exports.text = void 0;
 const ContentPart_js_1 = require("../../model-function/generate-text/prompt-template/ContentPart.cjs");
 const InvalidPromptError_js_1 = require("../../model-function/generate-text/prompt-template/InvalidPromptError.cjs");
+const TextPromptTemplate_js_1 = require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs");
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
+/**
+ * Text prompt.
+ */
+function text() {
+    const delegate = (0, TextPromptTemplate_js_1.text)();
+    return {
+        stopSequences: [],
+        format(prompt) {
+            return { text: delegate.format(prompt) };
+        },
+    };
+}
+exports.text = text;
 /**
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
@@ -2,6 +2,10 @@ import { TextGenerationPromptTemplate } from "../../model-function/generate-text
 import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
 import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
 import { LlamaCppCompletionPrompt } from "./LlamaCppCompletionModel.js";
+/**
+ * Text prompt.
+ */
+export declare function text(): TextGenerationPromptTemplate<string, LlamaCppCompletionPrompt>;
 /**
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
@@ -1,8 +1,21 @@
 import { validateContentIsString } from "../../model-function/generate-text/prompt-template/ContentPart.js";
 import { InvalidPromptError } from "../../model-function/generate-text/prompt-template/InvalidPromptError.js";
+import { text as vicunaText } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
+/**
+ * Text prompt.
+ */
+export function text() {
+    const delegate = vicunaText();
+    return {
+        stopSequences: [],
+        format(prompt) {
+            return { text: delegate.format(prompt) };
+        },
+    };
+}
 /**
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
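The new `text()` template added in the three BakLLaVA files above lets a plain string prompt flow through the Vicuna-style template into the `{ text: ... }` prompt structure that llama.cpp expects. A minimal usage sketch of this kind of text template (generator settings are illustrative, not from this diff):

```ts
import { generateText, llamacpp } from "modelfusion";

// withTextPrompt() resolves a text() template via the model's prompt
// template provider and wraps the plain string prompt for llama.cpp.
const model = llamacpp
  .CompletionTextGenerator({ maxGenerationTokens: 512 })
  .withTextPrompt();

const text = await generateText(
  model,
  "Answer in one sentence: why is the sky blue?"
);
```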
@@ -8,14 +8,16 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+const StructureFromTextStreamingModel_js_1 = require("../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
 const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
 const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
 const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
 const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
+const LlamaCppPrompt_js_1 = require("./LlamaCppPrompt.cjs");
 const LlamaCppTokenizer_js_1 = require("./LlamaCppTokenizer.cjs");
-const
+const convertJsonSchemaToGBNF_js_1 = require("./convertJsonSchemaToGBNF.cjs");
 class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
     constructor(settings = {}) {
         super({ settings });
@@ -140,23 +142,23 @@ class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
             schema: (0, ZodSchema_js_1.zodSchema)(llamaCppTextGenerationResponseSchema),
         }));
     }
-    processTextGenerationResponse(response) {
+    processTextGenerationResponse(rawResponse) {
         return {
-            response,
+            rawResponse,
             textGenerationResults: [
                 {
-                    text: response.content,
-                    finishReason: response.stopped_eos || response.stopped_word
+                    text: rawResponse.content,
+                    finishReason: rawResponse.stopped_eos || rawResponse.stopped_word
                         ? "stop"
-                        : response.stopped_limit
+                        : rawResponse.stopped_limit
                             ? "length"
                             : "unknown",
                 },
             ],
             usage: {
-                promptTokens: response.tokens_evaluated,
-                completionTokens: response.tokens_predicted,
-                totalTokens: response.tokens_evaluated + response.tokens_predicted,
+                promptTokens: rawResponse.tokens_evaluated,
+                completionTokens: rawResponse.tokens_predicted,
+                totalTokens: rawResponse.tokens_evaluated + rawResponse.tokens_predicted,
             },
         };
     }
@@ -168,33 +170,38 @@ class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
     extractTextDelta(delta) {
         return delta.content;
     }
-
-
-
-
-
+    asStructureGenerationModel(promptTemplate) {
+        return "adaptModel" in promptTemplate
+            ? new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+                model: promptTemplate.adaptModel(this),
+                template: promptTemplate,
+            })
+            : new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+                model: this,
+                template: promptTemplate,
+            });
     }
-
-
-
-
-
-
+    withJsonOutput(schema) {
+        // don't override the grammar if it's already set (to allow user to override)
+        if (this.settings.grammar != null) {
+            return this;
+        }
+        const grammar = (0, convertJsonSchemaToGBNF_js_1.convertJsonSchemaToGBNF)(schema.getJsonSchema());
+        return this.withSettings({
+            grammar: grammar,
         });
     }
-
-
-
-
-    return
-
-
-
-
-
-
-        promptTemplate,
-    });
+    get promptTemplateProvider() {
+        return this.settings.promptTemplate ?? LlamaCppPrompt_js_1.Text;
+    }
+    withTextPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.text());
+    }
+    withInstructionPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.instruction());
+    }
+    withChatPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.chat());
     }
     /**
      * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
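Taken together, `asStructureGenerationModel`, `withJsonOutput`, and the new `convertJsonSchemaToGBNF` module let the completion model constrain llama.cpp output with a GBNF grammar derived from the target JSON schema (unless a grammar is already set). A sketch of how the pieces plug together, assuming the positional `generateStructure` call style of this release (schema and prompt are illustrative):

```ts
import { z } from "zod";
import {
  generateStructure,
  jsonStructurePrompt,
  llamacpp,
  zodSchema,
} from "modelfusion";

// The structure prompt template applies withJsonOutput(schema) to the model,
// which converts the JSON schema to a GBNF grammar for llama.cpp.
const sentiment = await generateStructure(
  llamacpp
    .CompletionTextGenerator({ temperature: 0, maxGenerationTokens: 1024 })
    .asStructureGenerationModel(jsonStructurePrompt.text()),
  zodSchema(
    z.object({ sentiment: z.enum(["positive", "neutral", "negative"]) })
  ),
  "The new release works great!"
);
```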
@@ -2,11 +2,18 @@ import { z } from "zod";
 import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
+import { JsonSchemaProducer } from "../../core/schema/JsonSchemaProducer.js";
+import { Schema } from "../../core/schema/Schema.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { Delta } from "../../model-function/Delta.js";
+import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
+import { TextGenerationPromptTemplateProvider } from "../../model-function/generate-text/prompt-template/PromptTemplateProvider.js";
 import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
 export interface LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE extends number | undefined> extends TextGenerationModelSettings {
     api?: ApiConfiguration;
@@ -121,6 +128,10 @@ export interface LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE extends num
      * If is -1 the task will be assigned to a Idle slot (default: -1)
      */
     slotId?: number;
+    /**
+     * Prompt template provider that is used when calling `.withTextPrompt()`, `withInstructionPrompt()` or `withChatPrompt()`.
+     */
+    promptTemplate?: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
 }
 export interface LlamaCppCompletionPrompt {
     /**
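The new `promptTemplate` setting selects the dialect that `.withTextPrompt()`, `.withInstructionPrompt()`, and `.withChatPrompt()` resolve to; the `.cjs` hunk above falls back to the `Text` provider from `LlamaCppPrompt` when it is unset. A minimal sketch, assuming a `llamacpp.prompt.Llama2` provider is exported via the updated facade (model settings are illustrative):

```ts
import { llamacpp, streamText } from "modelfusion";

// Configure the Llama 2 chat dialect once; withChatPrompt() then formats
// multi-turn chat prompts accordingly before sending them to llama.cpp.
const model = llamacpp
  .CompletionTextGenerator({
    promptTemplate: llamacpp.prompt.Llama2, // assumed export from LlamaCppPrompt
    maxGenerationTokens: 512,
  })
  .withChatPrompt();

const textStream = await streamText(model, {
  system: "You are a helpful assistant.",
  messages: [{ role: "user", content: "Why is the sky blue?" }],
});
```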
@@ -144,7 +155,7 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
     get settingsForEvent(): Partial<LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>>;
     countPromptTokens(prompt: LlamaCppCompletionPrompt): Promise<number>;
     doGenerateTexts(prompt: LlamaCppCompletionPrompt, options: FunctionCallOptions): Promise<{
-        response: {
+        rawResponse: {
             model: string;
             stop: true;
             content: string;
@@ -204,7 +215,7 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
         };
     }>;
     restoreGeneratedTexts(rawResponse: unknown): {
-        response: {
+        rawResponse: {
             model: string;
             stop: true;
             content: string;
@@ -263,8 +274,8 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
             totalTokens: number;
         };
     };
-    processTextGenerationResponse(response: LlamaCppTextGenerationResponse): {
-        response: {
+    processTextGenerationResponse(rawResponse: LlamaCppTextGenerationResponse): {
+        rawResponse: {
             model: string;
             stop: true;
             content: string;
@@ -376,12 +387,12 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
         content: string;
     }>>>;
     extractTextDelta(delta: unknown): string;
-
+    asStructureGenerationModel<INPUT_PROMPT, LlamaCppPrompt>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, LlamaCppPrompt> | FlexibleStructureFromTextPromptTemplate<INPUT_PROMPT, unknown>): StructureFromTextStreamingModel<INPUT_PROMPT, unknown, TextStreamingModel<unknown, TextGenerationModelSettings>> | StructureFromTextStreamingModel<INPUT_PROMPT, LlamaCppPrompt, TextStreamingModel<LlamaCppPrompt, TextGenerationModelSettings>>;
+    withJsonOutput(schema: Schema<unknown> & JsonSchemaProducer): this;
+    private get promptTemplateProvider();
     withTextPrompt(): PromptTemplateTextStreamingModel<string, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
-
-
-     */
-    withTextPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, PromptTemplateTextStreamingModel<string, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>>;
+    withInstructionPrompt(): PromptTemplateTextStreamingModel<InstructionPrompt, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withChatPrompt(): PromptTemplateTextStreamingModel<ChatPrompt, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     /**
      * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
      */