modelfusion 0.117.0 → 0.118.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +26 -0
- package/README.md +10 -9
- package/core/getFunctionCallLogger.cjs +6 -6
- package/core/getFunctionCallLogger.js +6 -6
- package/model-function/ModelCallEvent.d.ts +1 -1
- package/model-function/embed/EmbeddingEvent.d.ts +1 -1
- package/model-function/embed/EmbeddingModel.d.ts +1 -1
- package/model-function/embed/embed.cjs +5 -5
- package/model-function/embed/embed.d.ts +2 -2
- package/model-function/embed/embed.js +5 -5
- package/model-function/executeStandardCall.cjs +3 -3
- package/model-function/executeStandardCall.d.ts +2 -2
- package/model-function/executeStandardCall.js +3 -3
- package/model-function/generate-image/ImageGenerationEvent.d.ts +1 -1
- package/model-function/generate-image/ImageGenerationModel.d.ts +1 -1
- package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +1 -1
- package/model-function/generate-image/generateImage.cjs +2 -2
- package/model-function/generate-image/generateImage.d.ts +1 -1
- package/model-function/generate-image/generateImage.js +2 -2
- package/model-function/generate-speech/SpeechGenerationEvent.d.ts +1 -1
- package/model-function/generate-speech/generateSpeech.cjs +2 -2
- package/model-function/generate-speech/generateSpeech.d.ts +1 -1
- package/model-function/generate-speech/generateSpeech.js +2 -2
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +1 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +1 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +1 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +1 -1
- package/model-function/generate-structure/StructureGenerationEvent.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.cjs +2 -2
- package/model-function/generate-structure/generateStructure.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.js +2 -2
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +2 -2
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +2 -2
- package/model-function/generate-text/TextGenerationEvent.d.ts +1 -1
- package/model-function/generate-text/TextGenerationModel.d.ts +2 -2
- package/model-function/generate-text/generateText.cjs +3 -3
- package/model-function/generate-text/generateText.d.ts +1 -1
- package/model-function/generate-text/generateText.js +3 -3
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +8 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +5 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +6 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.cjs +2 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.d.ts +8 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.js +1 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +34 -1
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +9 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +31 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +28 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +29 -1
- package/model-function/generate-text/prompt-template/index.cjs +1 -0
- package/model-function/generate-text/prompt-template/index.d.ts +1 -0
- package/model-function/generate-text/prompt-template/index.js +1 -0
- package/model-function/generate-transcription/TranscriptionEvent.d.ts +1 -1
- package/model-function/generate-transcription/TranscriptionModel.d.ts +1 -1
- package/model-function/generate-transcription/generateTranscription.cjs +1 -1
- package/model-function/generate-transcription/generateTranscription.d.ts +1 -1
- package/model-function/generate-transcription/generateTranscription.js +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +3 -3
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.js +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -4
- package/model-provider/cohere/CohereTextGenerationModel.js +3 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +3 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +3 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +3 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -4
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +3 -3
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +15 -1
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -0
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +13 -0
- package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +31 -28
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +17 -8
- package/model-provider/llamacpp/LlamaCppCompletionModel.js +31 -28
- package/model-provider/llamacpp/LlamaCppFacade.cjs +4 -3
- package/model-provider/llamacpp/LlamaCppFacade.d.ts +2 -1
- package/model-provider/llamacpp/LlamaCppFacade.js +2 -1
- package/model-provider/llamacpp/LlamaCppPrompt.cjs +59 -0
- package/model-provider/llamacpp/LlamaCppPrompt.d.ts +14 -0
- package/model-provider/llamacpp/LlamaCppPrompt.js +31 -0
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +3 -3
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +1 -1
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +3 -3
- package/model-provider/llamacpp/index.cjs +2 -3
- package/model-provider/llamacpp/index.d.ts +1 -2
- package/model-provider/llamacpp/index.js +1 -2
- package/model-provider/mistral/MistralChatModel.cjs +3 -3
- package/model-provider/mistral/MistralChatModel.d.ts +4 -4
- package/model-provider/mistral/MistralChatModel.js +3 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.cjs +3 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +1 -1
- package/model-provider/mistral/MistralTextEmbeddingModel.js +3 -3
- package/model-provider/ollama/OllamaChatModel.cjs +3 -3
- package/model-provider/ollama/OllamaChatModel.d.ts +2 -2
- package/model-provider/ollama/OllamaChatModel.js +3 -3
- package/model-provider/ollama/OllamaCompletionModel.cjs +3 -3
- package/model-provider/ollama/OllamaCompletionModel.d.ts +14 -14
- package/model-provider/ollama/OllamaCompletionModel.js +3 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +3 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +1 -1
- package/model-provider/ollama/OllamaTextEmbeddingModel.js +3 -3
- package/model-provider/openai/AbstractOpenAIChatModel.cjs +12 -12
- package/model-provider/openai/AbstractOpenAIChatModel.d.ts +6 -6
- package/model-provider/openai/AbstractOpenAIChatModel.js +12 -12
- package/model-provider/openai/AbstractOpenAICompletionModel.cjs +6 -6
- package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +2 -2
- package/model-provider/openai/AbstractOpenAICompletionModel.js +6 -6
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +3 -3
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.js +3 -3
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -3
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.cjs +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.js +3 -3
- package/model-provider/stability/StabilityImageGenerationModel.cjs +3 -3
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.js +3 -3
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +3 -3
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +1 -1
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +3 -3
- package/package.json +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +2 -2
- package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +2 -2
- package/tool/generate-tool-call/ToolCallGenerationEvent.d.ts +1 -1
- package/tool/generate-tool-call/ToolCallGenerationModel.d.ts +1 -1
- package/tool/generate-tool-call/generateToolCall.cjs +2 -2
- package/tool/generate-tool-call/generateToolCall.js +2 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +2 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.d.ts +1 -1
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +2 -2
- package/tool/generate-tool-calls/ToolCallsGenerationEvent.d.ts +1 -1
- package/tool/generate-tool-calls/ToolCallsGenerationModel.d.ts +1 -1
- package/tool/generate-tool-calls/generateToolCalls.cjs +2 -2
- package/tool/generate-tool-calls/generateToolCalls.d.ts +1 -1
- package/tool/generate-tool-calls/generateToolCalls.js +2 -2
package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts

@@ -2,6 +2,10 @@ import { TextGenerationPromptTemplate } from "../../model-function/generate-text
 import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
 import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
 import { LlamaCppCompletionPrompt } from "./LlamaCppCompletionModel.js";
+/**
+ * Text prompt.
+ */
+export declare function text(): TextGenerationPromptTemplate<string, LlamaCppCompletionPrompt>;
 /**
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *

package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js

@@ -1,8 +1,21 @@
 import { validateContentIsString } from "../../model-function/generate-text/prompt-template/ContentPart.js";
 import { InvalidPromptError } from "../../model-function/generate-text/prompt-template/InvalidPromptError.js";
+import { text as vicunaText } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
+/**
+ * Text prompt.
+ */
+export function text() {
+    const delegate = vicunaText();
+    return {
+        stopSequences: [],
+        format(prompt) {
+            return { text: delegate.format(prompt) };
+        },
+    };
+}
 /**
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
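
The new `text()` template above lets plain string prompts flow through the Vicuna-style BakLLaVA 1 format. A minimal usage sketch, assuming a BakLLaVA 1 model behind a local llama.cpp server and the positional `generateText(model, prompt)` call style of this release:

```ts
import { generateText, llamacpp } from "modelfusion";

// llamacpp.prompt.BakLLaVA1 is the prompt template provider added in this
// release; its text() template delegates to the Vicuna text format above.
const model = llamacpp
  .CompletionTextGenerator({ promptTemplate: llamacpp.prompt.BakLLaVA1 })
  .withTextPrompt();

const text = await generateText(model, "Describe what you can do in one sentence.");
```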

package/model-provider/llamacpp/LlamaCppCompletionModel.cjs

@@ -8,14 +8,16 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+const StructureFromTextStreamingModel_js_1 = require("../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
 const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
 const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
 const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
 const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
-const LlamaCppTokenizer_js_1 = require("./LlamaCppTokenizer.cjs");
 const LlamaCppGrammars_js_1 = require("./LlamaCppGrammars.cjs");
+const LlamaCppPrompt_js_1 = require("./LlamaCppPrompt.cjs");
+const LlamaCppTokenizer_js_1 = require("./LlamaCppTokenizer.cjs");
 class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
     constructor(settings = {}) {
         super({ settings });
@@ -140,23 +142,23 @@ class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
             schema: (0, ZodSchema_js_1.zodSchema)(llamaCppTextGenerationResponseSchema),
         }));
     }
-    processTextGenerationResponse(response) {
+    processTextGenerationResponse(rawResponse) {
         return {
-            response,
+            rawResponse,
             textGenerationResults: [
                 {
-                    text: response.content,
-                    finishReason: response.stopped_eos || response.stopped_word
+                    text: rawResponse.content,
+                    finishReason: rawResponse.stopped_eos || rawResponse.stopped_word
                         ? "stop"
-                        : response.stopped_limit
+                        : rawResponse.stopped_limit
                             ? "length"
                             : "unknown",
                 },
             ],
             usage: {
-                promptTokens: response.tokens_evaluated,
-                completionTokens: response.tokens_predicted,
-                totalTokens: response.tokens_evaluated + response.tokens_predicted,
+                promptTokens: rawResponse.tokens_evaluated,
+                completionTokens: rawResponse.tokens_predicted,
+                totalTokens: rawResponse.tokens_evaluated + rawResponse.tokens_predicted,
             },
         };
     }
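
This `response` → `rawResponse` rename repeats across the model call results in this release (the embedding and Mistral hunks below follow the same pattern). For callers, the visible effect is the property name in the `fullResponse: true` result. A sketch, assuming the positional call style of this release:

```ts
import { generateText, llamacpp } from "modelfusion";

const { text, rawResponse } = await generateText(
  llamacpp.CompletionTextGenerator().withTextPrompt(),
  "Write a one-line greeting.",
  { fullResponse: true } // 0.117.0 returned `response` here; 0.118.0 returns `rawResponse`
);

// The raw llama.cpp response fields mapped above are reachable directly:
console.log(rawResponse.tokens_evaluated, rawResponse.tokens_predicted);
```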
@@ -168,33 +170,34 @@ class LlamaCppCompletionModel extends AbstractModel_js_1.AbstractModel {
     extractTextDelta(delta) {
         return delta.content;
     }
+    asStructureGenerationModel(promptTemplate) {
+        return "adaptModel" in promptTemplate
+            ? new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+                model: promptTemplate.adaptModel(this),
+                template: promptTemplate,
+            })
+            : new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+                model: this,
+                template: promptTemplate,
+            });
+    }
     withJsonOutput() {
         // don't override the grammar if it's already set (to support more restrictive grammars)
         return this.settings.grammar == null
             ? this.withSettings({ grammar: LlamaCppGrammars_js_1.json })
             : this;
     }
+    get promptTemplateProvider() {
+        return this.settings.promptTemplate ?? LlamaCppPrompt_js_1.Text;
+    }
     withTextPrompt() {
-        return this.withPromptTemplate({
-            format(prompt) {
-                return { text: prompt };
-            },
-            stopSequences: [],
-        });
+        return this.withPromptTemplate(this.promptTemplateProvider.text());
     }
-    /**
-     * Maps the prompt for a text version of the Llama.cpp prompt template (without image support).
-     */
-    withTextPromptTemplate(promptTemplate) {
-        return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
-            model: this.withTextPrompt().withSettings({
-                stopSequences: [
-                    ...(this.settings.stopSequences ?? []),
-                    ...promptTemplate.stopSequences,
-                ],
-            }),
-            promptTemplate,
-        });
+    withInstructionPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.instruction());
+    }
+    withChatPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.chat());
     }
     /**
      * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
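
`withInstructionPrompt()` and `withChatPrompt()` are new here; both resolve their template through the `promptTemplateProvider` getter, which falls back to the plain `Text` provider when the `promptTemplate` setting is absent. A hedged sketch of the instruction variant:

```ts
import { generateText, llamacpp } from "modelfusion";

// Without a promptTemplate setting, the default Text provider formats the
// instruction prompt (system + instruction) as plain text.
const model = llamacpp.CompletionTextGenerator().withInstructionPrompt();

const text = await generateText(model, {
  system: "You are a concise assistant.",
  instruction: "Explain GBNF grammars in one sentence.",
});
```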

package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts

@@ -4,9 +4,14 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { Delta } from "../../model-function/Delta.js";
+import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
+import { TextGenerationPromptTemplateProvider } from "../../model-function/generate-text/prompt-template/PromptTemplateProvider.js";
 import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
 export interface LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE extends number | undefined> extends TextGenerationModelSettings {
     api?: ApiConfiguration;
@@ -121,6 +126,10 @@ export interface LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE extends num
      * If is -1 the task will be assigned to a Idle slot (default: -1)
      */
     slotId?: number;
+    /**
+     * Prompt template provider that is used when calling `.withTextPrompt()`, `withInstructionPrompt()` or `withChatPrompt()`.
+     */
+    promptTemplate?: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
 }
 export interface LlamaCppCompletionPrompt {
     /**
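
The `promptTemplate` setting accepts one of the providers exported from the new `LlamaCppPrompt` module (surfaced as `llamacpp.prompt`, see below). A sketch that wires the ChatML templates into the chat shortcut; the prompt contents are illustrative:

```ts
import { llamacpp, streamText } from "modelfusion";

const model = llamacpp
  .CompletionTextGenerator({
    promptTemplate: llamacpp.prompt.ChatML, // or Llama2, Mistral, Vicuna, ...
  })
  .withChatPrompt();

const textStream = await streamText(model, {
  system: "You are a helpful assistant.",
  messages: [{ role: "user", content: "Hello!" }],
});

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```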
@@ -144,7 +153,7 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
     get settingsForEvent(): Partial<LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>>;
     countPromptTokens(prompt: LlamaCppCompletionPrompt): Promise<number>;
     doGenerateTexts(prompt: LlamaCppCompletionPrompt, options: FunctionCallOptions): Promise<{
-        response: {
+        rawResponse: {
             model: string;
             stop: true;
             content: string;
@@ -204,7 +213,7 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
         };
     }>;
     restoreGeneratedTexts(rawResponse: unknown): {
-        response: {
+        rawResponse: {
             model: string;
             stop: true;
             content: string;
@@ -263,8 +272,8 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
             totalTokens: number;
         };
     };
-    processTextGenerationResponse(response: LlamaCppTextGenerationResponse): {
-        response: {
+    processTextGenerationResponse(rawResponse: LlamaCppTextGenerationResponse): {
+        rawResponse: {
             model: string;
             stop: true;
             content: string;
@@ -376,12 +385,12 @@ export declare class LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE extends number
         content: string;
     }>>>;
     extractTextDelta(delta: unknown): string;
+    asStructureGenerationModel<INPUT_PROMPT, LlamaCppPrompt>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, LlamaCppPrompt> | FlexibleStructureFromTextPromptTemplate<INPUT_PROMPT, unknown>): StructureFromTextStreamingModel<INPUT_PROMPT, unknown, TextStreamingModel<unknown, TextGenerationModelSettings>> | StructureFromTextStreamingModel<INPUT_PROMPT, LlamaCppPrompt, TextStreamingModel<LlamaCppPrompt, TextGenerationModelSettings>>;
     withJsonOutput(): this;
+    private get promptTemplateProvider();
     withTextPrompt(): PromptTemplateTextStreamingModel<string, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
-    /**
-     * Maps the prompt for a text version of the Llama.cpp prompt template (without image support).
-     */
-    withTextPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, PromptTemplateTextStreamingModel<string, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>>;
+    withInstructionPrompt(): PromptTemplateTextStreamingModel<InstructionPrompt, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withChatPrompt(): PromptTemplateTextStreamingModel<ChatPrompt, LlamaCppCompletionPrompt, LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     /**
      * Maps the prompt for the full Llama.cpp prompt template (incl. image support).
     */

package/model-provider/llamacpp/LlamaCppCompletionModel.js

@@ -5,14 +5,16 @@ import { zodSchema } from "../../core/schema/ZodSchema.js";
 import { parseJSON } from "../../core/schema/parseJSON.js";
 import { validateTypes } from "../../core/schema/validateTypes.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
 import { LlamaCppApiConfiguration } from "./LlamaCppApiConfiguration.js";
 import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
-import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
 import { json } from "./LlamaCppGrammars.js";
+import { Text } from "./LlamaCppPrompt.js";
+import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
 export class LlamaCppCompletionModel extends AbstractModel {
     constructor(settings = {}) {
         super({ settings });
@@ -137,23 +139,23 @@ export class LlamaCppCompletionModel extends AbstractModel {
             schema: zodSchema(llamaCppTextGenerationResponseSchema),
         }));
     }
-    processTextGenerationResponse(response) {
+    processTextGenerationResponse(rawResponse) {
         return {
-            response,
+            rawResponse,
             textGenerationResults: [
                 {
-                    text: response.content,
-                    finishReason: response.stopped_eos || response.stopped_word
+                    text: rawResponse.content,
+                    finishReason: rawResponse.stopped_eos || rawResponse.stopped_word
                         ? "stop"
-                        : response.stopped_limit
+                        : rawResponse.stopped_limit
                             ? "length"
                             : "unknown",
                 },
             ],
             usage: {
-                promptTokens: response.tokens_evaluated,
-                completionTokens: response.tokens_predicted,
-                totalTokens: response.tokens_evaluated + response.tokens_predicted,
+                promptTokens: rawResponse.tokens_evaluated,
+                completionTokens: rawResponse.tokens_predicted,
+                totalTokens: rawResponse.tokens_evaluated + rawResponse.tokens_predicted,
             },
         };
     }
@@ -165,33 +167,34 @@ export class LlamaCppCompletionModel extends AbstractModel {
     extractTextDelta(delta) {
         return delta.content;
     }
+    asStructureGenerationModel(promptTemplate) {
+        return "adaptModel" in promptTemplate
+            ? new StructureFromTextStreamingModel({
+                model: promptTemplate.adaptModel(this),
+                template: promptTemplate,
+            })
+            : new StructureFromTextStreamingModel({
+                model: this,
+                template: promptTemplate,
+            });
+    }
     withJsonOutput() {
         // don't override the grammar if it's already set (to support more restrictive grammars)
         return this.settings.grammar == null
             ? this.withSettings({ grammar: json })
             : this;
     }
+    get promptTemplateProvider() {
+        return this.settings.promptTemplate ?? Text;
+    }
     withTextPrompt() {
-        return this.withPromptTemplate({
-            format(prompt) {
-                return { text: prompt };
-            },
-            stopSequences: [],
-        });
+        return this.withPromptTemplate(this.promptTemplateProvider.text());
     }
-    /**
-     * Maps the prompt for a text version of the Llama.cpp prompt template (without image support).
-     */
-    withTextPromptTemplate(promptTemplate) {
-        return new PromptTemplateTextStreamingModel({
-            model: this.withTextPrompt().withSettings({
-                stopSequences: [
-                    ...(this.settings.stopSequences ?? []),
-                    ...promptTemplate.stopSequences,
-                ],
-            }),
-            promptTemplate,
-        });
+    withInstructionPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.instruction());
+    }
+    withChatPrompt() {
+        return this.withPromptTemplate(this.promptTemplateProvider.chat());
     }
     /**
      * Maps the prompt for the full Llama.cpp prompt template (incl. image support).

package/model-provider/llamacpp/LlamaCppFacade.cjs

@@ -23,7 +23,7 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.grammar = exports.Tokenizer = exports.TextEmbedder = exports.TextGenerator = exports.Api = void 0;
+exports.prompt = exports.grammar = exports.Tokenizer = exports.TextEmbedder = exports.CompletionTextGenerator = exports.Api = void 0;
 const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
 const LlamaCppCompletionModel_js_1 = require("./LlamaCppCompletionModel.cjs");
 const LlamaCppTextEmbeddingModel_js_1 = require("./LlamaCppTextEmbeddingModel.cjs");
@@ -36,10 +36,10 @@ function Api(settings) {
     return new LlamaCppApiConfiguration_js_1.LlamaCppApiConfiguration(settings);
 }
 exports.Api = Api;
-function TextGenerator(settings = {}) {
+function CompletionTextGenerator(settings = {}) {
     return new LlamaCppCompletionModel_js_1.LlamaCppCompletionModel(settings);
 }
-exports.TextGenerator = TextGenerator;
+exports.CompletionTextGenerator = CompletionTextGenerator;
 function TextEmbedder(settings = {}) {
     return new LlamaCppTextEmbeddingModel_js_1.LlamaCppTextEmbeddingModel(settings);
 }
@@ -52,3 +52,4 @@ exports.Tokenizer = Tokenizer;
  * GBNF grammars. You can use them in the `grammar` option of the `TextGenerator` model.
  */
 exports.grammar = __importStar(require("./LlamaCppGrammars.cjs"));
+exports.prompt = __importStar(require("./LlamaCppPrompt.cjs"));

package/model-provider/llamacpp/LlamaCppFacade.d.ts

@@ -9,10 +9,11 @@ import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
  * It calls the API at http://127.0.0.1:8080 by default.
  */
 export declare function Api(settings: PartialBaseUrlPartsApiConfigurationOptions): LlamaCppApiConfiguration;
-export declare function TextGenerator<CONTEXT_WINDOW_SIZE extends number>(settings?: LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>): LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE>;
+export declare function CompletionTextGenerator<CONTEXT_WINDOW_SIZE extends number>(settings?: LlamaCppCompletionModelSettings<CONTEXT_WINDOW_SIZE>): LlamaCppCompletionModel<CONTEXT_WINDOW_SIZE>;
 export declare function TextEmbedder(settings?: LlamaCppTextEmbeddingModelSettings): LlamaCppTextEmbeddingModel;
 export declare function Tokenizer(api?: ApiConfiguration): LlamaCppTokenizer;
 /**
  * GBNF grammars. You can use them in the `grammar` option of the `TextGenerator` model.
  */
 export * as grammar from "./LlamaCppGrammars.js";
+export * as prompt from "./LlamaCppPrompt.js";

package/model-provider/llamacpp/LlamaCppFacade.js

@@ -9,7 +9,7 @@ import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
 export function Api(settings) {
     return new LlamaCppApiConfiguration(settings);
 }
-export function TextGenerator(settings = {}) {
+export function CompletionTextGenerator(settings = {}) {
     return new LlamaCppCompletionModel(settings);
 }
 export function TextEmbedder(settings = {}) {
@@ -22,3 +22,4 @@ export function Tokenizer(api = new LlamaCppApiConfiguration()) {
  * GBNF grammars. You can use them in the `grammar` option of the `TextGenerator` model.
  */
 export * as grammar from "./LlamaCppGrammars.js";
+export * as prompt from "./LlamaCppPrompt.js";
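
The truncated removed lines in the facade hunks are consistent with the factory previously being named `TextGenerator`; under that assumption, the rename is a breaking change for callers:

```ts
import { llamacpp } from "modelfusion";

// 0.117.0 (previous name, inferred from the truncated removed lines):
// const model = llamacpp.TextGenerator({ contextWindowSize: 4096 });

// 0.118.0:
const model = llamacpp.CompletionTextGenerator({ contextWindowSize: 4096 });
```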

package/model-provider/llamacpp/LlamaCppPrompt.cjs (new file)

@@ -0,0 +1,59 @@
+"use strict";
+var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    var desc = Object.getOwnPropertyDescriptor(m, k);
+    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+      desc = { enumerable: true, get: function() { return m[k]; } };
+    }
+    Object.defineProperty(o, k2, desc);
+}) : (function(o, m, k, k2) {
+    if (k2 === undefined) k2 = k;
+    o[k2] = m[k];
+}));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.BakLLaVA1 = exports.Vicuna = exports.Alpaca = exports.NeuralChat = exports.Llama2 = exports.ChatML = exports.Mistral = exports.Text = exports.asLlamaCppTextPromptTemplateProvider = exports.asLlamaCppPromptTemplate = void 0;
+const alpacaPrompt = __importStar(require("../../model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs"));
+const chatMlPrompt = __importStar(require("../../model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs"));
+const llama2Prompt = __importStar(require("../../model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs"));
+const mistralPrompt = __importStar(require("../../model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs"));
+const neuralChatPrompt = __importStar(require("../../model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs"));
+const textPrompt = __importStar(require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs"));
+const vicunaPrompt = __importStar(require("../../model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs"));
+const LlamaCppBakLLaVA1Prompt = __importStar(require("./LlamaCppBakLLaVA1PromptTemplate.cjs"));
+function asLlamaCppPromptTemplate(promptTemplate) {
+    return {
+        format: (prompt) => ({
+            text: promptTemplate.format(prompt),
+        }),
+        stopSequences: promptTemplate.stopSequences,
+    };
+}
+exports.asLlamaCppPromptTemplate = asLlamaCppPromptTemplate;
+function asLlamaCppTextPromptTemplateProvider(promptTemplateProvider) {
+    return {
+        text: () => asLlamaCppPromptTemplate(promptTemplateProvider.text()),
+        instruction: () => asLlamaCppPromptTemplate(promptTemplateProvider.instruction()),
+        chat: () => asLlamaCppPromptTemplate(promptTemplateProvider.chat()),
+    };
+}
+exports.asLlamaCppTextPromptTemplateProvider = asLlamaCppTextPromptTemplateProvider;
+exports.Text = asLlamaCppTextPromptTemplateProvider(textPrompt);
+exports.Mistral = asLlamaCppTextPromptTemplateProvider(mistralPrompt);
+exports.ChatML = asLlamaCppTextPromptTemplateProvider(chatMlPrompt);
+exports.Llama2 = asLlamaCppTextPromptTemplateProvider(llama2Prompt);
+exports.NeuralChat = asLlamaCppTextPromptTemplateProvider(neuralChatPrompt);
+exports.Alpaca = asLlamaCppTextPromptTemplateProvider(alpacaPrompt);
+exports.Vicuna = asLlamaCppTextPromptTemplateProvider(vicunaPrompt);
+exports.BakLLaVA1 = LlamaCppBakLLaVA1Prompt;

package/model-provider/llamacpp/LlamaCppPrompt.d.ts (new file)

@@ -0,0 +1,14 @@
+import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+import { TextGenerationPromptTemplateProvider } from "../../model-function/generate-text/prompt-template/PromptTemplateProvider.js";
+import * as LlamaCppBakLLaVA1Prompt from "./LlamaCppBakLLaVA1PromptTemplate.js";
+import { LlamaCppCompletionPrompt } from "./LlamaCppCompletionModel.js";
+export declare function asLlamaCppPromptTemplate<SOURCE_PROMPT>(promptTemplate: TextGenerationPromptTemplate<SOURCE_PROMPT, string>): TextGenerationPromptTemplate<SOURCE_PROMPT, LlamaCppCompletionPrompt>;
+export declare function asLlamaCppTextPromptTemplateProvider(promptTemplateProvider: TextGenerationPromptTemplateProvider<string>): TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
+export declare const Text: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
+export declare const Mistral: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
+export declare const ChatML: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
+export declare const Llama2: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
+export declare const NeuralChat: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
+export declare const Alpaca: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
+export declare const Vicuna: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
+export declare const BakLLaVA1: typeof LlamaCppBakLLaVA1Prompt;

package/model-provider/llamacpp/LlamaCppPrompt.js (new file)

@@ -0,0 +1,31 @@
+import * as alpacaPrompt from "../../model-function/generate-text/prompt-template/AlpacaPromptTemplate.js";
+import * as chatMlPrompt from "../../model-function/generate-text/prompt-template/ChatMLPromptTemplate.js";
+import * as llama2Prompt from "../../model-function/generate-text/prompt-template/Llama2PromptTemplate.js";
+import * as mistralPrompt from "../../model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js";
+import * as neuralChatPrompt from "../../model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js";
+import * as textPrompt from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
+import * as vicunaPrompt from "../../model-function/generate-text/prompt-template/VicunaPromptTemplate.js";
+import * as LlamaCppBakLLaVA1Prompt from "./LlamaCppBakLLaVA1PromptTemplate.js";
+export function asLlamaCppPromptTemplate(promptTemplate) {
+    return {
+        format: (prompt) => ({
+            text: promptTemplate.format(prompt),
+        }),
+        stopSequences: promptTemplate.stopSequences,
+    };
+}
+export function asLlamaCppTextPromptTemplateProvider(promptTemplateProvider) {
+    return {
+        text: () => asLlamaCppPromptTemplate(promptTemplateProvider.text()),
+        instruction: () => asLlamaCppPromptTemplate(promptTemplateProvider.instruction()),
+        chat: () => asLlamaCppPromptTemplate(promptTemplateProvider.chat()),
+    };
+}
+export const Text = asLlamaCppTextPromptTemplateProvider(textPrompt);
+export const Mistral = asLlamaCppTextPromptTemplateProvider(mistralPrompt);
+export const ChatML = asLlamaCppTextPromptTemplateProvider(chatMlPrompt);
+export const Llama2 = asLlamaCppTextPromptTemplateProvider(llama2Prompt);
+export const NeuralChat = asLlamaCppTextPromptTemplateProvider(neuralChatPrompt);
+export const Alpaca = asLlamaCppTextPromptTemplateProvider(alpacaPrompt);
+export const Vicuna = asLlamaCppTextPromptTemplateProvider(vicunaPrompt);
+export const BakLLaVA1 = LlamaCppBakLLaVA1Prompt;
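
`asLlamaCppTextPromptTemplateProvider` lifts any plain-text template provider into the llama.cpp `{ text }` prompt shape, which also covers hand-rolled providers. A deliberately minimal sketch; the format strings below are hypothetical, not templates shipped by the library:

```ts
import { llamacpp } from "modelfusion";

const minimalProvider = llamacpp.prompt.asLlamaCppTextPromptTemplateProvider({
  text: () => ({ stopSequences: [], format: (prompt) => prompt }),
  instruction: () => ({
    stopSequences: [],
    format: (prompt) =>
      `### Instruction:\n${prompt.instruction}\n\n### Response:\n`,
  }),
  chat: () => ({
    stopSequences: ["\nuser:"],
    format: (prompt) =>
      prompt.messages.map((m) => `${m.role}: ${m.content}`).join("\n") +
      "\nassistant:",
  }),
});

// Usable anywhere a provider is expected, e.g. the promptTemplate setting:
const model = llamacpp.CompletionTextGenerator({ promptTemplate: minimalProvider });
```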

package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs

@@ -80,10 +80,10 @@ class LlamaCppTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
         };
     }
     async doEmbedValues(texts, options) {
-        const response = await this.callAPI(texts, options);
+        const rawResponse = await this.callAPI(texts, options);
         return {
-            response,
-            embeddings: [response.embedding],
+            rawResponse,
+            embeddings: [rawResponse.embedding],
         };
     }
     withSettings(additionalSettings) {

package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts

@@ -21,7 +21,7 @@ export declare class LlamaCppTextEmbeddingModel extends AbstractModel<LlamaCppTe
     callAPI(texts: Array<string>, callOptions: FunctionCallOptions): Promise<LlamaCppTextEmbeddingResponse>;
     get settingsForEvent(): Partial<LlamaCppTextEmbeddingModelSettings>;
     doEmbedValues(texts: string[], options: FunctionCallOptions): Promise<{
-        response: {
+        rawResponse: {
             embedding: number[];
         };
         embeddings: number[][];

package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js

@@ -77,10 +77,10 @@ export class LlamaCppTextEmbeddingModel extends AbstractModel {
         };
     }
     async doEmbedValues(texts, options) {
-        const response = await this.callAPI(texts, options);
+        const rawResponse = await this.callAPI(texts, options);
         return {
-            response,
-            embeddings: [response.embedding],
+            rawResponse,
+            embeddings: [rawResponse.embedding],
         };
     }
     withSettings(additionalSettings) {

package/model-provider/llamacpp/index.cjs

@@ -26,10 +26,9 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.llamacpp = exports.LlamaCppBakLLaVA1Prompt = void 0;
+exports.llamacpp = void 0;
 __exportStar(require("./LlamaCppApiConfiguration.cjs"), exports);
-exports.LlamaCppBakLLaVA1Prompt = __importStar(require("./LlamaCppBakLLaVA1PromptTemplate.cjs"));
+__exportStar(require("./LlamaCppCompletionModel.cjs"), exports);
 exports.llamacpp = __importStar(require("./LlamaCppFacade.cjs"));
 __exportStar(require("./LlamaCppTextEmbeddingModel.cjs"), exports);
-__exportStar(require("./LlamaCppCompletionModel.cjs"), exports);
 __exportStar(require("./LlamaCppTokenizer.cjs"), exports);

package/model-provider/llamacpp/index.d.ts

@@ -1,7 +1,6 @@
 export * from "./LlamaCppApiConfiguration.js";
-export * as LlamaCppBakLLaVA1Prompt from "./LlamaCppBakLLaVA1PromptTemplate.js";
+export * from "./LlamaCppCompletionModel.js";
 export { LlamaCppErrorData } from "./LlamaCppError.js";
 export * as llamacpp from "./LlamaCppFacade.js";
 export * from "./LlamaCppTextEmbeddingModel.js";
-export * from "./LlamaCppCompletionModel.js";
 export * from "./LlamaCppTokenizer.js";

package/model-provider/llamacpp/index.js

@@ -1,6 +1,5 @@
 export * from "./LlamaCppApiConfiguration.js";
-export * as LlamaCppBakLLaVA1Prompt from "./LlamaCppBakLLaVA1PromptTemplate.js";
+export * from "./LlamaCppCompletionModel.js";
 export * as llamacpp from "./LlamaCppFacade.js";
 export * from "./LlamaCppTextEmbeddingModel.js";
-export * from "./LlamaCppCompletionModel.js";
 export * from "./LlamaCppTokenizer.js";

package/model-provider/mistral/MistralChatModel.cjs

@@ -97,10 +97,10 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
             schema: (0, ZodSchema_js_1.zodSchema)(mistralChatResponseSchema),
         }));
     }
-    processTextGenerationResponse(response) {
+    processTextGenerationResponse(rawResponse) {
         return {
-            response,
-            textGenerationResults: response.choices.map((choice) => ({
+            rawResponse,
+            textGenerationResults: rawResponse.choices.map((choice) => ({
                 text: choice.message.content,
                 finishReason: this.translateFinishReason(choice.finish_reason),
             })),

package/model-provider/mistral/MistralChatModel.d.ts

@@ -59,7 +59,7 @@ export declare class MistralChatModel extends AbstractModel<MistralChatModelSett
     }): Promise<RESULT>;
     get settingsForEvent(): Partial<MistralChatModelSettings>;
     doGenerateTexts(prompt: MistralChatPrompt, options: FunctionCallOptions): Promise<{
-        response: {
+        rawResponse: {
             object: string;
             model: string;
             usage: {
@@ -84,7 +84,7 @@ export declare class MistralChatModel extends AbstractModel<MistralChatModelSett
         }[];
     }>;
     restoreGeneratedTexts(rawResponse: unknown): {
-        response: {
+        rawResponse: {
             object: string;
             model: string;
             usage: {
@@ -108,8 +108,8 @@ export declare class MistralChatModel extends AbstractModel<MistralChatModelSett
             finishReason: TextGenerationFinishReason;
         }[];
     };
-    processTextGenerationResponse(response: MistralChatResponse): {
-        response: {
+    processTextGenerationResponse(rawResponse: MistralChatResponse): {
+        rawResponse: {
             object: string;
             model: string;
             usage: {

package/model-provider/mistral/MistralChatModel.js

@@ -94,10 +94,10 @@ export class MistralChatModel extends AbstractModel {
             schema: zodSchema(mistralChatResponseSchema),
         }));
     }
-    processTextGenerationResponse(response) {
+    processTextGenerationResponse(rawResponse) {
         return {
-            response,
-            textGenerationResults: response.choices.map((choice) => ({
+            rawResponse,
+            textGenerationResults: rawResponse.choices.map((choice) => ({
                 text: choice.message.content,
                 finishReason: this.translateFinishReason(choice.finish_reason),
             })),

package/model-provider/mistral/MistralTextEmbeddingModel.cjs

@@ -79,10 +79,10 @@ class MistralTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
         };
     }
     async doEmbedValues(texts, options) {
-        const response = await this.callAPI(texts, options);
+        const rawResponse = await this.callAPI(texts, options);
         return {
-            response,
-            embeddings: response.data.map((entry) => entry.embedding),
+            rawResponse,
+            embeddings: rawResponse.data.map((entry) => entry.embedding),
         };
     }
     withSettings(additionalSettings) {

package/model-provider/mistral/MistralTextEmbeddingModel.d.ts

@@ -30,7 +30,7 @@ export declare class MistralTextEmbeddingModel extends AbstractModel<MistralText
     callAPI(texts: Array<string>, callOptions: FunctionCallOptions): Promise<MistralTextEmbeddingResponse>;
     get settingsForEvent(): Partial<MistralTextEmbeddingModelSettings>;
     doEmbedValues(texts: string[], options: FunctionCallOptions): Promise<{
-        response: {
+        rawResponse: {
             object: string;
             model: string;
             usage: {