modelfusion 0.116.1 → 0.118.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +67 -0
- package/README.md +14 -11
- package/core/getFunctionCallLogger.cjs +6 -6
- package/core/getFunctionCallLogger.js +6 -6
- package/model-function/ModelCallEvent.d.ts +1 -1
- package/model-function/embed/EmbeddingEvent.d.ts +1 -1
- package/model-function/embed/EmbeddingModel.d.ts +1 -1
- package/model-function/embed/embed.cjs +5 -5
- package/model-function/embed/embed.d.ts +2 -2
- package/model-function/embed/embed.js +5 -5
- package/model-function/executeStandardCall.cjs +3 -3
- package/model-function/executeStandardCall.d.ts +2 -2
- package/model-function/executeStandardCall.js +3 -3
- package/model-function/generate-image/ImageGenerationEvent.d.ts +1 -1
- package/model-function/generate-image/ImageGenerationModel.d.ts +1 -1
- package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +1 -1
- package/model-function/generate-image/generateImage.cjs +2 -2
- package/model-function/generate-image/generateImage.d.ts +1 -1
- package/model-function/generate-image/generateImage.js +2 -2
- package/model-function/generate-speech/SpeechGenerationEvent.d.ts +1 -1
- package/model-function/generate-speech/generateSpeech.cjs +2 -2
- package/model-function/generate-speech/generateSpeech.d.ts +1 -1
- package/model-function/generate-speech/generateSpeech.js +2 -2
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +1 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +1 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +1 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +1 -1
- package/model-function/generate-structure/StructureGenerationEvent.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.cjs +2 -2
- package/model-function/generate-structure/generateStructure.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.js +2 -2
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +2 -2
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +2 -2
- package/model-function/generate-text/TextGenerationEvent.d.ts +1 -1
- package/model-function/generate-text/TextGenerationModel.d.ts +2 -2
- package/model-function/generate-text/generateText.cjs +3 -3
- package/model-function/generate-text/generateText.d.ts +1 -1
- package/model-function/generate-text/generateText.js +3 -3
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +8 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +5 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +6 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.cjs +2 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.d.ts +8 -0
- package/model-function/generate-text/prompt-template/PromptTemplateProvider.js +1 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +34 -1
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +9 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +31 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +28 -0
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +29 -1
- package/model-function/generate-text/prompt-template/index.cjs +1 -0
- package/model-function/generate-text/prompt-template/index.d.ts +1 -0
- package/model-function/generate-text/prompt-template/index.js +1 -0
- package/model-function/generate-transcription/TranscriptionEvent.d.ts +1 -1
- package/model-function/generate-transcription/TranscriptionModel.d.ts +1 -1
- package/model-function/generate-transcription/generateTranscription.cjs +1 -1
- package/model-function/generate-transcription/generateTranscription.d.ts +1 -1
- package/model-function/generate-transcription/generateTranscription.js +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +3 -3
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +3 -3
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.js +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -4
- package/model-provider/cohere/CohereTextGenerationModel.js +3 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +3 -3
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +3 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +3 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -4
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +3 -3
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +15 -1
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -0
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +13 -0
- package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +37 -27
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +18 -8
- package/model-provider/llamacpp/LlamaCppCompletionModel.js +37 -27
- package/model-provider/llamacpp/LlamaCppFacade.cjs +31 -3
- package/model-provider/llamacpp/LlamaCppFacade.d.ts +6 -1
- package/model-provider/llamacpp/LlamaCppFacade.js +6 -1
- package/model-provider/llamacpp/LlamaCppGrammars.cjs +84 -0
- package/model-provider/llamacpp/LlamaCppGrammars.d.ts +18 -0
- package/model-provider/llamacpp/LlamaCppGrammars.js +81 -0
- package/model-provider/llamacpp/LlamaCppPrompt.cjs +59 -0
- package/model-provider/llamacpp/LlamaCppPrompt.d.ts +14 -0
- package/model-provider/llamacpp/LlamaCppPrompt.js +31 -0
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +3 -3
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +1 -1
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +3 -3
- package/model-provider/llamacpp/index.cjs +2 -3
- package/model-provider/llamacpp/index.d.ts +1 -2
- package/model-provider/llamacpp/index.js +1 -2
- package/model-provider/mistral/MistralChatModel.cjs +3 -3
- package/model-provider/mistral/MistralChatModel.d.ts +4 -4
- package/model-provider/mistral/MistralChatModel.js +3 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.cjs +3 -3
- package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +1 -1
- package/model-provider/mistral/MistralTextEmbeddingModel.js +3 -3
- package/model-provider/ollama/OllamaChatModel.cjs +3 -3
- package/model-provider/ollama/OllamaChatModel.d.ts +2 -2
- package/model-provider/ollama/OllamaChatModel.js +3 -3
- package/model-provider/ollama/OllamaCompletionModel.cjs +3 -3
- package/model-provider/ollama/OllamaCompletionModel.d.ts +14 -14
- package/model-provider/ollama/OllamaCompletionModel.js +3 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +3 -3
- package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +1 -1
- package/model-provider/ollama/OllamaTextEmbeddingModel.js +3 -3
- package/model-provider/openai/AbstractOpenAIChatModel.cjs +12 -12
- package/model-provider/openai/AbstractOpenAIChatModel.d.ts +6 -6
- package/model-provider/openai/AbstractOpenAIChatModel.js +12 -12
- package/model-provider/openai/AbstractOpenAICompletionModel.cjs +6 -6
- package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +2 -2
- package/model-provider/openai/AbstractOpenAICompletionModel.js +6 -6
- package/model-provider/openai/OpenAIImageGenerationModel.cjs +3 -3
- package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIImageGenerationModel.js +3 -3
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -3
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.cjs +3 -3
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
- package/model-provider/openai/OpenAITranscriptionModel.js +3 -3
- package/model-provider/stability/StabilityImageGenerationModel.cjs +3 -3
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.js +3 -3
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +3 -3
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +1 -1
- package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +3 -3
- package/package.json +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +2 -2
- package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +2 -2
- package/tool/generate-tool-call/ToolCallGenerationEvent.d.ts +1 -1
- package/tool/generate-tool-call/ToolCallGenerationModel.d.ts +1 -1
- package/tool/generate-tool-call/generateToolCall.cjs +2 -2
- package/tool/generate-tool-call/generateToolCall.js +2 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +2 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.d.ts +1 -1
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +2 -2
- package/tool/generate-tool-calls/ToolCallsGenerationEvent.d.ts +1 -1
- package/tool/generate-tool-calls/ToolCallsGenerationModel.d.ts +1 -1
- package/tool/generate-tool-calls/generateToolCalls.cjs +2 -2
- package/tool/generate-tool-calls/generateToolCalls.d.ts +1 -1
- package/tool/generate-tool-calls/generateToolCalls.js +2 -2
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,72 @@
|
|
1
1
|
# Changelog
|
2
2
|
|
3
|
+
## v0.118.0 - 2024-01-07
|
4
|
+
|
5
|
+
### Added
|
6
|
+
|
7
|
+
- `LlamaCppCompletionModel` supports setting the prompt template in the settings. Prompt templates are available under `llamacpp.prompt.*`. You can then call `.withTextPrompt()`, `.withInstructionPrompt()` or `.withChatPrompt()` to use a standardized prompt.
|
8
|
+
|
9
|
+
```ts
|
10
|
+
const model = llamacpp
|
11
|
+
.CompletionTextGenerator({
|
12
|
+
// run https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF with llama.cpp
|
13
|
+
promptTemplate: llamacpp.prompt.ChatML,
|
14
|
+
contextWindowSize: 4096,
|
15
|
+
maxGenerationTokens: 512,
|
16
|
+
})
|
17
|
+
.withChatPrompt();
|
18
|
+
```
|
19
|
+
|
20
|
+
### Changed
|
21
|
+
|
22
|
+
- **breaking change**: renamed `response` to `rawResponse` when using `fullResponse: true` setting.
|
23
|
+
- **breaking change**: renamed `llamacpp.TextGenerator` to `llamacpp.CompletionTextGenerator`.
|
24
|
+
|
25
|
+
### Removed
|
26
|
+
|
27
|
+
- **breaking change**: removed `.withTextPromptTemplate` on `LlamaCppCompletionModel`.
|
28
|
+
|
29
|
+
## v0.117.0 - 2024-01-06
|
30
|
+
|
31
|
+
### Added
|
32
|
+
|
33
|
+
- Predefined Llama.cpp GBNF grammars:
|
34
|
+
|
35
|
+
- `llamacpp.grammar.json`: Restricts the output to JSON.
|
36
|
+
- `llamacpp.grammar.jsonArray`: Restricts the output to a JSON array.
|
37
|
+
- `llamacpp.grammar.list`: Restricts the output to a newline-separated list where each line starts with `- `.
|
38
|
+
|
39
|
+
- Llama.cpp structure generation support:
|
40
|
+
|
41
|
+
```ts
|
42
|
+
const structure = await generateStructure(
|
43
|
+
llamacpp
|
44
|
+
.TextGenerator({
|
45
|
+
// run openhermes-2.5-mistral-7b.Q4_K_M.gguf in llama.cpp
|
46
|
+
maxGenerationTokens: 1024,
|
47
|
+
temperature: 0,
|
48
|
+
})
|
49
|
+
.withTextPromptTemplate(ChatMLPrompt.instruction()) // needed for jsonStructurePrompt.text()
|
50
|
+
.asStructureGenerationModel(jsonStructurePrompt.text()), // automatically restrict the output to JSON
|
51
|
+
|
52
|
+
zodSchema(
|
53
|
+
z.object({
|
54
|
+
characters: z.array(
|
55
|
+
z.object({
|
56
|
+
name: z.string(),
|
57
|
+
class: z
|
58
|
+
.string()
|
59
|
+
.describe("Character class, e.g. warrior, mage, or thief."),
|
60
|
+
description: z.string(),
|
61
|
+
})
|
62
|
+
),
|
63
|
+
})
|
64
|
+
),
|
65
|
+
|
66
|
+
"Generate 3 character descriptions for a fantasy role playing game. "
|
67
|
+
);
|
68
|
+
```
|
69
|
+
|
3
70
|
## v0.116.0 - 2024-01-05
|
4
71
|
|
5
72
|
### Added
|
package/README.md
CHANGED
@@ -152,7 +152,7 @@ const sentiment = await generateStructure(
|
|
152
152
|
);
|
153
153
|
```
|
154
154
|
|
155
|
-
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev//integration/model-provider/ollama)
|
155
|
+
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llama.cpp)
|
156
156
|
|
157
157
|
#### streamStructure
|
158
158
|
|
@@ -198,7 +198,7 @@ for await (const part of structureStream) {
|
|
198
198
|
}
|
199
199
|
```
|
200
200
|
|
201
|
-
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev//integration/model-provider/ollama)
|
201
|
+
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llama.cpp)
|
202
202
|
|
203
203
|
### [Generate Image](https://modelfusion.dev/guide/function/generate-image)
|
204
204
|
|
@@ -424,14 +424,15 @@ const text = await generateText(
|
|
424
424
|
#### Instruction Prompt Example
|
425
425
|
|
426
426
|
```ts
|
427
|
-
// example assumes you are running https://huggingface.co/TheBloke/Llama-2-7B-GGUF with llama.cpp
|
428
427
|
const text = await generateText(
|
429
428
|
llamacpp
|
430
|
-
.
|
429
|
+
.CompletionTextGenerator({
|
430
|
+
// run https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF with llama.cpp
|
431
|
+
promptTemplate: llamacpp.prompt.Llama2, // Set prompt template
|
431
432
|
contextWindowSize: 4096, // Llama 2 context window size
|
432
|
-
maxGenerationTokens:
|
433
|
+
maxGenerationTokens: 512,
|
433
434
|
})
|
434
|
-
.
|
435
|
+
.withInstructionPrompt(),
|
435
436
|
{
|
436
437
|
system: "You are a story writer.",
|
437
438
|
instruction: "Write a short story about a robot learning to love.",
|
@@ -503,11 +504,11 @@ const image = await generateImage(
|
|
503
504
|
|
504
505
|
### Metadata and original responses
|
505
506
|
|
506
|
-
ModelFusion model functions return rich responses that include the original response and metadata when you set the `fullResponse` option to `true`.
|
507
|
+
ModelFusion model functions return rich responses that include the raw (original) response and metadata when you set the `fullResponse` option to `true`.
|
507
508
|
|
508
509
|
```ts
|
509
|
-
// access the
|
510
|
-
const { text,
|
510
|
+
// access the raw response (needs to be typed) and the metadata:
|
511
|
+
const { text, rawResponse, metadata } = await generateText(
|
511
512
|
openai.CompletionTextGenerator({
|
512
513
|
model: "gpt-3.5-turbo-instruct",
|
513
514
|
maxGenerationTokens: 1000,
|
@@ -519,8 +520,8 @@ const { text, response, metadata } = await generateText(
|
|
519
520
|
|
520
521
|
console.log(metadata);
|
521
522
|
|
522
|
-
// cast to the response type:
|
523
|
-
for (const choice of (
|
523
|
+
// cast to the raw response type:
|
524
|
+
for (const choice of (rawResponse as OpenAICompletionResponse).choices) {
|
524
525
|
console.log(choice.text);
|
525
526
|
}
|
526
527
|
```
|
@@ -563,6 +564,8 @@ modelfusion.setLogFormat("detailed-object"); // log full events
|
|
563
564
|
- [Split Text](https://modelfusion.dev/guide/text-chunk/split)
|
564
565
|
- [Utilities](https://modelfusion.dev/guide/util/)
|
565
566
|
- [API Configuration](https://modelfusion.dev/guide/util/api-configuration)
|
567
|
+
- [Base URL](https://modelfusion.dev/guide/util/api-configuration/base-url)
|
568
|
+
- [Headers](https://modelfusion.dev/guide/util/api-configuration/headers)
|
566
569
|
- [Retry strategies](https://modelfusion.dev/guide/util/api-configuration/retry)
|
567
570
|
- [Throttling strategies](https://modelfusion.dev/guide/util/api-configuration/throttle)
|
568
571
|
- [Logging](https://modelfusion.dev/guide/util/logging)
|
@@ -36,11 +36,11 @@ const detailedObjectObserver = {
|
|
36
36
|
// Remove the "response" property from the result (if any):
|
37
37
|
if (event.eventType === "finished" &&
|
38
38
|
event.result != null &&
|
39
|
-
"
|
40
|
-
event.result?.
|
39
|
+
"rawResponse" in event.result &&
|
40
|
+
event.result?.rawResponse != null) {
|
41
41
|
event = {
|
42
42
|
...event,
|
43
|
-
result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "
|
43
|
+
result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "rawResponse")
|
44
44
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
45
45
|
),
|
46
46
|
};
|
@@ -73,11 +73,11 @@ const detailedJsonObserver = {
|
|
73
73
|
// Remove the "response" property from the result (if any):
|
74
74
|
if (event.eventType === "finished" &&
|
75
75
|
event.result != null &&
|
76
|
-
"
|
77
|
-
event.result?.
|
76
|
+
"rawResponse" in event.result &&
|
77
|
+
event.result?.rawResponse != null) {
|
78
78
|
event = {
|
79
79
|
...event,
|
80
|
-
result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "
|
80
|
+
result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "rawResponse")
|
81
81
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
82
82
|
),
|
83
83
|
};
|
@@ -32,11 +32,11 @@ const detailedObjectObserver = {
|
|
32
32
|
// Remove the "response" property from the result (if any):
|
33
33
|
if (event.eventType === "finished" &&
|
34
34
|
event.result != null &&
|
35
|
-
"
|
36
|
-
event.result?.
|
35
|
+
"rawResponse" in event.result &&
|
36
|
+
event.result?.rawResponse != null) {
|
37
37
|
event = {
|
38
38
|
...event,
|
39
|
-
result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "
|
39
|
+
result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "rawResponse")
|
40
40
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
41
41
|
),
|
42
42
|
};
|
@@ -69,11 +69,11 @@ const detailedJsonObserver = {
|
|
69
69
|
// Remove the "response" property from the result (if any):
|
70
70
|
if (event.eventType === "finished" &&
|
71
71
|
event.result != null &&
|
72
|
-
"
|
73
|
-
event.result?.
|
72
|
+
"rawResponse" in event.result &&
|
73
|
+
event.result?.rawResponse != null) {
|
74
74
|
event = {
|
75
75
|
...event,
|
76
|
-
result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "
|
76
|
+
result: Object.fromEntries(Object.entries(event.result).filter(([k]) => k !== "rawResponse")
|
77
77
|
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
78
78
|
),
|
79
79
|
};
|
@@ -22,7 +22,7 @@ export interface BaseModelCallStartedEvent extends BaseFunctionStartedEvent {
|
|
22
22
|
}
|
23
23
|
export type BaseModelCallFinishedEventResult = {
|
24
24
|
status: "success";
|
25
|
-
|
25
|
+
rawResponse: unknown;
|
26
26
|
value: unknown;
|
27
27
|
/**
|
28
28
|
* Optional usage information for the model call. The type depends on the call type.
|
@@ -17,7 +17,7 @@ export interface EmbeddingModel<VALUE, SETTINGS extends EmbeddingModelSettings =
|
|
17
17
|
*/
|
18
18
|
readonly isParallelizable: boolean;
|
19
19
|
doEmbedValues(values: VALUE[], options: FunctionCallOptions): PromiseLike<{
|
20
|
-
|
20
|
+
rawResponse: unknown;
|
21
21
|
embeddings: Vector[];
|
22
22
|
}>;
|
23
23
|
}
|
@@ -32,13 +32,13 @@ async function embedMany(model, values, options) {
|
|
32
32
|
responses.push(response);
|
33
33
|
}
|
34
34
|
}
|
35
|
-
const rawResponses = responses.map((response) => response.
|
35
|
+
const rawResponses = responses.map((response) => response.rawResponse);
|
36
36
|
const embeddings = [];
|
37
37
|
for (const response of responses) {
|
38
38
|
embeddings.push(...response.embeddings);
|
39
39
|
}
|
40
40
|
return {
|
41
|
-
|
41
|
+
rawResponse: rawResponses,
|
42
42
|
extractedValue: embeddings,
|
43
43
|
};
|
44
44
|
},
|
@@ -46,7 +46,7 @@ async function embedMany(model, values, options) {
|
|
46
46
|
return options?.fullResponse
|
47
47
|
? {
|
48
48
|
embeddings: fullResponse.value,
|
49
|
-
|
49
|
+
rawResponse: fullResponse.rawResponse,
|
50
50
|
metadata: fullResponse.metadata,
|
51
51
|
}
|
52
52
|
: fullResponse.value;
|
@@ -61,7 +61,7 @@ async function embed(model, value, options) {
|
|
61
61
|
generateResponse: async (options) => {
|
62
62
|
const result = await model.doEmbedValues([value], options);
|
63
63
|
return {
|
64
|
-
|
64
|
+
rawResponse: result.rawResponse,
|
65
65
|
extractedValue: result.embeddings[0],
|
66
66
|
};
|
67
67
|
},
|
@@ -69,7 +69,7 @@ async function embed(model, value, options) {
|
|
69
69
|
return options?.fullResponse
|
70
70
|
? {
|
71
71
|
embedding: fullResponse.value,
|
72
|
-
|
72
|
+
rawResponse: fullResponse.rawResponse,
|
73
73
|
metadata: fullResponse.metadata,
|
74
74
|
}
|
75
75
|
: fullResponse.value;
|
@@ -29,7 +29,7 @@ export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingM
|
|
29
29
|
fullResponse: true;
|
30
30
|
}): Promise<{
|
31
31
|
embeddings: Vector[];
|
32
|
-
|
32
|
+
rawResponse: unknown;
|
33
33
|
metadata: ModelCallMetadata;
|
34
34
|
}>;
|
35
35
|
/**
|
@@ -56,6 +56,6 @@ export declare function embed<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModel
|
|
56
56
|
fullResponse: true;
|
57
57
|
}): Promise<{
|
58
58
|
embedding: Vector;
|
59
|
-
|
59
|
+
rawResponse: unknown;
|
60
60
|
metadata: ModelCallMetadata;
|
61
61
|
}>;
|
@@ -29,13 +29,13 @@ export async function embedMany(model, values, options) {
|
|
29
29
|
responses.push(response);
|
30
30
|
}
|
31
31
|
}
|
32
|
-
const rawResponses = responses.map((response) => response.
|
32
|
+
const rawResponses = responses.map((response) => response.rawResponse);
|
33
33
|
const embeddings = [];
|
34
34
|
for (const response of responses) {
|
35
35
|
embeddings.push(...response.embeddings);
|
36
36
|
}
|
37
37
|
return {
|
38
|
-
|
38
|
+
rawResponse: rawResponses,
|
39
39
|
extractedValue: embeddings,
|
40
40
|
};
|
41
41
|
},
|
@@ -43,7 +43,7 @@ export async function embedMany(model, values, options) {
|
|
43
43
|
return options?.fullResponse
|
44
44
|
? {
|
45
45
|
embeddings: fullResponse.value,
|
46
|
-
|
46
|
+
rawResponse: fullResponse.rawResponse,
|
47
47
|
metadata: fullResponse.metadata,
|
48
48
|
}
|
49
49
|
: fullResponse.value;
|
@@ -57,7 +57,7 @@ export async function embed(model, value, options) {
|
|
57
57
|
generateResponse: async (options) => {
|
58
58
|
const result = await model.doEmbedValues([value], options);
|
59
59
|
return {
|
60
|
-
|
60
|
+
rawResponse: result.rawResponse,
|
61
61
|
extractedValue: result.embeddings[0],
|
62
62
|
};
|
63
63
|
},
|
@@ -65,7 +65,7 @@ export async function embed(model, value, options) {
|
|
65
65
|
return options?.fullResponse
|
66
66
|
? {
|
67
67
|
embedding: fullResponse.value,
|
68
|
-
|
68
|
+
rawResponse: fullResponse.rawResponse,
|
69
69
|
metadata: fullResponse.metadata,
|
70
70
|
}
|
71
71
|
: fullResponse.value;
|
@@ -78,7 +78,7 @@ async function executeStandardCall({ model, options, input, functionType, genera
|
|
78
78
|
});
|
79
79
|
throw result.error;
|
80
80
|
}
|
81
|
-
const
|
81
|
+
const rawResponse = result.value.rawResponse;
|
82
82
|
const value = result.value.extractedValue;
|
83
83
|
const usage = result.value.usage;
|
84
84
|
eventSource.notify({
|
@@ -87,13 +87,13 @@ async function executeStandardCall({ model, options, input, functionType, genera
|
|
87
87
|
result: {
|
88
88
|
status: "success",
|
89
89
|
usage,
|
90
|
-
|
90
|
+
rawResponse,
|
91
91
|
value,
|
92
92
|
},
|
93
93
|
});
|
94
94
|
return {
|
95
95
|
value,
|
96
|
-
|
96
|
+
rawResponse,
|
97
97
|
metadata: {
|
98
98
|
model: model.modelInformation,
|
99
99
|
callId: finishMetadata.callId,
|
@@ -8,12 +8,12 @@ export declare function executeStandardCall<VALUE, MODEL extends Model<ModelSett
|
|
8
8
|
input: unknown;
|
9
9
|
functionType: ModelCallStartedEvent["functionType"];
|
10
10
|
generateResponse: (options: FunctionCallOptions) => PromiseLike<{
|
11
|
-
|
11
|
+
rawResponse: unknown;
|
12
12
|
extractedValue: VALUE;
|
13
13
|
usage?: unknown;
|
14
14
|
}>;
|
15
15
|
}): Promise<{
|
16
16
|
value: VALUE;
|
17
|
-
|
17
|
+
rawResponse: unknown;
|
18
18
|
metadata: ModelCallMetadata;
|
19
19
|
}>;
|
@@ -75,7 +75,7 @@ export async function executeStandardCall({ model, options, input, functionType,
|
|
75
75
|
});
|
76
76
|
throw result.error;
|
77
77
|
}
|
78
|
-
const
|
78
|
+
const rawResponse = result.value.rawResponse;
|
79
79
|
const value = result.value.extractedValue;
|
80
80
|
const usage = result.value.usage;
|
81
81
|
eventSource.notify({
|
@@ -84,13 +84,13 @@ export async function executeStandardCall({ model, options, input, functionType,
|
|
84
84
|
result: {
|
85
85
|
status: "success",
|
86
86
|
usage,
|
87
|
-
|
87
|
+
rawResponse,
|
88
88
|
value,
|
89
89
|
},
|
90
90
|
});
|
91
91
|
return {
|
92
92
|
value,
|
93
|
-
|
93
|
+
rawResponse,
|
94
94
|
metadata: {
|
95
95
|
model: model.modelInformation,
|
96
96
|
callId: finishMetadata.callId,
|
@@ -15,7 +15,7 @@ export interface ImageGenerationModelSettings extends ModelSettings {
|
|
15
15
|
}
|
16
16
|
export interface ImageGenerationModel<PROMPT, SETTINGS extends ImageGenerationModelSettings = ImageGenerationModelSettings> extends Model<SETTINGS> {
|
17
17
|
doGenerateImages(prompt: PROMPT, options: FunctionCallOptions): PromiseLike<{
|
18
|
-
|
18
|
+
rawResponse: unknown;
|
19
19
|
base64Images: string[];
|
20
20
|
}>;
|
21
21
|
withPromptTemplate<INPUT_PROMPT>(promptTemplate: PromptTemplate<INPUT_PROMPT, PROMPT>): ImageGenerationModel<INPUT_PROMPT, SETTINGS>;
|
@@ -11,7 +11,7 @@ export declare class PromptTemplateImageGenerationModel<PROMPT, MODEL_PROMPT, SE
|
|
11
11
|
get modelInformation(): import("../ModelInformation.js").ModelInformation;
|
12
12
|
get settings(): SETTINGS;
|
13
13
|
doGenerateImages(prompt: PROMPT, options: FunctionCallOptions): PromiseLike<{
|
14
|
-
|
14
|
+
rawResponse: unknown;
|
15
15
|
base64Images: string[];
|
16
16
|
}>;
|
17
17
|
get settingsForEvent(): Partial<SETTINGS>;
|
@@ -11,7 +11,7 @@ async function generateImage(model, prompt, options) {
|
|
11
11
|
generateResponse: async (options) => {
|
12
12
|
const result = await model.doGenerateImages(prompt, options);
|
13
13
|
return {
|
14
|
-
|
14
|
+
rawResponse: result.rawResponse,
|
15
15
|
extractedValue: result.base64Images,
|
16
16
|
};
|
17
17
|
},
|
@@ -24,7 +24,7 @@ async function generateImage(model, prompt, options) {
|
|
24
24
|
imageBase64: imagesBase64[0],
|
25
25
|
images,
|
26
26
|
imagesBase64,
|
27
|
-
|
27
|
+
rawResponse: fullResponse.rawResponse,
|
28
28
|
metadata: fullResponse.metadata,
|
29
29
|
}
|
30
30
|
: images[0];
|
@@ -8,7 +8,7 @@ export async function generateImage(model, prompt, options) {
|
|
8
8
|
generateResponse: async (options) => {
|
9
9
|
const result = await model.doGenerateImages(prompt, options);
|
10
10
|
return {
|
11
|
-
|
11
|
+
rawResponse: result.rawResponse,
|
12
12
|
extractedValue: result.base64Images,
|
13
13
|
};
|
14
14
|
},
|
@@ -21,7 +21,7 @@ export async function generateImage(model, prompt, options) {
|
|
21
21
|
imageBase64: imagesBase64[0],
|
22
22
|
images,
|
23
23
|
imagesBase64,
|
24
|
-
|
24
|
+
rawResponse: fullResponse.rawResponse,
|
25
25
|
metadata: fullResponse.metadata,
|
26
26
|
}
|
27
27
|
: images[0];
|
@@ -11,7 +11,7 @@ async function generateSpeech(model, text, options) {
|
|
11
11
|
generateResponse: async (options) => {
|
12
12
|
const response = await model.doGenerateSpeechStandard(text, options);
|
13
13
|
return {
|
14
|
-
response,
|
14
|
+
rawResponse: response,
|
15
15
|
extractedValue: response,
|
16
16
|
};
|
17
17
|
},
|
@@ -19,7 +19,7 @@ async function generateSpeech(model, text, options) {
|
|
19
19
|
return options?.fullResponse
|
20
20
|
? {
|
21
21
|
speech: fullResponse.value,
|
22
|
-
|
22
|
+
rawResponse: fullResponse.rawResponse,
|
23
23
|
metadata: fullResponse.metadata,
|
24
24
|
}
|
25
25
|
: fullResponse.value;
|
@@ -8,7 +8,7 @@ export async function generateSpeech(model, text, options) {
|
|
8
8
|
generateResponse: async (options) => {
|
9
9
|
const response = await model.doGenerateSpeechStandard(text, options);
|
10
10
|
return {
|
11
|
-
response,
|
11
|
+
rawResponse: response,
|
12
12
|
extractedValue: response,
|
13
13
|
};
|
14
14
|
},
|
@@ -16,7 +16,7 @@ export async function generateSpeech(model, text, options) {
|
|
16
16
|
return options?.fullResponse
|
17
17
|
? {
|
18
18
|
speech: fullResponse.value,
|
19
|
-
|
19
|
+
rawResponse: fullResponse.rawResponse,
|
20
20
|
metadata: fullResponse.metadata,
|
21
21
|
}
|
22
22
|
: fullResponse.value;
|
@@ -30,7 +30,7 @@ class StructureFromTextGenerationModel {
|
|
30
30
|
return this.model.settingsForEvent;
|
31
31
|
}
|
32
32
|
async doGenerateStructure(schema, prompt, options) {
|
33
|
-
const { response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
|
33
|
+
const { rawResponse: response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
|
34
34
|
...options,
|
35
35
|
fullResponse: true,
|
36
36
|
});
|
@@ -27,7 +27,7 @@ export class StructureFromTextGenerationModel {
|
|
27
27
|
return this.model.settingsForEvent;
|
28
28
|
}
|
29
29
|
async doGenerateStructure(schema, prompt, options) {
|
30
|
-
const { response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
|
30
|
+
const { rawResponse: response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
|
31
31
|
...options,
|
32
32
|
fullResponse: true,
|
33
33
|
});
|
@@ -12,7 +12,7 @@ class StructureFromTextStreamingModel extends StructureFromTextGenerationModel_j
|
|
12
12
|
super(options);
|
13
13
|
}
|
14
14
|
async doGenerateStructure(schema, prompt, options) {
|
15
|
-
const { response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
|
15
|
+
const { rawResponse: response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
|
16
16
|
...options,
|
17
17
|
fullResponse: true,
|
18
18
|
});
|
@@ -9,7 +9,7 @@ export class StructureFromTextStreamingModel extends StructureFromTextGeneration
|
|
9
9
|
super(options);
|
10
10
|
}
|
11
11
|
async doGenerateStructure(schema, prompt, options) {
|
12
|
-
const { response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
|
12
|
+
const { rawResponse: response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
|
13
13
|
...options,
|
14
14
|
fullResponse: true,
|
15
15
|
});
|
@@ -29,7 +29,7 @@ async function generateStructure(model, schema, prompt, options) {
|
|
29
29
|
}
|
30
30
|
const value = parseResult.data;
|
31
31
|
return {
|
32
|
-
|
32
|
+
rawResponse: result.response,
|
33
33
|
extractedValue: value,
|
34
34
|
usage: result.usage,
|
35
35
|
};
|
@@ -38,7 +38,7 @@ async function generateStructure(model, schema, prompt, options) {
|
|
38
38
|
return options?.fullResponse
|
39
39
|
? {
|
40
40
|
structure: fullResponse.value,
|
41
|
-
|
41
|
+
rawResponse: fullResponse.rawResponse,
|
42
42
|
metadata: fullResponse.metadata,
|
43
43
|
}
|
44
44
|
: fullResponse.value;
|
@@ -26,7 +26,7 @@ export async function generateStructure(model, schema, prompt, options) {
|
|
26
26
|
}
|
27
27
|
const value = parseResult.data;
|
28
28
|
return {
|
29
|
-
|
29
|
+
rawResponse: result.response,
|
30
30
|
extractedValue: value,
|
31
31
|
usage: result.usage,
|
32
32
|
};
|
@@ -35,7 +35,7 @@ export async function generateStructure(model, schema, prompt, options) {
|
|
35
35
|
return options?.fullResponse
|
36
36
|
? {
|
37
37
|
structure: fullResponse.value,
|
38
|
-
|
38
|
+
rawResponse: fullResponse.rawResponse,
|
39
39
|
metadata: fullResponse.metadata,
|
40
40
|
}
|
41
41
|
: fullResponse.value;
|