modelfusion 0.117.0 → 0.118.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (142)
  1. package/CHANGELOG.md +26 -0
  2. package/README.md +10 -9
  3. package/core/getFunctionCallLogger.cjs +6 -6
  4. package/core/getFunctionCallLogger.js +6 -6
  5. package/model-function/ModelCallEvent.d.ts +1 -1
  6. package/model-function/embed/EmbeddingEvent.d.ts +1 -1
  7. package/model-function/embed/EmbeddingModel.d.ts +1 -1
  8. package/model-function/embed/embed.cjs +5 -5
  9. package/model-function/embed/embed.d.ts +2 -2
  10. package/model-function/embed/embed.js +5 -5
  11. package/model-function/executeStandardCall.cjs +3 -3
  12. package/model-function/executeStandardCall.d.ts +2 -2
  13. package/model-function/executeStandardCall.js +3 -3
  14. package/model-function/generate-image/ImageGenerationEvent.d.ts +1 -1
  15. package/model-function/generate-image/ImageGenerationModel.d.ts +1 -1
  16. package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +1 -1
  17. package/model-function/generate-image/generateImage.cjs +2 -2
  18. package/model-function/generate-image/generateImage.d.ts +1 -1
  19. package/model-function/generate-image/generateImage.js +2 -2
  20. package/model-function/generate-speech/SpeechGenerationEvent.d.ts +1 -1
  21. package/model-function/generate-speech/generateSpeech.cjs +2 -2
  22. package/model-function/generate-speech/generateSpeech.d.ts +1 -1
  23. package/model-function/generate-speech/generateSpeech.js +2 -2
  24. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +1 -1
  25. package/model-function/generate-structure/StructureFromTextGenerationModel.js +1 -1
  26. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +1 -1
  27. package/model-function/generate-structure/StructureFromTextStreamingModel.js +1 -1
  28. package/model-function/generate-structure/StructureGenerationEvent.d.ts +1 -1
  29. package/model-function/generate-structure/generateStructure.cjs +2 -2
  30. package/model-function/generate-structure/generateStructure.d.ts +1 -1
  31. package/model-function/generate-structure/generateStructure.js +2 -2
  32. package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +2 -2
  33. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +2 -2
  34. package/model-function/generate-text/TextGenerationEvent.d.ts +1 -1
  35. package/model-function/generate-text/TextGenerationModel.d.ts +2 -2
  36. package/model-function/generate-text/generateText.cjs +3 -3
  37. package/model-function/generate-text/generateText.d.ts +1 -1
  38. package/model-function/generate-text/generateText.js +3 -3
  39. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +8 -1
  40. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +5 -0
  41. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +6 -0
  42. package/model-function/generate-text/prompt-template/PromptTemplateProvider.cjs +2 -0
  43. package/model-function/generate-text/prompt-template/PromptTemplateProvider.d.ts +8 -0
  44. package/model-function/generate-text/prompt-template/PromptTemplateProvider.js +1 -0
  45. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +34 -1
  46. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +9 -0
  47. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +31 -0
  48. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +28 -0
  49. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +29 -1
  50. package/model-function/generate-text/prompt-template/index.cjs +1 -0
  51. package/model-function/generate-text/prompt-template/index.d.ts +1 -0
  52. package/model-function/generate-text/prompt-template/index.js +1 -0
  53. package/model-function/generate-transcription/TranscriptionEvent.d.ts +1 -1
  54. package/model-function/generate-transcription/TranscriptionModel.d.ts +1 -1
  55. package/model-function/generate-transcription/generateTranscription.cjs +1 -1
  56. package/model-function/generate-transcription/generateTranscription.d.ts +1 -1
  57. package/model-function/generate-transcription/generateTranscription.js +1 -1
  58. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +3 -3
  59. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
  60. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +3 -3
  61. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +3 -3
  62. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
  63. package/model-provider/cohere/CohereTextEmbeddingModel.js +3 -3
  64. package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -3
  65. package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -4
  66. package/model-provider/cohere/CohereTextGenerationModel.js +3 -3
  67. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +3 -3
  68. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
  69. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +3 -3
  70. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +3 -3
  71. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -4
  72. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +3 -3
  73. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +15 -1
  74. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -0
  75. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +13 -0
  76. package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +31 -28
  77. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +17 -8
  78. package/model-provider/llamacpp/LlamaCppCompletionModel.js +31 -28
  79. package/model-provider/llamacpp/LlamaCppFacade.cjs +4 -3
  80. package/model-provider/llamacpp/LlamaCppFacade.d.ts +2 -1
  81. package/model-provider/llamacpp/LlamaCppFacade.js +2 -1
  82. package/model-provider/llamacpp/LlamaCppPrompt.cjs +59 -0
  83. package/model-provider/llamacpp/LlamaCppPrompt.d.ts +14 -0
  84. package/model-provider/llamacpp/LlamaCppPrompt.js +31 -0
  85. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +3 -3
  86. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +1 -1
  87. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +3 -3
  88. package/model-provider/llamacpp/index.cjs +2 -3
  89. package/model-provider/llamacpp/index.d.ts +1 -2
  90. package/model-provider/llamacpp/index.js +1 -2
  91. package/model-provider/mistral/MistralChatModel.cjs +3 -3
  92. package/model-provider/mistral/MistralChatModel.d.ts +4 -4
  93. package/model-provider/mistral/MistralChatModel.js +3 -3
  94. package/model-provider/mistral/MistralTextEmbeddingModel.cjs +3 -3
  95. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +1 -1
  96. package/model-provider/mistral/MistralTextEmbeddingModel.js +3 -3
  97. package/model-provider/ollama/OllamaChatModel.cjs +3 -3
  98. package/model-provider/ollama/OllamaChatModel.d.ts +2 -2
  99. package/model-provider/ollama/OllamaChatModel.js +3 -3
  100. package/model-provider/ollama/OllamaCompletionModel.cjs +3 -3
  101. package/model-provider/ollama/OllamaCompletionModel.d.ts +14 -14
  102. package/model-provider/ollama/OllamaCompletionModel.js +3 -3
  103. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +3 -3
  104. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +1 -1
  105. package/model-provider/ollama/OllamaTextEmbeddingModel.js +3 -3
  106. package/model-provider/openai/AbstractOpenAIChatModel.cjs +12 -12
  107. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +6 -6
  108. package/model-provider/openai/AbstractOpenAIChatModel.js +12 -12
  109. package/model-provider/openai/AbstractOpenAICompletionModel.cjs +6 -6
  110. package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +2 -2
  111. package/model-provider/openai/AbstractOpenAICompletionModel.js +6 -6
  112. package/model-provider/openai/OpenAIImageGenerationModel.cjs +3 -3
  113. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +1 -1
  114. package/model-provider/openai/OpenAIImageGenerationModel.js +3 -3
  115. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -3
  116. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
  117. package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -3
  118. package/model-provider/openai/OpenAITranscriptionModel.cjs +3 -3
  119. package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
  120. package/model-provider/openai/OpenAITranscriptionModel.js +3 -3
  121. package/model-provider/stability/StabilityImageGenerationModel.cjs +3 -3
  122. package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
  123. package/model-provider/stability/StabilityImageGenerationModel.js +3 -3
  124. package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +3 -3
  125. package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +1 -1
  126. package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +3 -3
  127. package/package.json +1 -1
  128. package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +2 -2
  129. package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +1 -1
  130. package/tool/generate-tool-call/TextGenerationToolCallModel.js +2 -2
  131. package/tool/generate-tool-call/ToolCallGenerationEvent.d.ts +1 -1
  132. package/tool/generate-tool-call/ToolCallGenerationModel.d.ts +1 -1
  133. package/tool/generate-tool-call/generateToolCall.cjs +2 -2
  134. package/tool/generate-tool-call/generateToolCall.js +2 -2
  135. package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +2 -2
  136. package/tool/generate-tool-calls/TextGenerationToolCallsModel.d.ts +1 -1
  137. package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +2 -2
  138. package/tool/generate-tool-calls/ToolCallsGenerationEvent.d.ts +1 -1
  139. package/tool/generate-tool-calls/ToolCallsGenerationModel.d.ts +1 -1
  140. package/tool/generate-tool-calls/generateToolCalls.cjs +2 -2
  141. package/tool/generate-tool-calls/generateToolCalls.d.ts +1 -1
  142. package/tool/generate-tool-calls/generateToolCalls.js +2 -2
@@ -40,7 +40,7 @@ async function generateText(model, prompt, options) {
40
40
  }
41
41
  const result = await model.doGenerateTexts(prompt, options);
42
42
  try {
43
- await options.cache.storeValue(cacheKey, result.response);
43
+ await options.cache.storeValue(cacheKey, result.rawResponse);
44
44
  }
45
45
  catch (err) {
46
46
  cacheErrors = [...(cacheErrors ?? []), err];
@@ -60,7 +60,7 @@ async function generateText(model, prompt, options) {
60
60
  : result.textGenerationResults;
61
61
  // TODO add cache information
62
62
  return {
63
- response: result.response,
63
+ rawResponse: result.rawResponse,
64
64
  extractedValue: textGenerationResults,
65
65
  usage: result.usage,
66
66
  };
@@ -74,7 +74,7 @@ async function generateText(model, prompt, options) {
74
74
  finishReason: firstResult.finishReason,
75
75
  texts: textGenerationResults.map((textGeneration) => textGeneration.text),
76
76
  textGenerationResults,
77
- response: fullResponse.response,
77
+ rawResponse: fullResponse.rawResponse,
78
78
  metadata: fullResponse.metadata,
79
79
  }
80
80
  : firstResult.text;
@@ -33,6 +33,6 @@ export declare function generateText<PROMPT>(model: TextGenerationModel<PROMPT,
33
33
  finishReason: TextGenerationFinishReason;
34
34
  texts: string[];
35
35
  textGenerationResults: TextGenerationResult[];
36
- response: unknown;
36
+ rawResponse: unknown;
37
37
  metadata: ModelCallMetadata;
38
38
  }>;
@@ -37,7 +37,7 @@ export async function generateText(model, prompt, options) {
37
37
  }
38
38
  const result = await model.doGenerateTexts(prompt, options);
39
39
  try {
40
- await options.cache.storeValue(cacheKey, result.response);
40
+ await options.cache.storeValue(cacheKey, result.rawResponse);
41
41
  }
42
42
  catch (err) {
43
43
  cacheErrors = [...(cacheErrors ?? []), err];
@@ -57,7 +57,7 @@ export async function generateText(model, prompt, options) {
57
57
  : result.textGenerationResults;
58
58
  // TODO add cache information
59
59
  return {
60
- response: result.response,
60
+ rawResponse: result.rawResponse,
61
61
  extractedValue: textGenerationResults,
62
62
  usage: result.usage,
63
63
  };
@@ -71,7 +71,7 @@ export async function generateText(model, prompt, options) {
71
71
  finishReason: firstResult.finishReason,
72
72
  texts: textGenerationResults.map((textGeneration) => textGeneration.text),
73
73
  textGenerationResults,
74
- response: fullResponse.response,
74
+ rawResponse: fullResponse.rawResponse,
75
75
  metadata: fullResponse.metadata,
76
76
  }
77
77
  : firstResult.text;
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.instruction = exports.text = void 0;
3
+ exports.chat = exports.instruction = exports.text = void 0;
4
4
  const ContentPart_js_1 = require("./ContentPart.cjs");
5
5
  const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
6
6
  const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
@@ -81,3 +81,10 @@ function instruction() {
81
81
  };
82
82
  }
83
83
  exports.instruction = instruction;
84
+ /**
85
+ * Not supported by Alpaca.
86
+ */
87
+ function chat() {
88
+ throw new Error("Chat prompts are not supported by the Alpaca format.");
89
+ }
90
+ exports.chat = chat;
@@ -1,4 +1,5 @@
1
1
  import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
2
+ import { ChatPrompt } from "./ChatPrompt.js";
2
3
  import { InstructionPrompt } from "./InstructionPrompt.js";
3
4
  /**
4
5
  * Formats a text prompt as an Alpaca prompt.
@@ -44,3 +45,7 @@ export declare function instruction(): TextGenerationPromptTemplate<InstructionP
44
45
  input?: string;
45
46
  }, // optional input supported by Alpaca
46
47
  string>;
48
+ /**
49
+ * Not supported by Alpaca.
50
+ */
51
+ export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, string>;
@@ -76,3 +76,9 @@ export function instruction() {
76
76
  },
77
77
  };
78
78
  }
79
+ /**
80
+ * Not supported by Alpaca.
81
+ */
82
+ export function chat() {
83
+ throw new Error("Chat prompts are not supported by the Alpaca format.");
84
+ }
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,8 @@
1
+ import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
2
+ import { ChatPrompt } from "./ChatPrompt.js";
3
+ import { InstructionPrompt } from "./InstructionPrompt.js";
4
+ export interface TextGenerationPromptTemplateProvider<TARGET_PROMPT> {
5
+ text(): TextGenerationPromptTemplate<string, TARGET_PROMPT>;
6
+ instruction(): TextGenerationPromptTemplate<InstructionPrompt, TARGET_PROMPT>;
7
+ chat(): TextGenerationPromptTemplate<ChatPrompt, TARGET_PROMPT>;
8
+ }
@@ -1,11 +1,44 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.chat = void 0;
3
+ exports.chat = exports.instruction = exports.text = void 0;
4
4
  const ContentPart_js_1 = require("./ContentPart.cjs");
5
5
  const InvalidPromptError_js_1 = require("./InvalidPromptError.cjs");
6
6
  // default Vicuna 1 system message
7
7
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
8
8
  "The assistant gives helpful, detailed, and polite answers to the user's questions.";
9
+ /**
10
+ * Formats a text prompt as a Vicuna prompt.
11
+ */
12
+ function text() {
13
+ return {
14
+ stopSequences: [],
15
+ format(prompt) {
16
+ let text = DEFAULT_SYSTEM_MESSAGE;
17
+ text += "\n\nUSER: ";
18
+ text += prompt;
19
+ text += "\n\nASSISTANT: ";
20
+ return text;
21
+ },
22
+ };
23
+ }
24
+ exports.text = text;
25
+ /**
26
+ * Formats an instruction prompt as a Vicuna prompt.
27
+ */
28
+ function instruction() {
29
+ return {
30
+ format(prompt) {
31
+ let text = prompt.system != null
32
+ ? `${prompt.system}\n\n`
33
+ : `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
34
+ text += `USER: ${(0, ContentPart_js_1.validateContentIsString)(prompt.instruction, prompt)}\n`;
35
+ text += `ASSISTANT: `;
36
+ return text;
37
+ },
38
+ stopSequences: [`\nUSER:`],
39
+ };
40
+ }
41
+ exports.instruction = instruction;
9
42
  /**
10
43
  * Formats a chat prompt as a Vicuna prompt.
11
44
  *
@@ -1,5 +1,14 @@
1
1
  import { TextGenerationPromptTemplate } from "../TextGenerationPromptTemplate.js";
2
2
  import { ChatPrompt } from "./ChatPrompt.js";
3
+ import { InstructionPrompt } from "./InstructionPrompt.js";
4
+ /**
5
+ * Formats a text prompt as a Vicuna prompt.
6
+ */
7
+ export declare function text(): TextGenerationPromptTemplate<string, string>;
8
+ /**
9
+ * Formats an instruction prompt as a Vicuna prompt.
10
+ */
11
+ export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, string>;
3
12
  /**
4
13
  * Formats a chat prompt as a Vicuna prompt.
5
14
  *
@@ -3,6 +3,37 @@ import { InvalidPromptError } from "./InvalidPromptError.js";
3
3
  // default Vicuna 1 system message
4
4
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
5
5
  "The assistant gives helpful, detailed, and polite answers to the user's questions.";
6
+ /**
7
+ * Formats a text prompt as a Vicuna prompt.
8
+ */
9
+ export function text() {
10
+ return {
11
+ stopSequences: [],
12
+ format(prompt) {
13
+ let text = DEFAULT_SYSTEM_MESSAGE;
14
+ text += "\n\nUSER: ";
15
+ text += prompt;
16
+ text += "\n\nASSISTANT: ";
17
+ return text;
18
+ },
19
+ };
20
+ }
21
+ /**
22
+ * Formats an instruction prompt as a Vicuna prompt.
23
+ */
24
+ export function instruction() {
25
+ return {
26
+ format(prompt) {
27
+ let text = prompt.system != null
28
+ ? `${prompt.system}\n\n`
29
+ : `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
30
+ text += `USER: ${validateContentIsString(prompt.instruction, prompt)}\n`;
31
+ text += `ASSISTANT: `;
32
+ return text;
33
+ },
34
+ stopSequences: [`\nUSER:`],
35
+ };
36
+ }
6
37
  /**
7
38
  * Formats a chat prompt as a Vicuna prompt.
8
39
  *
@@ -1,6 +1,34 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  const VicunaPromptTemplate_js_1 = require("./VicunaPromptTemplate.cjs");
4
+ describe("text prompt", () => {
5
+ it("should format prompt", () => {
6
+ const prompt = (0, VicunaPromptTemplate_js_1.text)().format("prompt");
7
+ expect(prompt).toMatchSnapshot();
8
+ });
9
+ });
10
+ describe("instruction prompt", () => {
11
+ it("should format prompt with instruction", () => {
12
+ const prompt = (0, VicunaPromptTemplate_js_1.instruction)().format({
13
+ instruction: "instruction",
14
+ });
15
+ expect(prompt).toMatchSnapshot();
16
+ });
17
+ it("should format prompt with system and instruction", () => {
18
+ const prompt = (0, VicunaPromptTemplate_js_1.instruction)().format({
19
+ system: "system",
20
+ instruction: "instruction",
21
+ });
22
+ expect(prompt).toMatchSnapshot();
23
+ });
24
+ it("should format prompt with instruction and response prefix", () => {
25
+ const prompt = (0, VicunaPromptTemplate_js_1.instruction)().format({
26
+ instruction: "instruction",
27
+ responsePrefix: "response prefix",
28
+ });
29
+ expect(prompt).toMatchSnapshot();
30
+ });
31
+ });
4
32
  describe("chat prompt", () => {
5
33
  it("should format prompt with user message", () => {
6
34
  const prompt = (0, VicunaPromptTemplate_js_1.chat)().format({
@@ -1,4 +1,32 @@
1
- import { chat } from "./VicunaPromptTemplate.js";
1
+ import { chat, instruction, text } from "./VicunaPromptTemplate.js";
2
+ describe("text prompt", () => {
3
+ it("should format prompt", () => {
4
+ const prompt = text().format("prompt");
5
+ expect(prompt).toMatchSnapshot();
6
+ });
7
+ });
8
+ describe("instruction prompt", () => {
9
+ it("should format prompt with instruction", () => {
10
+ const prompt = instruction().format({
11
+ instruction: "instruction",
12
+ });
13
+ expect(prompt).toMatchSnapshot();
14
+ });
15
+ it("should format prompt with system and instruction", () => {
16
+ const prompt = instruction().format({
17
+ system: "system",
18
+ instruction: "instruction",
19
+ });
20
+ expect(prompt).toMatchSnapshot();
21
+ });
22
+ it("should format prompt with instruction and response prefix", () => {
23
+ const prompt = instruction().format({
24
+ instruction: "instruction",
25
+ responsePrefix: "response prefix",
26
+ });
27
+ expect(prompt).toMatchSnapshot();
28
+ });
29
+ });
2
30
  describe("chat prompt", () => {
3
31
  it("should format prompt with user message", () => {
4
32
  const prompt = chat().format({
@@ -36,6 +36,7 @@ __exportStar(require("./InvalidPromptError.cjs"), exports);
36
36
  exports.Llama2Prompt = __importStar(require("./Llama2PromptTemplate.cjs"));
37
37
  exports.MistralInstructPrompt = __importStar(require("./MistralInstructPromptTemplate.cjs"));
38
38
  exports.NeuralChatPrompt = __importStar(require("./NeuralChatPromptTemplate.cjs"));
39
+ __exportStar(require("./PromptTemplateProvider.cjs"), exports);
39
40
  exports.TextPrompt = __importStar(require("./TextPromptTemplate.cjs"));
40
41
  exports.VicunaPrompt = __importStar(require("./VicunaPromptTemplate.cjs"));
41
42
  __exportStar(require("./trimChatPrompt.cjs"), exports);
@@ -7,6 +7,7 @@ export * from "./InvalidPromptError.js";
7
7
  export * as Llama2Prompt from "./Llama2PromptTemplate.js";
8
8
  export * as MistralInstructPrompt from "./MistralInstructPromptTemplate.js";
9
9
  export * as NeuralChatPrompt from "./NeuralChatPromptTemplate.js";
10
+ export * from "./PromptTemplateProvider.js";
10
11
  export * as TextPrompt from "./TextPromptTemplate.js";
11
12
  export * as VicunaPrompt from "./VicunaPromptTemplate.js";
12
13
  export * from "./trimChatPrompt.js";
@@ -7,6 +7,7 @@ export * from "./InvalidPromptError.js";
7
7
  export * as Llama2Prompt from "./Llama2PromptTemplate.js";
8
8
  export * as MistralInstructPrompt from "./MistralInstructPromptTemplate.js";
9
9
  export * as NeuralChatPrompt from "./NeuralChatPromptTemplate.js";
10
+ export * from "./PromptTemplateProvider.js";
10
11
  export * as TextPrompt from "./TextPromptTemplate.js";
11
12
  export * as VicunaPrompt from "./VicunaPromptTemplate.js";
12
13
  export * from "./trimChatPrompt.js";
@@ -4,7 +4,7 @@ export interface TranscriptionStartedEvent extends BaseModelCallStartedEvent {
4
4
  }
5
5
  export type TranscriptionFinishedEventResult = {
6
6
  status: "success";
7
- response: unknown;
7
+ rawResponse: unknown;
8
8
  value: string;
9
9
  } | {
10
10
  status: "error";
@@ -4,7 +4,7 @@ export interface TranscriptionModelSettings extends ModelSettings {
4
4
  }
5
5
  export interface TranscriptionModel<DATA, SETTINGS extends TranscriptionModelSettings = TranscriptionModelSettings> extends Model<SETTINGS> {
6
6
  doTranscribe: (data: DATA, options: FunctionCallOptions) => PromiseLike<{
7
- response: unknown;
7
+ rawResponse: unknown;
8
8
  transcription: string;
9
9
  }>;
10
10
  }
@@ -11,7 +11,7 @@ async function generateTranscription(model, data, options) {
11
11
  generateResponse: async (options) => {
12
12
  const result = await model.doTranscribe(data, options);
13
13
  return {
14
- response: result.response,
14
+ rawResponse: result.rawResponse,
15
15
  extractedValue: result.transcription,
16
16
  };
17
17
  },
@@ -27,6 +27,6 @@ export declare function generateTranscription<DATA>(model: TranscriptionModel<DA
27
27
  fullResponse: true;
28
28
  }): Promise<{
29
29
  value: string;
30
- response: unknown;
30
+ rawResponse: unknown;
31
31
  metadata: ModelCallMetadata;
32
32
  }>;
@@ -8,7 +8,7 @@ export async function generateTranscription(model, data, options) {
8
8
  generateResponse: async (options) => {
9
9
  const result = await model.doTranscribe(data, options);
10
10
  return {
11
- response: result.response,
11
+ rawResponse: result.rawResponse,
12
12
  extractedValue: result.transcription,
13
13
  };
14
14
  },
@@ -74,10 +74,10 @@ class Automatic1111ImageGenerationModel extends AbstractModel_js_1.AbstractModel
74
74
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
75
75
  }
76
76
  async doGenerateImages(prompt, options) {
77
- const response = await this.callAPI(prompt, options);
77
+ const rawResponse = await this.callAPI(prompt, options);
78
78
  return {
79
- response,
80
- base64Images: response.images,
79
+ rawResponse,
80
+ base64Images: rawResponse.images,
81
81
  };
82
82
  }
83
83
  withTextPrompt() {
@@ -40,7 +40,7 @@ export declare class Automatic1111ImageGenerationModel extends AbstractModel<Aut
40
40
  callAPI(input: Automatic1111ImageGenerationPrompt, callOptions: FunctionCallOptions): Promise<Automatic1111ImageGenerationResponse>;
41
41
  get settingsForEvent(): Partial<Automatic1111ImageGenerationSettings>;
42
42
  doGenerateImages(prompt: Automatic1111ImageGenerationPrompt, options: FunctionCallOptions): Promise<{
43
- response: {
43
+ rawResponse: {
44
44
  images: string[];
45
45
  parameters: {};
46
46
  info: string;
@@ -71,10 +71,10 @@ export class Automatic1111ImageGenerationModel extends AbstractModel {
71
71
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
72
72
  }
73
73
  async doGenerateImages(prompt, options) {
74
- const response = await this.callAPI(prompt, options);
74
+ const rawResponse = await this.callAPI(prompt, options);
75
75
  return {
76
- response,
77
- base64Images: response.images,
76
+ rawResponse,
77
+ base64Images: rawResponse.images,
78
78
  };
79
79
  }
80
80
  withTextPrompt() {
@@ -148,10 +148,10 @@ class CohereTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
148
148
  };
149
149
  }
150
150
  async doEmbedValues(texts, options) {
151
- const response = await this.callAPI(texts, options);
151
+ const rawResponse = await this.callAPI(texts, options);
152
152
  return {
153
- response,
154
- embeddings: response.embeddings,
153
+ rawResponse,
154
+ embeddings: rawResponse.embeddings,
155
155
  };
156
156
  }
157
157
  withSettings(additionalSettings) {
@@ -73,7 +73,7 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
73
73
  callAPI(texts: Array<string>, callOptions: FunctionCallOptions): Promise<CohereTextEmbeddingResponse>;
74
74
  get settingsForEvent(): Partial<CohereTextEmbeddingModelSettings>;
75
75
  doEmbedValues(texts: string[], options: FunctionCallOptions): Promise<{
76
- response: {
76
+ rawResponse: {
77
77
  embeddings: number[][];
78
78
  texts: string[];
79
79
  id: string;
@@ -145,10 +145,10 @@ export class CohereTextEmbeddingModel extends AbstractModel {
145
145
  };
146
146
  }
147
147
  async doEmbedValues(texts, options) {
148
- const response = await this.callAPI(texts, options);
148
+ const rawResponse = await this.callAPI(texts, options);
149
149
  return {
150
- response,
151
- embeddings: response.embeddings,
150
+ rawResponse,
151
+ embeddings: rawResponse.embeddings,
152
152
  };
153
153
  }
154
154
  withSettings(additionalSettings) {
@@ -138,10 +138,10 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
138
138
  schema: (0, ZodSchema_js_1.zodSchema)(cohereTextGenerationResponseSchema),
139
139
  }));
140
140
  }
141
- processTextGenerationResponse(response) {
141
+ processTextGenerationResponse(rawResponse) {
142
142
  return {
143
- response,
144
- textGenerationResults: response.generations.map((generation) => ({
143
+ rawResponse,
144
+ textGenerationResults: rawResponse.generations.map((generation) => ({
145
145
  text: generation.text,
146
146
  finishReason: this.translateFinishReason(generation.finish_reason),
147
147
  })),
@@ -59,7 +59,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
59
59
  }): Promise<RESPONSE>;
60
60
  get settingsForEvent(): Partial<CohereTextGenerationModelSettings>;
61
61
  doGenerateTexts(prompt: string, options: FunctionCallOptions): Promise<{
62
- response: {
62
+ rawResponse: {
63
63
  id: string;
64
64
  prompt: string;
65
65
  generations: {
@@ -79,7 +79,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
79
79
  }[];
80
80
  }>;
81
81
  restoreGeneratedTexts(rawResponse: unknown): {
82
- response: {
82
+ rawResponse: {
83
83
  id: string;
84
84
  prompt: string;
85
85
  generations: {
@@ -98,8 +98,8 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
98
98
  finishReason: TextGenerationFinishReason;
99
99
  }[];
100
100
  };
101
- processTextGenerationResponse(response: CohereTextGenerationResponse): {
102
- response: {
101
+ processTextGenerationResponse(rawResponse: CohereTextGenerationResponse): {
102
+ rawResponse: {
103
103
  id: string;
104
104
  prompt: string;
105
105
  generations: {
@@ -135,10 +135,10 @@ export class CohereTextGenerationModel extends AbstractModel {
135
135
  schema: zodSchema(cohereTextGenerationResponseSchema),
136
136
  }));
137
137
  }
138
- processTextGenerationResponse(response) {
138
+ processTextGenerationResponse(rawResponse) {
139
139
  return {
140
- response,
141
- textGenerationResults: response.generations.map((generation) => ({
140
+ rawResponse,
141
+ textGenerationResults: rawResponse.generations.map((generation) => ({
142
142
  text: generation.text,
143
143
  finishReason: this.translateFinishReason(generation.finish_reason),
144
144
  })),
@@ -117,10 +117,10 @@ class HuggingFaceTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
117
117
  };
118
118
  }
119
119
  async doEmbedValues(texts, options) {
120
- const response = await this.callAPI(texts, options);
120
+ const rawResponse = await this.callAPI(texts, options);
121
121
  return {
122
- response,
123
- embeddings: response,
122
+ rawResponse,
123
+ embeddings: rawResponse,
124
124
  };
125
125
  }
126
126
  withSettings(additionalSettings) {
@@ -46,7 +46,7 @@ export declare class HuggingFaceTextEmbeddingModel extends AbstractModel<Hugging
46
46
  get settingsForEvent(): Partial<HuggingFaceTextEmbeddingModelSettings>;
47
47
  readonly countPromptTokens: undefined;
48
48
  doEmbedValues(texts: string[], options: FunctionCallOptions): Promise<{
49
- response: number[][];
49
+ rawResponse: number[][];
50
50
  embeddings: number[][];
51
51
  }>;
52
52
  withSettings(additionalSettings: Partial<HuggingFaceTextEmbeddingModelSettings>): this;
@@ -114,10 +114,10 @@ export class HuggingFaceTextEmbeddingModel extends AbstractModel {
114
114
  };
115
115
  }
116
116
  async doEmbedValues(texts, options) {
117
- const response = await this.callAPI(texts, options);
117
+ const rawResponse = await this.callAPI(texts, options);
118
118
  return {
119
- response,
120
- embeddings: response,
119
+ rawResponse,
120
+ embeddings: rawResponse,
121
121
  };
122
122
  }
123
123
  withSettings(additionalSettings) {
@@ -116,10 +116,10 @@ class HuggingFaceTextGenerationModel extends AbstractModel_js_1.AbstractModel {
116
116
  schema: (0, ZodSchema_js_1.zodSchema)(huggingFaceTextGenerationResponseSchema),
117
117
  }));
118
118
  }
119
- processTextGenerationResponse(response) {
119
+ processTextGenerationResponse(rawResponse) {
120
120
  return {
121
- response,
122
- textGenerationResults: response.map((response) => ({
121
+ rawResponse,
122
+ textGenerationResults: rawResponse.map((response) => ({
123
123
  text: response.generated_text,
124
124
  finishReason: "unknown",
125
125
  })),
@@ -43,7 +43,7 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
43
43
  callAPI(prompt: string, callOptions: FunctionCallOptions): Promise<HuggingFaceTextGenerationResponse>;
44
44
  get settingsForEvent(): Partial<HuggingFaceTextGenerationModelSettings>;
45
45
  doGenerateTexts(prompt: string, options: FunctionCallOptions): Promise<{
46
- response: {
46
+ rawResponse: {
47
47
  generated_text: string;
48
48
  }[];
49
49
  textGenerationResults: {
@@ -52,7 +52,7 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
52
52
  }[];
53
53
  }>;
54
54
  restoreGeneratedTexts(rawResponse: unknown): {
55
- response: {
55
+ rawResponse: {
56
56
  generated_text: string;
57
57
  }[];
58
58
  textGenerationResults: {
@@ -60,8 +60,8 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
60
60
  finishReason: "unknown";
61
61
  }[];
62
62
  };
63
- processTextGenerationResponse(response: HuggingFaceTextGenerationResponse): {
64
- response: {
63
+ processTextGenerationResponse(rawResponse: HuggingFaceTextGenerationResponse): {
64
+ rawResponse: {
65
65
  generated_text: string;
66
66
  }[];
67
67
  textGenerationResults: {
@@ -113,10 +113,10 @@ export class HuggingFaceTextGenerationModel extends AbstractModel {
113
113
  schema: zodSchema(huggingFaceTextGenerationResponseSchema),
114
114
  }));
115
115
  }
116
- processTextGenerationResponse(response) {
116
+ processTextGenerationResponse(rawResponse) {
117
117
  return {
118
- response,
119
- textGenerationResults: response.map((response) => ({
118
+ rawResponse,
119
+ textGenerationResults: rawResponse.map((response) => ({
120
120
  text: response.generated_text,
121
121
  finishReason: "unknown",
122
122
  })),
@@ -1,11 +1,25 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.chat = exports.instruction = void 0;
3
+ exports.chat = exports.instruction = exports.text = void 0;
4
4
  const ContentPart_js_1 = require("../../model-function/generate-text/prompt-template/ContentPart.cjs");
5
5
  const InvalidPromptError_js_1 = require("../../model-function/generate-text/prompt-template/InvalidPromptError.cjs");
6
+ const TextPromptTemplate_js_1 = require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs");
6
7
  // default Vicuna 1 system message
7
8
  const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
8
9
  "The assistant gives helpful, detailed, and polite answers to the user's questions.";
10
+ /**
11
+ * Text prompt.
12
+ */
13
+ function text() {
14
+ const delegate = (0, TextPromptTemplate_js_1.text)();
15
+ return {
16
+ stopSequences: [],
17
+ format(prompt) {
18
+ return { text: delegate.format(prompt) };
19
+ },
20
+ };
21
+ }
22
+ exports.text = text;
9
23
  /**
10
24
  * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
11
25
  *