modelfusion 0.113.0 → 0.114.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. package/CHANGELOG.md +59 -0
  2. package/README.md +89 -89
  3. package/core/FunctionOptions.d.ts +14 -0
  4. package/core/api/AbstractApiConfiguration.cjs +16 -1
  5. package/core/api/AbstractApiConfiguration.d.ts +7 -3
  6. package/core/api/AbstractApiConfiguration.js +16 -1
  7. package/core/api/ApiConfiguration.d.ts +10 -1
  8. package/core/api/BaseUrlApiConfiguration.cjs +9 -5
  9. package/core/api/BaseUrlApiConfiguration.d.ts +7 -7
  10. package/core/api/BaseUrlApiConfiguration.js +9 -5
  11. package/core/api/CustomHeaderProvider.cjs +2 -0
  12. package/core/api/CustomHeaderProvider.d.ts +2 -0
  13. package/core/api/CustomHeaderProvider.js +1 -0
  14. package/core/api/index.cjs +1 -0
  15. package/core/api/index.d.ts +1 -0
  16. package/core/api/index.js +1 -0
  17. package/core/cache/Cache.cjs +2 -0
  18. package/core/cache/Cache.d.ts +12 -0
  19. package/core/cache/Cache.js +1 -0
  20. package/core/cache/MemoryCache.cjs +23 -0
  21. package/core/cache/MemoryCache.d.ts +15 -0
  22. package/core/cache/MemoryCache.js +19 -0
  23. package/core/cache/index.cjs +18 -0
  24. package/core/cache/index.d.ts +2 -0
  25. package/core/cache/index.js +2 -0
  26. package/core/index.cjs +1 -0
  27. package/core/index.d.ts +1 -0
  28. package/core/index.js +1 -0
  29. package/core/schema/TypeValidationError.cjs +36 -0
  30. package/core/schema/TypeValidationError.d.ts +15 -0
  31. package/core/schema/TypeValidationError.js +32 -0
  32. package/core/schema/index.cjs +2 -0
  33. package/core/schema/index.d.ts +2 -0
  34. package/core/schema/index.js +2 -0
  35. package/core/schema/parseJSON.cjs +6 -14
  36. package/core/schema/parseJSON.d.ts +3 -2
  37. package/core/schema/parseJSON.js +6 -14
  38. package/core/schema/validateTypes.cjs +65 -0
  39. package/core/schema/validateTypes.d.ts +34 -0
  40. package/core/schema/validateTypes.js +60 -0
  41. package/model-function/embed/EmbeddingModel.d.ts +2 -2
  42. package/model-function/executeStandardCall.cjs +3 -1
  43. package/model-function/executeStandardCall.d.ts +2 -2
  44. package/model-function/executeStandardCall.js +3 -1
  45. package/model-function/executeStreamCall.cjs +2 -1
  46. package/model-function/executeStreamCall.d.ts +2 -2
  47. package/model-function/executeStreamCall.js +2 -1
  48. package/model-function/generate-image/ImageGenerationModel.d.ts +2 -2
  49. package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +2 -2
  50. package/model-function/generate-speech/SpeechGenerationModel.d.ts +3 -3
  51. package/model-function/generate-structure/generateStructure.cjs +4 -1
  52. package/model-function/generate-structure/generateStructure.js +4 -1
  53. package/model-function/generate-structure/streamStructure.cjs +4 -1
  54. package/model-function/generate-structure/streamStructure.js +4 -1
  55. package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +3 -0
  56. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +11 -2
  57. package/model-function/generate-text/PromptTemplateTextGenerationModel.js +3 -0
  58. package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -2
  59. package/model-function/generate-text/TextGenerationModel.d.ts +12 -3
  60. package/model-function/generate-text/generateText.cjs +43 -1
  61. package/model-function/generate-text/generateText.js +43 -1
  62. package/model-function/generate-transcription/TranscriptionModel.d.ts +2 -2
  63. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +20 -8
  64. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +27 -5
  65. package/model-provider/anthropic/AnthropicTextGenerationModel.js +20 -8
  66. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +8 -3
  67. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +3 -3
  68. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +8 -3
  69. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +8 -3
  70. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
  71. package/model-provider/cohere/CohereTextEmbeddingModel.js +8 -3
  72. package/model-provider/cohere/CohereTextGenerationModel.cjs +20 -8
  73. package/model-provider/cohere/CohereTextGenerationModel.d.ts +45 -5
  74. package/model-provider/cohere/CohereTextGenerationModel.js +20 -8
  75. package/model-provider/cohere/CohereTokenizer.cjs +16 -6
  76. package/model-provider/cohere/CohereTokenizer.d.ts +3 -3
  77. package/model-provider/cohere/CohereTokenizer.js +16 -6
  78. package/model-provider/elevenlabs/ElevenLabsApiConfiguration.cjs +1 -1
  79. package/model-provider/elevenlabs/ElevenLabsApiConfiguration.js +1 -1
  80. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +8 -3
  81. package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +2 -2
  82. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +8 -3
  83. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +8 -3
  84. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +3 -3
  85. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +8 -3
  86. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +18 -4
  87. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +21 -3
  88. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +18 -4
  89. package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +20 -8
  90. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +125 -5
  91. package/model-provider/llamacpp/LlamaCppCompletionModel.js +20 -8
  92. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +8 -3
  93. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +3 -3
  94. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +8 -3
  95. package/model-provider/llamacpp/LlamaCppTokenizer.cjs +8 -3
  96. package/model-provider/llamacpp/LlamaCppTokenizer.d.ts +2 -2
  97. package/model-provider/llamacpp/LlamaCppTokenizer.js +8 -3
  98. package/model-provider/lmnt/LmntSpeechModel.cjs +8 -3
  99. package/model-provider/lmnt/LmntSpeechModel.d.ts +2 -2
  100. package/model-provider/lmnt/LmntSpeechModel.js +8 -3
  101. package/model-provider/mistral/MistralChatModel.cjs +20 -8
  102. package/model-provider/mistral/MistralChatModel.d.ts +55 -5
  103. package/model-provider/mistral/MistralChatModel.js +20 -8
  104. package/model-provider/mistral/MistralTextEmbeddingModel.cjs +8 -3
  105. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +3 -3
  106. package/model-provider/mistral/MistralTextEmbeddingModel.js +8 -3
  107. package/model-provider/ollama/OllamaChatModel.cjs +20 -8
  108. package/model-provider/ollama/OllamaChatModel.d.ts +27 -5
  109. package/model-provider/ollama/OllamaChatModel.js +20 -8
  110. package/model-provider/ollama/OllamaCompletionModel.cjs +20 -7
  111. package/model-provider/ollama/OllamaCompletionModel.d.ts +43 -5
  112. package/model-provider/ollama/OllamaCompletionModel.js +20 -7
  113. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +8 -3
  114. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +3 -3
  115. package/model-provider/ollama/OllamaTextEmbeddingModel.js +8 -3
  116. package/model-provider/openai/AbstractOpenAIChatModel.cjs +23 -13
  117. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +94 -7
  118. package/model-provider/openai/AbstractOpenAIChatModel.js +23 -13
  119. package/model-provider/openai/AbstractOpenAICompletionModel.cjs +21 -9
  120. package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +35 -5
  121. package/model-provider/openai/AbstractOpenAICompletionModel.js +21 -9
  122. package/model-provider/openai/AzureOpenAIApiConfiguration.cjs +5 -2
  123. package/model-provider/openai/AzureOpenAIApiConfiguration.d.ts +2 -1
  124. package/model-provider/openai/AzureOpenAIApiConfiguration.js +5 -2
  125. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs +12 -6
  126. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +89 -5
  127. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js +12 -6
  128. package/model-provider/openai/OpenAIImageGenerationModel.cjs +10 -6
  129. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +4 -4
  130. package/model-provider/openai/OpenAIImageGenerationModel.js +10 -6
  131. package/model-provider/openai/OpenAISpeechModel.cjs +9 -4
  132. package/model-provider/openai/OpenAISpeechModel.d.ts +3 -3
  133. package/model-provider/openai/OpenAISpeechModel.js +9 -4
  134. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +11 -6
  135. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +3 -3
  136. package/model-provider/openai/OpenAITextEmbeddingModel.js +11 -6
  137. package/model-provider/openai/OpenAITranscriptionModel.cjs +9 -6
  138. package/model-provider/openai/OpenAITranscriptionModel.d.ts +4 -4
  139. package/model-provider/openai/OpenAITranscriptionModel.js +9 -6
  140. package/model-provider/stability/StabilityImageGenerationModel.cjs +10 -5
  141. package/model-provider/stability/StabilityImageGenerationModel.d.ts +3 -3
  142. package/model-provider/stability/StabilityImageGenerationModel.js +10 -5
  143. package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +9 -7
  144. package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +3 -3
  145. package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +9 -7
  146. package/observability/helicone/HeliconeOpenAIApiConfiguration.cjs +2 -1
  147. package/observability/helicone/HeliconeOpenAIApiConfiguration.d.ts +3 -1
  148. package/observability/helicone/HeliconeOpenAIApiConfiguration.js +2 -1
  149. package/package.json +1 -1
@@ -44,6 +44,9 @@ class PromptTemplateTextGenerationModel {
44
44
  const mappedPrompt = this.promptTemplate.format(prompt);
45
45
  return this.model.doGenerateTexts(mappedPrompt, options);
46
46
  }
47
+ restoreGeneratedTexts(rawResponse) {
48
+ return this.model.restoreGeneratedTexts(rawResponse);
49
+ }
47
50
  get settingsForEvent() {
48
51
  return this.model.settingsForEvent;
49
52
  }
@@ -1,4 +1,4 @@
1
- import { FunctionOptions } from "../../core/FunctionOptions.js";
1
+ import { FunctionCallOptions } from "../../core/FunctionOptions.js";
2
2
  import { TextGenerationToolCallModel, ToolCallPromptTemplate } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
3
3
  import { TextGenerationToolCallsModel } from "../../tool/generate-tool-calls/TextGenerationToolCallsModel.js";
4
4
  import { ToolCallsPromptTemplate } from "../../tool/generate-tool-calls/ToolCallsPromptTemplate.js";
@@ -18,7 +18,7 @@ export declare class PromptTemplateTextGenerationModel<PROMPT, MODEL_PROMPT, SET
18
18
  get tokenizer(): MODEL["tokenizer"];
19
19
  get contextWindowSize(): MODEL["contextWindowSize"];
20
20
  get countPromptTokens(): MODEL["countPromptTokens"] extends undefined ? undefined : (prompt: PROMPT) => PromiseLike<number>;
21
- doGenerateTexts(prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
21
+ doGenerateTexts(prompt: PROMPT, options?: FunctionCallOptions): PromiseLike<{
22
22
  response: unknown;
23
23
  textGenerationResults: import("./TextGenerationResult.js").TextGenerationResult[];
24
24
  usage?: {
@@ -27,6 +27,15 @@ export declare class PromptTemplateTextGenerationModel<PROMPT, MODEL_PROMPT, SET
27
27
  totalTokens: number;
28
28
  } | undefined;
29
29
  }>;
30
+ restoreGeneratedTexts(rawResponse: unknown): {
31
+ response: unknown;
32
+ textGenerationResults: import("./TextGenerationResult.js").TextGenerationResult[];
33
+ usage?: {
34
+ promptTokens: number;
35
+ completionTokens: number;
36
+ totalTokens: number;
37
+ } | undefined;
38
+ };
30
39
  get settingsForEvent(): Partial<SETTINGS>;
31
40
  asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationToolCallModel<INPUT_PROMPT, PROMPT, this>;
32
41
  asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationToolCallsModel<INPUT_PROMPT, PROMPT, this>;
@@ -41,6 +41,9 @@ export class PromptTemplateTextGenerationModel {
41
41
  const mappedPrompt = this.promptTemplate.format(prompt);
42
42
  return this.model.doGenerateTexts(mappedPrompt, options);
43
43
  }
44
+ restoreGeneratedTexts(rawResponse) {
45
+ return this.model.restoreGeneratedTexts(rawResponse);
46
+ }
44
47
  get settingsForEvent() {
45
48
  return this.model.settingsForEvent;
46
49
  }
@@ -1,4 +1,4 @@
1
- import { FunctionOptions } from "../../core/FunctionOptions.js";
1
+ import { FunctionCallOptions } from "../../core/FunctionOptions.js";
2
2
  import { StructureFromTextPromptTemplate } from "../generate-structure/StructureFromTextPromptTemplate.js";
3
3
  import { StructureFromTextStreamingModel } from "../generate-structure/StructureFromTextStreamingModel.js";
4
4
  import { PromptTemplateTextGenerationModel } from "./PromptTemplateTextGenerationModel.js";
@@ -9,7 +9,7 @@ export declare class PromptTemplateTextStreamingModel<PROMPT, MODEL_PROMPT, SETT
9
9
  model: MODEL;
10
10
  promptTemplate: TextGenerationPromptTemplate<PROMPT, MODEL_PROMPT>;
11
11
  });
12
- doStreamText(prompt: PROMPT, options?: FunctionOptions): PromiseLike<AsyncIterable<import("../Delta.js").Delta<unknown>>>;
12
+ doStreamText(prompt: PROMPT, options?: FunctionCallOptions): PromiseLike<AsyncIterable<import("../Delta.js").Delta<unknown>>>;
13
13
  extractTextDelta(delta: unknown): string | undefined;
14
14
  asStructureGenerationModel<INPUT_PROMPT>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, PROMPT>): StructureFromTextStreamingModel<INPUT_PROMPT, PROMPT, this>;
15
15
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateTextStreamingModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
@@ -1,4 +1,4 @@
1
- import { FunctionOptions } from "../../core/FunctionOptions.js";
1
+ import { FunctionCallOptions } from "../../core/FunctionOptions.js";
2
2
  import { Delta } from "../Delta.js";
3
3
  import { Model, ModelSettings } from "../Model.js";
4
4
  import { BasicTokenizer, FullTokenizer } from "../tokenize-text/Tokenizer.js";
@@ -62,7 +62,7 @@ export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationMode
62
62
  * Optional. Implement if you have a tokenizer and want to count the number of tokens in a prompt.
63
63
  */
64
64
  readonly countPromptTokens: ((prompt: PROMPT) => PromiseLike<number>) | undefined;
65
- doGenerateTexts(prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
65
+ doGenerateTexts(prompt: PROMPT, options?: FunctionCallOptions): PromiseLike<{
66
66
  response: unknown;
67
67
  textGenerationResults: TextGenerationResult[];
68
68
  usage?: {
@@ -71,6 +71,15 @@ export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationMode
71
71
  totalTokens: number;
72
72
  };
73
73
  }>;
74
+ restoreGeneratedTexts(rawResponse: unknown): {
75
+ response: unknown;
76
+ textGenerationResults: TextGenerationResult[];
77
+ usage?: {
78
+ promptTokens: number;
79
+ completionTokens: number;
80
+ totalTokens: number;
81
+ };
82
+ };
74
83
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationModel<INPUT_PROMPT, SETTINGS>;
75
84
  /**
76
85
  * Optional. When available, forces the model to return JSON as the text output.
@@ -78,7 +87,7 @@ export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationMode
78
87
  withJsonOutput?(): this;
79
88
  }
80
89
  export interface TextStreamingModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends TextGenerationModel<PROMPT, SETTINGS> {
81
- doStreamText(prompt: PROMPT, options?: FunctionOptions): PromiseLike<AsyncIterable<Delta<unknown>>>;
90
+ doStreamText(prompt: PROMPT, options?: FunctionCallOptions): PromiseLike<AsyncIterable<Delta<unknown>>>;
82
91
  extractTextDelta(delta: unknown): string | undefined;
83
92
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextStreamingModel<INPUT_PROMPT, SETTINGS>;
84
93
  }
@@ -9,7 +9,48 @@ async function generateText(model, prompt, options) {
9
9
  model,
10
10
  options,
11
11
  generateResponse: async (options) => {
12
- const result = await model.doGenerateTexts(prompt, options);
12
+ async function getGeneratedTexts() {
13
+ if (options?.cache == null) {
14
+ return {
15
+ ...(await model.doGenerateTexts(prompt, options)),
16
+ cache: undefined,
17
+ };
18
+ }
19
+ let cacheErrors = undefined;
20
+ const cacheKey = {
21
+ functionType: "generate-text",
22
+ functionId: options?.functionId,
23
+ input: {
24
+ model,
25
+ settings: model.settingsForEvent, // TODO should include full model information
26
+ prompt,
27
+ },
28
+ };
29
+ try {
30
+ const cachedRawResponse = await options.cache.lookupValue(cacheKey);
31
+ if (cachedRawResponse != null) {
32
+ return {
33
+ ...model.restoreGeneratedTexts(cachedRawResponse),
34
+ cache: { status: "hit" },
35
+ };
36
+ }
37
+ }
38
+ catch (err) {
39
+ cacheErrors = [err];
40
+ }
41
+ const result = await model.doGenerateTexts(prompt, options);
42
+ try {
43
+ await options.cache.storeValue(cacheKey, result.response);
44
+ }
45
+ catch (err) {
46
+ cacheErrors = [...(cacheErrors ?? []), err];
47
+ }
48
+ return {
49
+ ...result,
50
+ cache: { status: "miss", errors: cacheErrors },
51
+ };
52
+ }
53
+ const result = await getGeneratedTexts();
13
54
  const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
14
55
  const textGenerationResults = shouldTrimWhitespace
15
56
  ? result.textGenerationResults.map((textGeneration) => ({
@@ -17,6 +58,7 @@ async function generateText(model, prompt, options) {
17
58
  finishReason: textGeneration.finishReason,
18
59
  }))
19
60
  : result.textGenerationResults;
61
+ // TODO add cache information
20
62
  return {
21
63
  response: result.response,
22
64
  extractedValue: textGenerationResults,
@@ -6,7 +6,48 @@ export async function generateText(model, prompt, options) {
6
6
  model,
7
7
  options,
8
8
  generateResponse: async (options) => {
9
- const result = await model.doGenerateTexts(prompt, options);
9
+ async function getGeneratedTexts() {
10
+ if (options?.cache == null) {
11
+ return {
12
+ ...(await model.doGenerateTexts(prompt, options)),
13
+ cache: undefined,
14
+ };
15
+ }
16
+ let cacheErrors = undefined;
17
+ const cacheKey = {
18
+ functionType: "generate-text",
19
+ functionId: options?.functionId,
20
+ input: {
21
+ model,
22
+ settings: model.settingsForEvent, // TODO should include full model information
23
+ prompt,
24
+ },
25
+ };
26
+ try {
27
+ const cachedRawResponse = await options.cache.lookupValue(cacheKey);
28
+ if (cachedRawResponse != null) {
29
+ return {
30
+ ...model.restoreGeneratedTexts(cachedRawResponse),
31
+ cache: { status: "hit" },
32
+ };
33
+ }
34
+ }
35
+ catch (err) {
36
+ cacheErrors = [err];
37
+ }
38
+ const result = await model.doGenerateTexts(prompt, options);
39
+ try {
40
+ await options.cache.storeValue(cacheKey, result.response);
41
+ }
42
+ catch (err) {
43
+ cacheErrors = [...(cacheErrors ?? []), err];
44
+ }
45
+ return {
46
+ ...result,
47
+ cache: { status: "miss", errors: cacheErrors },
48
+ };
49
+ }
50
+ const result = await getGeneratedTexts();
10
51
  const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
11
52
  const textGenerationResults = shouldTrimWhitespace
12
53
  ? result.textGenerationResults.map((textGeneration) => ({
@@ -14,6 +55,7 @@ export async function generateText(model, prompt, options) {
14
55
  finishReason: textGeneration.finishReason,
15
56
  }))
16
57
  : result.textGenerationResults;
58
+ // TODO add cache information
17
59
  return {
18
60
  response: result.response,
19
61
  extractedValue: textGenerationResults,
@@ -1,9 +1,9 @@
1
- import { FunctionOptions } from "../../core/FunctionOptions.js";
1
+ import { FunctionCallOptions } from "../../core/FunctionOptions.js";
2
2
  import { Model, ModelSettings } from "../Model.js";
3
3
  export interface TranscriptionModelSettings extends ModelSettings {
4
4
  }
5
5
  export interface TranscriptionModel<DATA, SETTINGS extends TranscriptionModelSettings = TranscriptionModelSettings> extends Model<SETTINGS> {
6
- doTranscribe: (data: DATA, options?: FunctionOptions) => PromiseLike<{
6
+ doTranscribe: (data: DATA, options: FunctionCallOptions) => PromiseLike<{
7
7
  response: unknown;
8
8
  transcription: string;
9
9
  }>;
@@ -6,6 +6,7 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
6
6
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
7
7
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
8
8
  const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
9
+ const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
9
10
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
10
11
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
11
12
  const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
@@ -69,17 +70,22 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
69
70
  get modelName() {
70
71
  return this.settings.model;
71
72
  }
72
- async callAPI(prompt, options) {
73
+ async callAPI(prompt, callOptions, options) {
73
74
  const api = this.settings.api ?? new AnthropicApiConfiguration_js_1.AnthropicApiConfiguration();
74
75
  const responseFormat = options.responseFormat;
75
- const abortSignal = options.run?.abortSignal;
76
+ const abortSignal = callOptions.run?.abortSignal;
76
77
  const userId = this.settings.userId;
77
78
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
78
79
  retry: api.retry,
79
80
  throttle: api.throttle,
80
81
  call: async () => (0, postToApi_js_1.postJsonToApi)({
81
82
  url: api.assembleUrl(`/complete`),
82
- headers: api.headers,
83
+ headers: api.headers({
84
+ functionType: callOptions.functionType,
85
+ functionId: callOptions.functionId,
86
+ run: callOptions.run,
87
+ callId: callOptions.callId,
88
+ }),
83
89
  body: {
84
90
  model: this.settings.model,
85
91
  prompt,
@@ -108,10 +114,17 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
108
114
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
109
115
  }
110
116
  async doGenerateTexts(prompt, options) {
111
- const response = await this.callAPI(prompt, {
112
- ...options,
117
+ return this.processTextGenerationResponse(await this.callAPI(prompt, options, {
113
118
  responseFormat: exports.AnthropicTextGenerationResponseFormat.json,
114
- });
119
+ }));
120
+ }
121
+ restoreGeneratedTexts(rawResponse) {
122
+ return this.processTextGenerationResponse((0, validateTypes_js_1.validateTypes)({
123
+ structure: rawResponse,
124
+ schema: (0, ZodSchema_js_1.zodSchema)(anthropicTextGenerationResponseSchema),
125
+ }));
126
+ }
127
+ processTextGenerationResponse(response) {
115
128
  return {
116
129
  response,
117
130
  textGenerationResults: [
@@ -133,8 +146,7 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
133
146
  }
134
147
  }
135
148
  doStreamText(prompt, options) {
136
- return this.callAPI(prompt, {
137
- ...options,
149
+ return this.callAPI(prompt, options, {
138
150
  responseFormat: exports.AnthropicTextGenerationResponseFormat.deltaIterable,
139
151
  });
140
152
  }
@@ -1,5 +1,5 @@
1
1
  import { z } from "zod";
2
- import { FunctionOptions } from "../../core/FunctionOptions.js";
2
+ import { FunctionCallOptions } from "../../core/FunctionOptions.js";
3
3
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
4
4
  import { ResponseHandler } from "../../core/api/postToApi.js";
5
5
  import { AbstractModel } from "../../model-function/AbstractModel.js";
@@ -46,11 +46,11 @@ export declare class AnthropicTextGenerationModel extends AbstractModel<Anthropi
46
46
  readonly contextWindowSize: number;
47
47
  readonly tokenizer: undefined;
48
48
  readonly countPromptTokens: undefined;
49
- callAPI<RESPONSE>(prompt: string, options: {
49
+ callAPI<RESPONSE>(prompt: string, callOptions: FunctionCallOptions, options: {
50
50
  responseFormat: AnthropicTextGenerationResponseFormatType<RESPONSE>;
51
- } & FunctionOptions): Promise<RESPONSE>;
51
+ }): Promise<RESPONSE>;
52
52
  get settingsForEvent(): Partial<AnthropicTextGenerationModelSettings>;
53
- doGenerateTexts(prompt: string, options?: FunctionOptions): Promise<{
53
+ doGenerateTexts(prompt: string, options: FunctionCallOptions): Promise<{
54
54
  response: {
55
55
  model: string;
56
56
  completion: string;
@@ -61,8 +61,30 @@ export declare class AnthropicTextGenerationModel extends AbstractModel<Anthropi
61
61
  finishReason: TextGenerationFinishReason;
62
62
  }[];
63
63
  }>;
64
+ restoreGeneratedTexts(rawResponse: unknown): {
65
+ response: {
66
+ model: string;
67
+ completion: string;
68
+ stop_reason: string;
69
+ };
70
+ textGenerationResults: {
71
+ text: string;
72
+ finishReason: TextGenerationFinishReason;
73
+ }[];
74
+ };
75
+ processTextGenerationResponse(response: AnthropicTextGenerationResponse): {
76
+ response: {
77
+ model: string;
78
+ completion: string;
79
+ stop_reason: string;
80
+ };
81
+ textGenerationResults: {
82
+ text: string;
83
+ finishReason: TextGenerationFinishReason;
84
+ }[];
85
+ };
64
86
  private translateFinishReason;
65
- doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<{
87
+ doStreamText(prompt: string, options: FunctionCallOptions): Promise<AsyncIterable<Delta<{
66
88
  model: string;
67
89
  completion: string;
68
90
  stop_reason: string | null;
@@ -3,6 +3,7 @@ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottl
3
3
  import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
4
4
  import { zodSchema } from "../../core/schema/ZodSchema.js";
5
5
  import { parseJSON } from "../../core/schema/parseJSON.js";
6
+ import { validateTypes } from "../../core/schema/validateTypes.js";
6
7
  import { AbstractModel } from "../../model-function/AbstractModel.js";
7
8
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
8
9
  import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
@@ -66,17 +67,22 @@ export class AnthropicTextGenerationModel extends AbstractModel {
66
67
  get modelName() {
67
68
  return this.settings.model;
68
69
  }
69
- async callAPI(prompt, options) {
70
+ async callAPI(prompt, callOptions, options) {
70
71
  const api = this.settings.api ?? new AnthropicApiConfiguration();
71
72
  const responseFormat = options.responseFormat;
72
- const abortSignal = options.run?.abortSignal;
73
+ const abortSignal = callOptions.run?.abortSignal;
73
74
  const userId = this.settings.userId;
74
75
  return callWithRetryAndThrottle({
75
76
  retry: api.retry,
76
77
  throttle: api.throttle,
77
78
  call: async () => postJsonToApi({
78
79
  url: api.assembleUrl(`/complete`),
79
- headers: api.headers,
80
+ headers: api.headers({
81
+ functionType: callOptions.functionType,
82
+ functionId: callOptions.functionId,
83
+ run: callOptions.run,
84
+ callId: callOptions.callId,
85
+ }),
80
86
  body: {
81
87
  model: this.settings.model,
82
88
  prompt,
@@ -105,10 +111,17 @@ export class AnthropicTextGenerationModel extends AbstractModel {
105
111
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
106
112
  }
107
113
  async doGenerateTexts(prompt, options) {
108
- const response = await this.callAPI(prompt, {
109
- ...options,
114
+ return this.processTextGenerationResponse(await this.callAPI(prompt, options, {
110
115
  responseFormat: AnthropicTextGenerationResponseFormat.json,
111
- });
116
+ }));
117
+ }
118
+ restoreGeneratedTexts(rawResponse) {
119
+ return this.processTextGenerationResponse(validateTypes({
120
+ structure: rawResponse,
121
+ schema: zodSchema(anthropicTextGenerationResponseSchema),
122
+ }));
123
+ }
124
+ processTextGenerationResponse(response) {
112
125
  return {
113
126
  response,
114
127
  textGenerationResults: [
@@ -130,8 +143,7 @@ export class AnthropicTextGenerationModel extends AbstractModel {
130
143
  }
131
144
  }
132
145
  doStreamText(prompt, options) {
133
- return this.callAPI(prompt, {
134
- ...options,
146
+ return this.callAPI(prompt, options, {
135
147
  responseFormat: AnthropicTextGenerationResponseFormat.deltaIterable,
136
148
  });
137
149
  }
@@ -28,15 +28,20 @@ class Automatic1111ImageGenerationModel extends AbstractModel_js_1.AbstractModel
28
28
  get modelName() {
29
29
  return this.settings.model;
30
30
  }
31
- async callAPI(input, options) {
31
+ async callAPI(input, callOptions) {
32
32
  const api = this.settings.api ?? new Automatic1111ApiConfiguration_js_1.Automatic1111ApiConfiguration();
33
- const abortSignal = options?.run?.abortSignal;
33
+ const abortSignal = callOptions.run?.abortSignal;
34
34
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
35
35
  retry: api.retry,
36
36
  throttle: api.throttle,
37
37
  call: async () => (0, postToApi_js_1.postJsonToApi)({
38
38
  url: api.assembleUrl(`/txt2img`),
39
- headers: api.headers,
39
+ headers: api.headers({
40
+ functionType: callOptions.functionType,
41
+ functionId: callOptions.functionId,
42
+ run: callOptions.run,
43
+ callId: callOptions.callId,
44
+ }),
40
45
  body: {
41
46
  prompt: input.prompt,
42
47
  negative_prompt: input.negativePrompt,
@@ -1,5 +1,5 @@
1
1
  import { z } from "zod";
2
- import { FunctionOptions } from "../../core/FunctionOptions.js";
2
+ import { FunctionCallOptions } from "../../core/FunctionOptions.js";
3
3
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
4
4
  import { AbstractModel } from "../../model-function/AbstractModel.js";
5
5
  import { PromptTemplate } from "../../model-function/PromptTemplate.js";
@@ -37,9 +37,9 @@ export declare class Automatic1111ImageGenerationModel extends AbstractModel<Aut
37
37
  constructor(settings: Automatic1111ImageGenerationSettings);
38
38
  readonly provider: "Automatic1111";
39
39
  get modelName(): string;
40
- callAPI(input: Automatic1111ImageGenerationPrompt, options?: FunctionOptions): Promise<Automatic1111ImageGenerationResponse>;
40
+ callAPI(input: Automatic1111ImageGenerationPrompt, callOptions: FunctionCallOptions): Promise<Automatic1111ImageGenerationResponse>;
41
41
  get settingsForEvent(): Partial<Automatic1111ImageGenerationSettings>;
42
- doGenerateImages(prompt: Automatic1111ImageGenerationPrompt, options?: FunctionOptions): Promise<{
42
+ doGenerateImages(prompt: Automatic1111ImageGenerationPrompt, options: FunctionCallOptions): Promise<{
43
43
  response: {
44
44
  images: string[];
45
45
  parameters: {};
@@ -25,15 +25,20 @@ export class Automatic1111ImageGenerationModel extends AbstractModel {
25
25
  get modelName() {
26
26
  return this.settings.model;
27
27
  }
28
- async callAPI(input, options) {
28
+ async callAPI(input, callOptions) {
29
29
  const api = this.settings.api ?? new Automatic1111ApiConfiguration();
30
- const abortSignal = options?.run?.abortSignal;
30
+ const abortSignal = callOptions.run?.abortSignal;
31
31
  return callWithRetryAndThrottle({
32
32
  retry: api.retry,
33
33
  throttle: api.throttle,
34
34
  call: async () => postJsonToApi({
35
35
  url: api.assembleUrl(`/txt2img`),
36
- headers: api.headers,
36
+ headers: api.headers({
37
+ functionType: callOptions.functionType,
38
+ functionId: callOptions.functionId,
39
+ run: callOptions.run,
40
+ callId: callOptions.callId,
41
+ }),
37
42
  body: {
38
43
  prompt: input.prompt,
39
44
  negative_prompt: input.negativePrompt,
@@ -113,18 +113,23 @@ class CohereTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
113
113
  async detokenize(tokens) {
114
114
  return this.tokenizer.detokenize(tokens);
115
115
  }
116
- async callAPI(texts, options) {
116
+ async callAPI(texts, callOptions) {
117
117
  if (texts.length > this.maxValuesPerCall) {
118
118
  throw new Error(`The Cohere embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
119
119
  }
120
120
  const api = this.settings.api ?? new CohereApiConfiguration_js_1.CohereApiConfiguration();
121
- const abortSignal = options?.run?.abortSignal;
121
+ const abortSignal = callOptions.run?.abortSignal;
122
122
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
123
123
  retry: api.retry,
124
124
  throttle: api.throttle,
125
125
  call: async () => (0, postToApi_js_1.postJsonToApi)({
126
126
  url: api.assembleUrl(`/embed`),
127
- headers: api.headers,
127
+ headers: api.headers({
128
+ functionType: callOptions.functionType,
129
+ functionId: callOptions.functionId,
130
+ run: callOptions.run,
131
+ callId: callOptions.callId,
132
+ }),
128
133
  body: {
129
134
  model: this.settings.model,
130
135
  texts,
@@ -1,5 +1,5 @@
1
1
  import { z } from "zod";
2
- import { FunctionOptions } from "../../core/FunctionOptions.js";
2
+ import { FunctionCallOptions } from "../../core/FunctionOptions.js";
3
3
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
4
4
  import { AbstractModel } from "../../model-function/AbstractModel.js";
5
5
  import { EmbeddingModel, EmbeddingModelSettings } from "../../model-function/embed/EmbeddingModel.js";
@@ -70,9 +70,9 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
70
70
  tokenTexts: string[];
71
71
  }>;
72
72
  detokenize(tokens: number[]): Promise<string>;
73
- callAPI(texts: Array<string>, options?: FunctionOptions): Promise<CohereTextEmbeddingResponse>;
73
+ callAPI(texts: Array<string>, callOptions: FunctionCallOptions): Promise<CohereTextEmbeddingResponse>;
74
74
  get settingsForEvent(): Partial<CohereTextEmbeddingModelSettings>;
75
- doEmbedValues(texts: string[], options?: FunctionOptions): Promise<{
75
+ doEmbedValues(texts: string[], options: FunctionCallOptions): Promise<{
76
76
  response: {
77
77
  embeddings: number[][];
78
78
  texts: string[];
@@ -110,18 +110,23 @@ export class CohereTextEmbeddingModel extends AbstractModel {
110
110
  async detokenize(tokens) {
111
111
  return this.tokenizer.detokenize(tokens);
112
112
  }
113
- async callAPI(texts, options) {
113
+ async callAPI(texts, callOptions) {
114
114
  if (texts.length > this.maxValuesPerCall) {
115
115
  throw new Error(`The Cohere embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
116
116
  }
117
117
  const api = this.settings.api ?? new CohereApiConfiguration();
118
- const abortSignal = options?.run?.abortSignal;
118
+ const abortSignal = callOptions.run?.abortSignal;
119
119
  return callWithRetryAndThrottle({
120
120
  retry: api.retry,
121
121
  throttle: api.throttle,
122
122
  call: async () => postJsonToApi({
123
123
  url: api.assembleUrl(`/embed`),
124
- headers: api.headers,
124
+ headers: api.headers({
125
+ functionType: callOptions.functionType,
126
+ functionId: callOptions.functionId,
127
+ run: callOptions.run,
128
+ callId: callOptions.callId,
129
+ }),
125
130
  body: {
126
131
  model: this.settings.model,
127
132
  texts,
@@ -5,6 +5,7 @@ const zod_1 = require("zod");
5
5
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
6
6
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
7
7
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
8
+ const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
8
9
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
9
10
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
10
11
  const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
@@ -73,16 +74,21 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
73
74
  async countPromptTokens(input) {
74
75
  return (0, countTokens_js_1.countTokens)(this.tokenizer, input);
75
76
  }
76
- async callAPI(prompt, options) {
77
+ async callAPI(prompt, callOptions, options) {
77
78
  const api = this.settings.api ?? new CohereApiConfiguration_js_1.CohereApiConfiguration();
78
79
  const responseFormat = options.responseFormat;
79
- const abortSignal = options.run?.abortSignal;
80
+ const abortSignal = callOptions.run?.abortSignal;
80
81
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
81
82
  retry: api.retry,
82
83
  throttle: api.throttle,
83
84
  call: async () => (0, postToApi_js_1.postJsonToApi)({
84
85
  url: api.assembleUrl(`/generate`),
85
- headers: api.headers,
86
+ headers: api.headers({
87
+ functionType: callOptions.functionType,
88
+ functionId: callOptions.functionId,
89
+ run: callOptions.run,
90
+ callId: callOptions.callId,
91
+ }),
86
92
  body: {
87
93
  stream: responseFormat.stream,
88
94
  model: this.settings.model,
@@ -122,10 +128,17 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
122
128
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
123
129
  }
124
130
  async doGenerateTexts(prompt, options) {
125
- const response = await this.callAPI(prompt, {
126
- ...options,
131
+ return this.processTextGenerationResponse(await this.callAPI(prompt, options, {
127
132
  responseFormat: exports.CohereTextGenerationResponseFormat.json,
128
- });
133
+ }));
134
+ }
135
+ restoreGeneratedTexts(rawResponse) {
136
+ return this.processTextGenerationResponse((0, validateTypes_js_1.validateTypes)({
137
+ structure: rawResponse,
138
+ schema: (0, ZodSchema_js_1.zodSchema)(cohereTextGenerationResponseSchema),
139
+ }));
140
+ }
141
+ processTextGenerationResponse(response) {
129
142
  return {
130
143
  response,
131
144
  textGenerationResults: response.generations.map((generation) => ({
@@ -149,8 +162,7 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
149
162
  }
150
163
  }
151
164
  doStreamText(prompt, options) {
152
- return this.callAPI(prompt, {
153
- ...options,
165
+ return this.callAPI(prompt, options, {
154
166
  responseFormat: exports.CohereTextGenerationResponseFormat.deltaIterable,
155
167
  });
156
168
  }