modelfusion 0.112.0 → 0.114.0

This diff shows the contents of publicly released package versions as published to their respective public registries. It is provided for informational purposes only.
Files changed (159)
  1. package/CHANGELOG.md +105 -0
  2. package/README.md +108 -212
  3. package/core/FunctionOptions.d.ts +14 -0
  4. package/core/api/AbstractApiConfiguration.cjs +16 -1
  5. package/core/api/AbstractApiConfiguration.d.ts +7 -3
  6. package/core/api/AbstractApiConfiguration.js +16 -1
  7. package/core/api/ApiConfiguration.d.ts +10 -1
  8. package/core/api/BaseUrlApiConfiguration.cjs +9 -5
  9. package/core/api/BaseUrlApiConfiguration.d.ts +7 -7
  10. package/core/api/BaseUrlApiConfiguration.js +9 -5
  11. package/core/api/CustomHeaderProvider.cjs +2 -0
  12. package/core/api/CustomHeaderProvider.d.ts +2 -0
  13. package/core/api/CustomHeaderProvider.js +1 -0
  14. package/core/api/index.cjs +1 -0
  15. package/core/api/index.d.ts +1 -0
  16. package/core/api/index.js +1 -0
  17. package/core/cache/Cache.cjs +2 -0
  18. package/core/cache/Cache.d.ts +12 -0
  19. package/core/cache/Cache.js +1 -0
  20. package/core/cache/MemoryCache.cjs +23 -0
  21. package/core/cache/MemoryCache.d.ts +15 -0
  22. package/core/cache/MemoryCache.js +19 -0
  23. package/core/cache/index.cjs +18 -0
  24. package/core/cache/index.d.ts +2 -0
  25. package/core/cache/index.js +2 -0
  26. package/core/index.cjs +1 -0
  27. package/core/index.d.ts +1 -0
  28. package/core/index.js +1 -0
  29. package/core/schema/TypeValidationError.cjs +36 -0
  30. package/core/schema/TypeValidationError.d.ts +15 -0
  31. package/core/schema/TypeValidationError.js +32 -0
  32. package/core/schema/index.cjs +2 -0
  33. package/core/schema/index.d.ts +2 -0
  34. package/core/schema/index.js +2 -0
  35. package/core/schema/parseJSON.cjs +6 -14
  36. package/core/schema/parseJSON.d.ts +3 -2
  37. package/core/schema/parseJSON.js +6 -14
  38. package/core/schema/validateTypes.cjs +65 -0
  39. package/core/schema/validateTypes.d.ts +34 -0
  40. package/core/schema/validateTypes.js +60 -0
  41. package/model-function/embed/EmbeddingModel.d.ts +2 -2
  42. package/model-function/executeStandardCall.cjs +3 -1
  43. package/model-function/executeStandardCall.d.ts +2 -2
  44. package/model-function/executeStandardCall.js +3 -1
  45. package/model-function/executeStreamCall.cjs +2 -1
  46. package/model-function/executeStreamCall.d.ts +2 -2
  47. package/model-function/executeStreamCall.js +2 -1
  48. package/model-function/generate-image/ImageGenerationModel.d.ts +2 -2
  49. package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +2 -2
  50. package/model-function/generate-speech/SpeechGenerationModel.d.ts +3 -3
  51. package/model-function/generate-structure/StructureFromTextPromptTemplate.d.ts +13 -0
  52. package/model-function/generate-structure/generateStructure.cjs +4 -1
  53. package/model-function/generate-structure/generateStructure.js +4 -1
  54. package/model-function/generate-structure/jsonStructurePrompt.cjs +12 -0
  55. package/model-function/generate-structure/jsonStructurePrompt.d.ts +3 -3
  56. package/model-function/generate-structure/jsonStructurePrompt.js +12 -0
  57. package/model-function/generate-structure/streamStructure.cjs +4 -1
  58. package/model-function/generate-structure/streamStructure.js +4 -1
  59. package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +3 -0
  60. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +11 -2
  61. package/model-function/generate-text/PromptTemplateTextGenerationModel.js +3 -0
  62. package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -2
  63. package/model-function/generate-text/TextGenerationModel.d.ts +16 -3
  64. package/model-function/generate-text/generateText.cjs +43 -1
  65. package/model-function/generate-text/generateText.js +43 -1
  66. package/model-function/generate-transcription/TranscriptionModel.d.ts +2 -2
  67. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +20 -8
  68. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +27 -5
  69. package/model-provider/anthropic/AnthropicTextGenerationModel.js +20 -8
  70. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +8 -3
  71. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +3 -3
  72. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +8 -3
  73. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +8 -3
  74. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
  75. package/model-provider/cohere/CohereTextEmbeddingModel.js +8 -3
  76. package/model-provider/cohere/CohereTextGenerationModel.cjs +20 -8
  77. package/model-provider/cohere/CohereTextGenerationModel.d.ts +45 -5
  78. package/model-provider/cohere/CohereTextGenerationModel.js +20 -8
  79. package/model-provider/cohere/CohereTokenizer.cjs +16 -6
  80. package/model-provider/cohere/CohereTokenizer.d.ts +3 -3
  81. package/model-provider/cohere/CohereTokenizer.js +16 -6
  82. package/model-provider/elevenlabs/ElevenLabsApiConfiguration.cjs +1 -1
  83. package/model-provider/elevenlabs/ElevenLabsApiConfiguration.js +1 -1
  84. package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +8 -3
  85. package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +2 -2
  86. package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +8 -3
  87. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +8 -3
  88. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +3 -3
  89. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +8 -3
  90. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +18 -4
  91. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +21 -3
  92. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +18 -4
  93. package/model-provider/llamacpp/LlamaCppCompletionModel.cjs +20 -8
  94. package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +125 -5
  95. package/model-provider/llamacpp/LlamaCppCompletionModel.js +20 -8
  96. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +8 -3
  97. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +3 -3
  98. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +8 -3
  99. package/model-provider/llamacpp/LlamaCppTokenizer.cjs +8 -3
  100. package/model-provider/llamacpp/LlamaCppTokenizer.d.ts +2 -2
  101. package/model-provider/llamacpp/LlamaCppTokenizer.js +8 -3
  102. package/model-provider/lmnt/LmntSpeechModel.cjs +8 -3
  103. package/model-provider/lmnt/LmntSpeechModel.d.ts +2 -2
  104. package/model-provider/lmnt/LmntSpeechModel.js +8 -3
  105. package/model-provider/mistral/MistralChatModel.cjs +20 -8
  106. package/model-provider/mistral/MistralChatModel.d.ts +55 -5
  107. package/model-provider/mistral/MistralChatModel.js +20 -8
  108. package/model-provider/mistral/MistralTextEmbeddingModel.cjs +8 -3
  109. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +3 -3
  110. package/model-provider/mistral/MistralTextEmbeddingModel.js +8 -3
  111. package/model-provider/ollama/OllamaChatModel.cjs +35 -8
  112. package/model-provider/ollama/OllamaChatModel.d.ts +31 -5
  113. package/model-provider/ollama/OllamaChatModel.js +35 -8
  114. package/model-provider/ollama/OllamaCompletionModel.cjs +20 -7
  115. package/model-provider/ollama/OllamaCompletionModel.d.ts +43 -5
  116. package/model-provider/ollama/OllamaCompletionModel.js +20 -7
  117. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +8 -3
  118. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +3 -3
  119. package/model-provider/ollama/OllamaTextEmbeddingModel.js +8 -3
  120. package/model-provider/openai/AbstractOpenAIChatModel.cjs +23 -13
  121. package/model-provider/openai/AbstractOpenAIChatModel.d.ts +94 -7
  122. package/model-provider/openai/AbstractOpenAIChatModel.js +23 -13
  123. package/model-provider/openai/AbstractOpenAICompletionModel.cjs +21 -9
  124. package/model-provider/openai/AbstractOpenAICompletionModel.d.ts +35 -5
  125. package/model-provider/openai/AbstractOpenAICompletionModel.js +21 -9
  126. package/model-provider/openai/AzureOpenAIApiConfiguration.cjs +5 -2
  127. package/model-provider/openai/AzureOpenAIApiConfiguration.d.ts +2 -1
  128. package/model-provider/openai/AzureOpenAIApiConfiguration.js +5 -2
  129. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs +12 -6
  130. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +89 -5
  131. package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js +12 -6
  132. package/model-provider/openai/OpenAIChatModel.cjs +12 -4
  133. package/model-provider/openai/OpenAIChatModel.d.ts +3 -2
  134. package/model-provider/openai/OpenAIChatModel.js +12 -4
  135. package/model-provider/openai/OpenAIImageGenerationModel.cjs +10 -6
  136. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +4 -4
  137. package/model-provider/openai/OpenAIImageGenerationModel.js +10 -6
  138. package/model-provider/openai/OpenAISpeechModel.cjs +9 -4
  139. package/model-provider/openai/OpenAISpeechModel.d.ts +3 -3
  140. package/model-provider/openai/OpenAISpeechModel.js +9 -4
  141. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +11 -6
  142. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +3 -3
  143. package/model-provider/openai/OpenAITextEmbeddingModel.js +11 -6
  144. package/model-provider/openai/OpenAITranscriptionModel.cjs +9 -6
  145. package/model-provider/openai/OpenAITranscriptionModel.d.ts +4 -4
  146. package/model-provider/openai/OpenAITranscriptionModel.js +9 -6
  147. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +12 -4
  148. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +3 -2
  149. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +12 -4
  150. package/model-provider/stability/StabilityImageGenerationModel.cjs +10 -5
  151. package/model-provider/stability/StabilityImageGenerationModel.d.ts +3 -3
  152. package/model-provider/stability/StabilityImageGenerationModel.js +10 -5
  153. package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +9 -7
  154. package/model-provider/whispercpp/WhisperCppTranscriptionModel.d.ts +3 -3
  155. package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +9 -7
  156. package/observability/helicone/HeliconeOpenAIApiConfiguration.cjs +2 -1
  157. package/observability/helicone/HeliconeOpenAIApiConfiguration.d.ts +3 -1
  158. package/observability/helicone/HeliconeOpenAIApiConfiguration.js +2 -1
  159. package/package.json +2 -2
@@ -27,15 +27,20 @@ export class LlamaCppTokenizer {
     });
     this.api = api;
   }
-  async callTokenizeAPI(text, context) {
+  async callTokenizeAPI(text, callOptions) {
     const api = this.api;
-    const abortSignal = context?.abortSignal;
+    const abortSignal = callOptions?.run?.abortSignal;
     return callWithRetryAndThrottle({
       retry: api.retry,
       throttle: api.throttle,
       call: async () => postJsonToApi({
         url: api.assembleUrl(`/tokenize`),
-        headers: api.headers,
+        headers: api.headers({
+          functionType: "tokenize",
+          functionId: callOptions?.functionId,
+          run: callOptions?.run,
+          callId: "",
+        }),
         body: {
           content: text,
         },
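Note the shape change in the request above: api.headers goes from a plain headers object to a method that receives the function call context (functionType, functionId, run, callId). Combined with the new core/api/CustomHeaderProvider and the AbstractApiConfiguration changes in the file list, this lets API configurations derive headers per call. A minimal sketch of the intended use, assuming the customCallHeaders option that the updated HeliconeOpenAIApiConfiguration typings suggest (the option name and parameter shape are assumptions, not a documented signature):

import { generateText, openai, HeliconeOpenAIApiConfiguration } from "modelfusion";

// Sketch: attach per-call metadata headers derived from the call context.
const text = await generateText(
  openai
    .ChatTextGenerator({
      api: new HeliconeOpenAIApiConfiguration({
        // customCallHeaders is an assumption based on this diff.
        customCallHeaders: ({ functionId, callId }) => ({
          "Helicone-Property-FunctionId": functionId,
          "Helicone-Property-CallId": callId,
        }),
      }),
      model: "gpt-3.5-turbo",
    })
    .withTextPrompt(),
  "Write a short poem about headers.",
  { functionId: "poem-example" }
);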
@@ -25,9 +25,9 @@ class LmntSpeechModel extends AbstractModel_js_1.AbstractModel {
   get modelName() {
     return this.settings.voice;
   }
-  async callAPI(text, options) {
+  async callAPI(text, callOptions) {
     const api = this.settings.api ?? new LmntApiConfiguration_js_1.LmntApiConfiguration();
-    const abortSignal = options?.run?.abortSignal;
+    const abortSignal = callOptions.run?.abortSignal;
     return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
       retry: api.retry,
       throttle: api.throttle,
@@ -48,7 +48,12 @@ class LmntSpeechModel extends AbstractModel_js_1.AbstractModel {
     }
     return (0, postToApi_js_1.postToApi)({
       url: api.assembleUrl(`/ai/speech`),
-      headers: api.headers,
+      headers: api.headers({
+        functionType: callOptions.functionType,
+        functionId: callOptions.functionId,
+        run: callOptions.run,
+        callId: callOptions.callId,
+      }),
       body: {
         content: formData,
         values: {
@@ -1,6 +1,6 @@
 /// <reference types="node" />
 import { z } from "zod";
-import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { SpeechGenerationModel, SpeechGenerationModelSettings } from "../../model-function/generate-speech/SpeechGenerationModel.js";
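Every .d.ts in this release swaps the optional FunctionOptions parameter for a required FunctionCallOptions on the model-internal do* methods. Inferred from the call sites in this diff (functionType, functionId, run, callId), the new type is roughly the sketch below; treat it as an approximation of core/FunctionOptions.d.ts, not the literal declaration.

import type { Run } from "modelfusion";

// What callers may pass to model functions (other fields such as
// logging/observers omitted):
interface FunctionOptions {
  functionId?: string;
  run?: Run;
}

// What the do* methods now always receive: the full call context,
// which API configurations can turn into per-call headers.
interface FunctionCallOptions extends FunctionOptions {
  functionType: string;
  callId: string;
}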
@@ -34,7 +34,7 @@ export declare class LmntSpeechModel extends AbstractModel<LmntSpeechModelSettin
   get modelName(): string;
   private callAPI;
   get settingsForEvent(): Partial<LmntSpeechModelSettings>;
-  doGenerateSpeechStandard(text: string, options?: FunctionOptions): Promise<Buffer>;
+  doGenerateSpeechStandard(text: string, options: FunctionCallOptions): Promise<Buffer>;
   withSettings(additionalSettings: Partial<LmntSpeechModelSettings>): this;
 }
 declare const lmntSpeechResponseSchema: z.ZodObject<{
@@ -22,9 +22,9 @@ export class LmntSpeechModel extends AbstractModel {
   get modelName() {
     return this.settings.voice;
   }
-  async callAPI(text, options) {
+  async callAPI(text, callOptions) {
     const api = this.settings.api ?? new LmntApiConfiguration();
-    const abortSignal = options?.run?.abortSignal;
+    const abortSignal = callOptions.run?.abortSignal;
     return callWithRetryAndThrottle({
       retry: api.retry,
       throttle: api.throttle,
@@ -45,7 +45,12 @@ export class LmntSpeechModel extends AbstractModel {
     }
     return postToApi({
       url: api.assembleUrl(`/ai/speech`),
-      headers: api.headers,
+      headers: api.headers({
+        functionType: callOptions.functionType,
+        functionId: callOptions.functionId,
+        run: callOptions.run,
+        callId: callOptions.callId,
+      }),
       body: {
         content: formData,
         values: {
@@ -5,6 +5,7 @@ const zod_1 = require("zod");
 const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
+const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
 const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
@@ -43,9 +44,9 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
   get modelName() {
     return this.settings.model;
   }
-  async callAPI(prompt, options) {
+  async callAPI(prompt, callOptions, options) {
     const api = this.settings.api ?? new MistralApiConfiguration_js_1.MistralApiConfiguration();
-    const abortSignal = options.run?.abortSignal;
+    const abortSignal = callOptions.run?.abortSignal;
     const stream = options.responseFormat.stream;
     const successfulResponseHandler = options.responseFormat.handler;
     return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
@@ -53,7 +54,12 @@ class MistralChatModel extends AbstractModel_js_1.AbstractModel {
       throttle: api.throttle,
       call: async () => (0, postToApi_js_1.postJsonToApi)({
         url: api.assembleUrl(`/chat/completions`),
-        headers: api.headers,
+        headers: api.headers({
+          functionType: callOptions.functionType,
+          functionId: callOptions.functionId,
+          run: callOptions.run,
+          callId: callOptions.callId,
+        }),
         body: {
           stream,
           messages: prompt,
@@ -81,10 +87,17 @@
     return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
   }
   async doGenerateTexts(prompt, options) {
-    const response = await this.callAPI(prompt, {
-      ...options,
+    return this.processTextGenerationResponse(await this.callAPI(prompt, options, {
       responseFormat: exports.MistralChatResponseFormat.json,
-    });
+    }));
+  }
+  restoreGeneratedTexts(rawResponse) {
+    return this.processTextGenerationResponse((0, validateTypes_js_1.validateTypes)({
+      structure: rawResponse,
+      schema: (0, ZodSchema_js_1.zodSchema)(mistralChatResponseSchema),
+    }));
+  }
+  processTextGenerationResponse(response) {
     return {
       response,
       textGenerationResults: response.choices.map((choice) => ({
@@ -105,8 +118,7 @@
     }
   }
   doStreamText(prompt, options) {
-    return this.callAPI(prompt, {
-      ...options,
+    return this.callAPI(prompt, options, {
       responseFormat: exports.MistralChatResponseFormat.textDeltaIterable,
     });
   }
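The doGenerateTexts body is split so that processTextGenerationResponse(response) does the mapping while the new restoreGeneratedTexts(rawResponse) revalidates a stored raw response (via the new validateTypes helper) without a network call. That is the hook the new core/cache module (Cache, MemoryCache in the file list) plugs into. A minimal sketch, assuming the cache function option that the generateText.cjs changes (+43 lines) appear to wire up:

import { MemoryCache, generateText, ollama } from "modelfusion";

const model = ollama
  .ChatTextGenerator({ model: "llama2:chat", temperature: 1 })
  .withTextPrompt();

const cache = new MemoryCache();

// First call hits the API and stores the raw provider response.
const text1 = await generateText(
  model,
  "Write a short story about a robot learning to love:",
  { cache }
);

// Same model and prompt: the cached raw response is restored through
// restoreGeneratedTexts() instead of a second API call.
const text2 = await generateText(
  model,
  "Write a short story about a robot learning to love:",
  { cache }
);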
@@ -1,5 +1,5 @@
 import { z } from "zod";
-import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
@@ -54,11 +54,11 @@ export declare class MistralChatModel extends AbstractModel<MistralChatModelSett
   readonly contextWindowSize: undefined;
   readonly tokenizer: undefined;
   readonly countPromptTokens: undefined;
-  callAPI<RESULT>(prompt: MistralChatPrompt, options: {
+  callAPI<RESULT>(prompt: MistralChatPrompt, callOptions: FunctionCallOptions, options: {
     responseFormat: MistralChatResponseFormatType<RESULT>;
-  } & FunctionOptions): Promise<RESULT>;
+  }): Promise<RESULT>;
   get settingsForEvent(): Partial<MistralChatModelSettings>;
-  doGenerateTexts(prompt: MistralChatPrompt, options?: FunctionOptions): Promise<{
+  doGenerateTexts(prompt: MistralChatPrompt, options: FunctionCallOptions): Promise<{
     response: {
       object: string;
       model: string;
@@ -83,8 +83,58 @@ export declare class MistralChatModel extends AbstractModel<MistralChatModelSett
       finishReason: TextGenerationFinishReason;
     }[];
   }>;
+  restoreGeneratedTexts(rawResponse: unknown): {
+    response: {
+      object: string;
+      model: string;
+      usage: {
+        prompt_tokens: number;
+        completion_tokens: number;
+        total_tokens: number;
+      };
+      id: string;
+      created: number;
+      choices: {
+        message: {
+          role: "user" | "assistant";
+          content: string;
+        };
+        finish_reason: "length" | "stop" | "model_length";
+        index: number;
+      }[];
+    };
+    textGenerationResults: {
+      text: string;
+      finishReason: TextGenerationFinishReason;
+    }[];
+  };
+  processTextGenerationResponse(response: MistralChatResponse): {
+    response: {
+      object: string;
+      model: string;
+      usage: {
+        prompt_tokens: number;
+        completion_tokens: number;
+        total_tokens: number;
+      };
+      id: string;
+      created: number;
+      choices: {
+        message: {
+          role: "user" | "assistant";
+          content: string;
+        };
+        finish_reason: "length" | "stop" | "model_length";
+        index: number;
+      }[];
+    };
+    textGenerationResults: {
+      text: string;
+      finishReason: TextGenerationFinishReason;
+    }[];
+  };
   private translateFinishReason;
-  doStreamText(prompt: MistralChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
+  doStreamText(prompt: MistralChatPrompt, options: FunctionCallOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
     model: string;
     id: string;
     choices: {
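The restoreGeneratedTexts typing above is where the new core/schema/validateTypes helper surfaces: it checks an unknown value against a schema and returns it fully typed, throwing the new TypeValidationError on mismatch. A small usage sketch (assuming both helpers are re-exported from the package root, as the updated core/schema/index files suggest):

import { validateTypes, zodSchema } from "modelfusion";
import { z } from "zod";

const schema = zodSchema(z.object({ name: z.string() }));

// Returns the parsed value typed as { name: string }, or throws a
// TypeValidationError when the value does not match the schema.
const validated = validateTypes({
  structure: JSON.parse('{ "name": "mistral-tiny" }') as unknown,
  schema,
});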
@@ -2,6 +2,7 @@ import { z } from "zod";
 import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
 import { zodSchema } from "../../core/schema/ZodSchema.js";
+import { validateTypes } from "../../core/schema/validateTypes.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
@@ -40,9 +41,9 @@ export class MistralChatModel extends AbstractModel {
   get modelName() {
     return this.settings.model;
   }
-  async callAPI(prompt, options) {
+  async callAPI(prompt, callOptions, options) {
     const api = this.settings.api ?? new MistralApiConfiguration();
-    const abortSignal = options.run?.abortSignal;
+    const abortSignal = callOptions.run?.abortSignal;
     const stream = options.responseFormat.stream;
     const successfulResponseHandler = options.responseFormat.handler;
     return callWithRetryAndThrottle({
@@ -50,7 +51,12 @@ export class MistralChatModel extends AbstractModel {
       throttle: api.throttle,
       call: async () => postJsonToApi({
         url: api.assembleUrl(`/chat/completions`),
-        headers: api.headers,
+        headers: api.headers({
+          functionType: callOptions.functionType,
+          functionId: callOptions.functionId,
+          run: callOptions.run,
+          callId: callOptions.callId,
+        }),
         body: {
           stream,
           messages: prompt,
@@ -78,10 +84,17 @@ export class MistralChatModel extends AbstractModel {
     return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
   }
   async doGenerateTexts(prompt, options) {
-    const response = await this.callAPI(prompt, {
-      ...options,
+    return this.processTextGenerationResponse(await this.callAPI(prompt, options, {
       responseFormat: MistralChatResponseFormat.json,
-    });
+    }));
+  }
+  restoreGeneratedTexts(rawResponse) {
+    return this.processTextGenerationResponse(validateTypes({
+      structure: rawResponse,
+      schema: zodSchema(mistralChatResponseSchema),
+    }));
+  }
+  processTextGenerationResponse(response) {
     return {
       response,
       textGenerationResults: response.choices.map((choice) => ({
@@ -102,8 +115,7 @@ export class MistralChatModel extends AbstractModel {
     }
   }
   doStreamText(prompt, options) {
-    return this.callAPI(prompt, {
-      ...options,
+    return this.callAPI(prompt, options, {
       responseFormat: MistralChatResponseFormat.textDeltaIterable,
     });
   }
@@ -43,12 +43,12 @@ class MistralTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
   get modelName() {
     return this.settings.model;
   }
-  async callAPI(texts, options) {
+  async callAPI(texts, callOptions) {
     if (texts.length > this.maxValuesPerCall) {
       throw new Error(`The Mistral embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
     }
     const api = this.settings.api ?? new MistralApiConfiguration_js_1.MistralApiConfiguration();
-    const abortSignal = options?.run?.abortSignal;
+    const abortSignal = callOptions.run?.abortSignal;
     const model = this.settings.model;
     const encodingFormat = this.settings.encodingFormat ?? "float";
     return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
@@ -56,7 +56,12 @@ class MistralTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
       throttle: this.settings.api?.throttle,
       call: async () => (0, postToApi_js_1.postJsonToApi)({
         url: api.assembleUrl(`/embeddings`),
-        headers: api.headers,
+        headers: api.headers({
+          functionType: callOptions.functionType,
+          functionId: callOptions.functionId,
+          run: callOptions.run,
+          callId: callOptions.callId,
+        }),
         body: {
           model,
           input: texts,
@@ -1,5 +1,5 @@
 import { z } from "zod";
-import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { EmbeddingModel, EmbeddingModelSettings } from "../../model-function/embed/EmbeddingModel.js";
@@ -27,9 +27,9 @@ export declare class MistralTextEmbeddingModel extends AbstractModel<MistralText
    */
   readonly isParallelizable = false;
   readonly embeddingDimensions = 1024;
-  callAPI(texts: Array<string>, options?: FunctionOptions): Promise<MistralTextEmbeddingResponse>;
+  callAPI(texts: Array<string>, callOptions: FunctionCallOptions): Promise<MistralTextEmbeddingResponse>;
   get settingsForEvent(): Partial<MistralTextEmbeddingModelSettings>;
-  doEmbedValues(texts: string[], options?: FunctionOptions): Promise<{
+  doEmbedValues(texts: string[], options: FunctionCallOptions): Promise<{
     response: {
       object: string;
       data: {
@@ -40,12 +40,12 @@ export class MistralTextEmbeddingModel extends AbstractModel {
   get modelName() {
     return this.settings.model;
   }
-  async callAPI(texts, options) {
+  async callAPI(texts, callOptions) {
     if (texts.length > this.maxValuesPerCall) {
       throw new Error(`The Mistral embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
     }
     const api = this.settings.api ?? new MistralApiConfiguration();
-    const abortSignal = options?.run?.abortSignal;
+    const abortSignal = callOptions.run?.abortSignal;
     const model = this.settings.model;
     const encodingFormat = this.settings.encodingFormat ?? "float";
     return callWithRetryAndThrottle({
@@ -53,7 +53,12 @@ export class MistralTextEmbeddingModel extends AbstractModel {
       throttle: this.settings.api?.throttle,
       call: async () => postJsonToApi({
         url: api.assembleUrl(`/embeddings`),
-        headers: api.headers,
+        headers: api.headers({
+          functionType: callOptions.functionType,
+          functionId: callOptions.functionId,
+          run: callOptions.run,
+          callId: callOptions.callId,
+        }),
         body: {
           model,
           input: texts,
@@ -7,7 +7,9 @@ const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndTh
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
 const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
+const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+const StructureFromTextStreamingModel_js_1 = require("../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
 const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const TextGenerationToolCallModel_js_1 = require("../../tool/generate-tool-call/TextGenerationToolCallModel.cjs");
@@ -50,16 +52,21 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
   get modelName() {
     return this.settings.model;
   }
-  async callAPI(prompt, options) {
+  async callAPI(prompt, callOptions, options) {
     const { responseFormat } = options;
     const api = this.settings.api ?? new OllamaApiConfiguration_js_1.OllamaApiConfiguration();
-    const abortSignal = options.run?.abortSignal;
+    const abortSignal = callOptions.run?.abortSignal;
     return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
       retry: api.retry,
       throttle: api.throttle,
       call: async () => (0, postToApi_js_1.postJsonToApi)({
         url: api.assembleUrl(`/api/chat`),
-        headers: api.headers,
+        headers: api.headers({
+          functionType: callOptions.functionType,
+          functionId: callOptions.functionId,
+          run: callOptions.run,
+          callId: callOptions.callId,
+        }),
         body: {
           stream: responseFormat.stream,
           model: this.settings.model,
@@ -112,10 +119,17 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
     return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
   }
   async doGenerateTexts(prompt, options) {
-    const response = await this.callAPI(prompt, {
-      ...options,
+    return this.processTextGenerationResponse(await this.callAPI(prompt, options, {
       responseFormat: exports.OllamaChatResponseFormat.json,
-    });
+    }));
+  }
+  restoreGeneratedTexts(rawResponse) {
+    return this.processTextGenerationResponse((0, validateTypes_js_1.validateTypes)({
+      structure: rawResponse,
+      schema: (0, ZodSchema_js_1.zodSchema)(ollamaChatResponseSchema),
+    }));
+  }
+  processTextGenerationResponse(response) {
     return {
       response,
       textGenerationResults: [
@@ -127,8 +141,7 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
     };
   }
   doStreamText(prompt, options) {
-    return this.callAPI(prompt, {
-      ...options,
+    return this.callAPI(prompt, options, {
       responseFormat: exports.OllamaChatResponseFormat.deltaIterable,
     });
   }
@@ -148,6 +161,17 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
       template: promptTemplate,
     });
   }
+  asStructureGenerationModel(promptTemplate) {
+    return "adaptModel" in promptTemplate
+      ? new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+          model: promptTemplate.adaptModel(this),
+          template: promptTemplate,
+        })
+      : new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+          model: this,
+          template: promptTemplate,
+        });
+  }
   /**
    * Returns this model with a text prompt template.
    */
@@ -177,6 +201,9 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
       promptTemplate,
     });
   }
+  withJsonOutput() {
+    return this.withSettings({ format: "json" });
+  }
   withSettings(additionalSettings) {
     return new OllamaChatModel(Object.assign({}, this.settings, additionalSettings));
   }
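Together, asStructureGenerationModel(promptTemplate) and withJsonOutput() (which a flexible template can trigger through its adaptModel hook, switching Ollama to format: "json") make Ollama chat models usable for structure generation. A sketch of the intended usage, assuming the jsonStructurePrompt.text() flexible template from the file list and the positional generateStructure signature of this release:

import { generateStructure, jsonStructurePrompt, ollama, zodSchema } from "modelfusion";
import { z } from "zod";

const character = await generateStructure(
  ollama
    .ChatTextGenerator({
      model: "openhermes2.5-mistral",
      maxGenerationTokens: 1024,
      temperature: 0,
    })
    // jsonStructurePrompt.text() has an adaptModel hook, so this branch
    // applies withJsonOutput() before wrapping the model.
    .asStructureGenerationModel(jsonStructurePrompt.text()),
  zodSchema(
    z.object({
      name: z.string(),
      class: z.string().describe("Character class, e.g. warrior, mage, or thief."),
      description: z.string(),
    })
  ),
  "Generate a character description for a fantasy role-playing game."
);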
@@ -1,8 +1,10 @@
 import { z } from "zod";
-import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
@@ -32,11 +34,11 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
   readonly tokenizer: undefined;
   readonly countPromptTokens: undefined;
   readonly contextWindowSize: undefined;
-  callAPI<RESPONSE>(prompt: OllamaChatPrompt, options: {
+  callAPI<RESPONSE>(prompt: OllamaChatPrompt, callOptions: FunctionCallOptions, options: {
     responseFormat: OllamaChatResponseFormatType<RESPONSE>;
-  } & FunctionOptions): Promise<RESPONSE>;
+  }): Promise<RESPONSE>;
   get settingsForEvent(): Partial<OllamaChatModelSettings>;
-  doGenerateTexts(prompt: OllamaChatPrompt, options?: FunctionOptions): Promise<{
+  doGenerateTexts(prompt: OllamaChatPrompt, options: FunctionCallOptions): Promise<{
     response: {
       message: {
         role: string;
@@ -57,7 +59,29 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
       finishReason: "unknown";
     }[];
   }>;
-  doStreamText(prompt: OllamaChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
+  restoreGeneratedTexts(rawResponse: unknown): {
+    response: {
+      message: {
+        role: string;
+        content: string;
+      };
+      model: string;
+      done: true;
+      created_at: string;
+      total_duration: number;
+      prompt_eval_count: number;
+      eval_count: number;
+      eval_duration: number;
+      load_duration?: number | undefined;
+      prompt_eval_duration?: number | undefined;
+    };
+    textGenerationResults: {
+      text: string;
+      finishReason: "unknown";
+    }[];
+  };
+  private processTextGenerationResponse;
+  doStreamText(prompt: OllamaChatPrompt, options: FunctionCallOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
     message: {
       role: string;
       content: string;
@@ -79,6 +103,7 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
   extractTextDelta(delta: unknown): string | undefined;
   asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaChatPrompt, this>;
   asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): TextGenerationToolCallsModel<INPUT_PROMPT, OllamaChatPrompt, this>;
+  asStructureGenerationModel<INPUT_PROMPT, OllamaChatPrompt>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, OllamaChatPrompt> | FlexibleStructureFromTextPromptTemplate<INPUT_PROMPT, unknown>): StructureFromTextStreamingModel<INPUT_PROMPT, unknown, TextStreamingModel<unknown, import("../../model-function/generate-text/TextGenerationModel.js").TextGenerationModelSettings>> | StructureFromTextStreamingModel<INPUT_PROMPT, OllamaChatPrompt, TextStreamingModel<OllamaChatPrompt, import("../../model-function/generate-text/TextGenerationModel.js").TextGenerationModelSettings>>;
   /**
    * Returns this model with a text prompt template.
    */
@@ -92,6 +117,7 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
    */
   withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, OllamaChatPrompt, OllamaChatModelSettings, this>;
   withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OllamaChatPrompt, OllamaChatModelSettings, this>;
+  withJsonOutput(): this;
   withSettings(additionalSettings: Partial<OllamaChatModelSettings>): this;
 }
 declare const ollamaChatResponseSchema: z.ZodObject<{
@@ -4,7 +4,9 @@ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottl
 import { postJsonToApi } from "../../core/api/postToApi.js";
 import { zodSchema } from "../../core/schema/ZodSchema.js";
 import { safeParseJSON } from "../../core/schema/parseJSON.js";
+import { validateTypes } from "../../core/schema/validateTypes.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
@@ -47,16 +49,21 @@ export class OllamaChatModel extends AbstractModel {
   get modelName() {
     return this.settings.model;
   }
-  async callAPI(prompt, options) {
+  async callAPI(prompt, callOptions, options) {
     const { responseFormat } = options;
     const api = this.settings.api ?? new OllamaApiConfiguration();
-    const abortSignal = options.run?.abortSignal;
+    const abortSignal = callOptions.run?.abortSignal;
     return callWithRetryAndThrottle({
       retry: api.retry,
       throttle: api.throttle,
       call: async () => postJsonToApi({
         url: api.assembleUrl(`/api/chat`),
-        headers: api.headers,
+        headers: api.headers({
+          functionType: callOptions.functionType,
+          functionId: callOptions.functionId,
+          run: callOptions.run,
+          callId: callOptions.callId,
+        }),
         body: {
           stream: responseFormat.stream,
           model: this.settings.model,
@@ -109,10 +116,17 @@ export class OllamaChatModel extends AbstractModel {
     return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
   }
   async doGenerateTexts(prompt, options) {
-    const response = await this.callAPI(prompt, {
-      ...options,
+    return this.processTextGenerationResponse(await this.callAPI(prompt, options, {
       responseFormat: OllamaChatResponseFormat.json,
-    });
+    }));
+  }
+  restoreGeneratedTexts(rawResponse) {
+    return this.processTextGenerationResponse(validateTypes({
+      structure: rawResponse,
+      schema: zodSchema(ollamaChatResponseSchema),
+    }));
+  }
+  processTextGenerationResponse(response) {
     return {
       response,
       textGenerationResults: [
@@ -124,8 +138,7 @@ export class OllamaChatModel extends AbstractModel {
     };
   }
   doStreamText(prompt, options) {
-    return this.callAPI(prompt, {
-      ...options,
+    return this.callAPI(prompt, options, {
       responseFormat: OllamaChatResponseFormat.deltaIterable,
     });
   }
@@ -145,6 +158,17 @@ export class OllamaChatModel extends AbstractModel {
       template: promptTemplate,
     });
   }
+  asStructureGenerationModel(promptTemplate) {
+    return "adaptModel" in promptTemplate
+      ? new StructureFromTextStreamingModel({
+          model: promptTemplate.adaptModel(this),
+          template: promptTemplate,
+        })
+      : new StructureFromTextStreamingModel({
+          model: this,
+          template: promptTemplate,
+        });
+  }
   /**
    * Returns this model with a text prompt template.
    */
@@ -174,6 +198,9 @@ export class OllamaChatModel extends AbstractModel {
       promptTemplate,
     });
   }
+  withJsonOutput() {
+    return this.withSettings({ format: "json" });
+  }
   withSettings(additionalSettings) {
     return new OllamaChatModel(Object.assign({}, this.settings, additionalSettings));
   }