modelfusion 0.92.0 → 0.93.0

This diff compares the contents of publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (184)
  1. package/README.md +19 -19
  2. package/core/api/ApiCallError.cjs +9 -1
  3. package/core/api/ApiCallError.d.ts +4 -1
  4. package/core/api/ApiCallError.js +9 -1
  5. package/model-function/{PromptFormat.d.ts → PromptTemplate.d.ts} +2 -2
  6. package/model-function/generate-image/ImageGenerationModel.d.ts +2 -2
  7. package/model-function/generate-image/{PromptFormatImageGenerationModel.cjs → PromptTemplateImageGenerationModel.cjs} +11 -11
  8. package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +20 -0
  9. package/model-function/generate-image/{PromptFormatImageGenerationModel.js → PromptTemplateImageGenerationModel.js} +9 -9
  10. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +6 -6
  11. package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +4 -4
  12. package/model-function/generate-structure/StructureFromTextGenerationModel.js +6 -6
  13. package/model-function/generate-structure/{StructureFromTextPromptFormat.d.ts → StructureFromTextPromptTemplate.d.ts} +1 -1
  14. package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +4 -4
  15. package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +2 -2
  16. package/model-function/generate-structure/StructureFromTextStreamingModel.js +4 -4
  17. package/model-function/generate-structure/index.cjs +1 -1
  18. package/model-function/generate-structure/index.d.ts +1 -1
  19. package/model-function/generate-structure/index.js +1 -1
  20. package/model-function/generate-structure/jsonStructurePrompt.d.ts +2 -2
  21. package/model-function/generate-text/{PromptFormatTextGenerationModel.cjs → PromptTemplateTextGenerationModel.cjs} +21 -21
  22. package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +35 -0
  23. package/model-function/generate-text/{PromptFormatTextGenerationModel.js → PromptTemplateTextGenerationModel.js} +19 -19
  24. package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +38 -0
  25. package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +16 -0
  26. package/model-function/generate-text/PromptTemplateTextStreamingModel.js +34 -0
  27. package/model-function/generate-text/TextGenerationModel.d.ts +3 -3
  28. package/model-function/generate-text/TextGenerationPromptTemplate.d.ts +11 -0
  29. package/model-function/generate-text/index.cjs +4 -4
  30. package/model-function/generate-text/index.d.ts +4 -4
  31. package/model-function/generate-text/index.js +4 -4
  32. package/model-function/generate-text/{prompt-format/AlpacaPromptFormat.cjs → prompt-template/AlpacaPromptTemplate.cjs} +5 -2
  33. package/model-function/generate-text/{prompt-format/AlpacaPromptFormat.d.ts → prompt-template/AlpacaPromptTemplate.d.ts} +5 -5
  34. package/model-function/generate-text/{prompt-format/AlpacaPromptFormat.js → prompt-template/AlpacaPromptTemplate.js} +5 -2
  35. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.test.cjs +31 -0
  36. package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.test.js +29 -0
  37. package/model-function/generate-text/{prompt-format/ChatMLPromptFormat.cjs → prompt-template/ChatMLPromptTemplate.cjs} +5 -5
  38. package/model-function/generate-text/{prompt-format/ChatMLPromptFormat.d.ts → prompt-template/ChatMLPromptTemplate.d.ts} +7 -7
  39. package/model-function/generate-text/{prompt-format/ChatMLPromptFormat.js → prompt-template/ChatMLPromptTemplate.js} +5 -5
  40. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.cjs +49 -0
  41. package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.test.js +47 -0
  42. package/model-function/generate-text/{prompt-format → prompt-template}/ChatPrompt.d.ts +1 -1
  43. package/model-function/generate-text/prompt-template/Content.js +1 -0
  44. package/model-function/generate-text/{prompt-format → prompt-template}/InstructionPrompt.d.ts +7 -0
  45. package/model-function/generate-text/prompt-template/InstructionPrompt.js +1 -0
  46. package/model-function/generate-text/{prompt-format/Llama2PromptFormat.cjs → prompt-template/Llama2PromptTemplate.cjs} +8 -7
  47. package/model-function/generate-text/{prompt-format/Llama2PromptFormat.d.ts → prompt-template/Llama2PromptTemplate.d.ts} +7 -6
  48. package/model-function/generate-text/{prompt-format/Llama2PromptFormat.js → prompt-template/Llama2PromptTemplate.js} +8 -7
  49. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.cjs +49 -0
  50. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.d.ts +1 -0
  51. package/model-function/generate-text/prompt-template/Llama2PromptTemplate.test.js +47 -0
  52. package/model-function/generate-text/{prompt-format/TextPromptFormat.cjs → prompt-template/TextPromptTemplate.cjs} +3 -0
  53. package/model-function/generate-text/{prompt-format/TextPromptFormat.d.ts → prompt-template/TextPromptTemplate.d.ts} +4 -4
  54. package/model-function/generate-text/{prompt-format/TextPromptFormat.js → prompt-template/TextPromptTemplate.js} +3 -0
  55. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.cjs +49 -0
  56. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.d.ts +1 -0
  57. package/model-function/generate-text/prompt-template/TextPromptTemplate.test.js +47 -0
  58. package/model-function/generate-text/{prompt-format/VicunaPromptFormat.d.ts → prompt-template/VicunaPromptTemplate.d.ts} +2 -2
  59. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.cjs +21 -0
  60. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.d.ts +1 -0
  61. package/model-function/generate-text/prompt-template/VicunaPromptTemplate.test.js +19 -0
  62. package/model-function/generate-text/{prompt-format → prompt-template}/index.cjs +6 -6
  63. package/model-function/generate-text/prompt-template/index.d.ts +10 -0
  64. package/model-function/generate-text/prompt-template/index.js +10 -0
  65. package/model-function/index.cjs +2 -2
  66. package/model-function/index.d.ts +2 -2
  67. package/model-function/index.js +2 -2
  68. package/model-provider/anthropic/{AnthropicPromptFormat.cjs → AnthropicPromptTemplate.cjs} +15 -8
  69. package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +17 -0
  70. package/model-provider/anthropic/{AnthropicPromptFormat.js → AnthropicPromptTemplate.js} +15 -8
  71. package/model-provider/anthropic/AnthropicPromptTemplate.test.cjs +49 -0
  72. package/model-provider/anthropic/AnthropicPromptTemplate.test.d.ts +1 -0
  73. package/model-provider/anthropic/AnthropicPromptTemplate.test.js +47 -0
  74. package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +12 -12
  75. package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +9 -9
  76. package/model-provider/anthropic/AnthropicTextGenerationModel.js +12 -12
  77. package/model-provider/anthropic/index.cjs +2 -2
  78. package/model-provider/anthropic/index.d.ts +1 -1
  79. package/model-provider/anthropic/index.js +1 -1
  80. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +5 -5
  81. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +4 -4
  82. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +5 -5
  83. package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.d.ts +2 -2
  84. package/model-provider/cohere/CohereTextGenerationModel.cjs +10 -10
  85. package/model-provider/cohere/CohereTextGenerationModel.d.ts +7 -7
  86. package/model-provider/cohere/CohereTextGenerationModel.js +10 -10
  87. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +4 -4
  88. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +3 -3
  89. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +4 -4
  90. package/model-provider/llamacpp/{LlamaCppBakLLaVA1Format.cjs → LlamaCppBakLLaVA1PromptTemplate.cjs} +1 -1
  91. package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +11 -0
  92. package/model-provider/llamacpp/{LlamaCppBakLLaVA1Format.js → LlamaCppBakLLaVA1PromptTemplate.js} +1 -1
  93. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +12 -12
  94. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +7 -7
  95. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +12 -12
  96. package/model-provider/llamacpp/index.cjs +2 -2
  97. package/model-provider/llamacpp/index.d.ts +1 -1
  98. package/model-provider/llamacpp/index.js +1 -1
  99. package/model-provider/ollama/OllamaError.cjs +25 -24
  100. package/model-provider/ollama/OllamaError.d.ts +1 -11
  101. package/model-provider/ollama/OllamaError.js +24 -22
  102. package/model-provider/ollama/OllamaTextGenerationModel.cjs +47 -10
  103. package/model-provider/ollama/OllamaTextGenerationModel.d.ts +12 -8
  104. package/model-provider/ollama/OllamaTextGenerationModel.js +48 -11
  105. package/model-provider/ollama/OllamaTextGenerationModel.test.cjs +63 -0
  106. package/model-provider/ollama/OllamaTextGenerationModel.test.d.ts +1 -0
  107. package/model-provider/ollama/OllamaTextGenerationModel.test.js +61 -0
  108. package/model-provider/ollama/index.cjs +1 -3
  109. package/model-provider/ollama/index.d.ts +1 -1
  110. package/model-provider/ollama/index.js +0 -1
  111. package/model-provider/openai/OpenAICompletionModel.cjs +10 -10
  112. package/model-provider/openai/OpenAICompletionModel.d.ts +7 -7
  113. package/model-provider/openai/OpenAICompletionModel.js +10 -10
  114. package/model-provider/openai/OpenAIError.cjs +13 -29
  115. package/model-provider/openai/OpenAIError.d.ts +2 -11
  116. package/model-provider/openai/OpenAIError.js +11 -26
  117. package/model-provider/openai/OpenAIImageGenerationModel.cjs +4 -4
  118. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +3 -3
  119. package/model-provider/openai/OpenAIImageGenerationModel.js +4 -4
  120. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.cjs +16 -16
  121. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +14 -14
  122. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.js +16 -16
  123. package/model-provider/openai/chat/OpenAIChatMessage.d.ts +1 -1
  124. package/model-provider/openai/chat/OpenAIChatModel.cjs +15 -15
  125. package/model-provider/openai/chat/OpenAIChatModel.d.ts +12 -12
  126. package/model-provider/openai/chat/OpenAIChatModel.js +15 -15
  127. package/model-provider/openai/chat/{OpenAIChatPromptFormat.cjs → OpenAIChatPromptTemplate.cjs} +1 -1
  128. package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +20 -0
  129. package/model-provider/openai/chat/{OpenAIChatPromptFormat.js → OpenAIChatPromptTemplate.js} +1 -1
  130. package/model-provider/openai/index.cjs +2 -4
  131. package/model-provider/openai/index.d.ts +2 -2
  132. package/model-provider/openai/index.js +1 -2
  133. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +14 -14
  134. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +11 -11
  135. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +14 -14
  136. package/model-provider/stability/StabilityImageGenerationModel.cjs +5 -5
  137. package/model-provider/stability/StabilityImageGenerationModel.d.ts +4 -4
  138. package/model-provider/stability/StabilityImageGenerationModel.js +5 -5
  139. package/model-provider/stability/StabilityImageGenerationPrompt.d.ts +2 -2
  140. package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +5 -8
  141. package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +5 -8
  142. package/package.json +5 -4
  143. package/tool/generate-tool-call/TextGenerationToolCallModel.d.ts +2 -2
  144. package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs +6 -6
  145. package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.d.ts +4 -4
  146. package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js +6 -6
  147. package/util/AsyncQueue.test.cjs +20 -21
  148. package/util/AsyncQueue.test.js +9 -10
  149. package/util/isDeepEqualData.test.cjs +14 -15
  150. package/util/isDeepEqualData.test.js +14 -15
  151. package/util/runSafe.test.cjs +12 -13
  152. package/util/runSafe.test.js +6 -7
  153. package/model-function/generate-image/PromptFormatImageGenerationModel.d.ts +0 -20
  154. package/model-function/generate-text/PromptFormatTextGenerationModel.d.ts +0 -35
  155. package/model-function/generate-text/PromptFormatTextStreamingModel.cjs +0 -38
  156. package/model-function/generate-text/PromptFormatTextStreamingModel.d.ts +0 -16
  157. package/model-function/generate-text/PromptFormatTextStreamingModel.js +0 -34
  158. package/model-function/generate-text/TextGenerationPromptFormat.d.ts +0 -11
  159. package/model-function/generate-text/prompt-format/index.d.ts +0 -10
  160. package/model-function/generate-text/prompt-format/index.js +0 -10
  161. package/model-provider/anthropic/AnthropicPromptFormat.d.ts +0 -17
  162. package/model-provider/llamacpp/LlamaCppBakLLaVA1Format.d.ts +0 -11
  163. package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +0 -20
  164. /package/model-function/{PromptFormat.cjs → PromptTemplate.cjs} +0 -0
  165. /package/model-function/{PromptFormat.js → PromptTemplate.js} +0 -0
  166. /package/model-function/generate-structure/{StructureFromTextPromptFormat.cjs → StructureFromTextPromptTemplate.cjs} +0 -0
  167. /package/model-function/generate-structure/{StructureFromTextPromptFormat.js → StructureFromTextPromptTemplate.js} +0 -0
  168. /package/model-function/generate-text/{TextGenerationPromptFormat.cjs → TextGenerationPromptTemplate.cjs} +0 -0
  169. /package/model-function/generate-text/{TextGenerationPromptFormat.js → TextGenerationPromptTemplate.js} +0 -0
  170. /package/model-function/generate-text/{prompt-format/Content.js → prompt-template/AlpacaPromptTemplate.test.d.ts} +0 -0
  171. /package/model-function/generate-text/{prompt-format/InstructionPrompt.js → prompt-template/ChatMLPromptTemplate.test.d.ts} +0 -0
  172. /package/model-function/generate-text/{prompt-format → prompt-template}/ChatPrompt.cjs +0 -0
  173. /package/model-function/generate-text/{prompt-format → prompt-template}/ChatPrompt.js +0 -0
  174. /package/model-function/generate-text/{prompt-format → prompt-template}/Content.cjs +0 -0
  175. /package/model-function/generate-text/{prompt-format → prompt-template}/Content.d.ts +0 -0
  176. /package/model-function/generate-text/{prompt-format → prompt-template}/InstructionPrompt.cjs +0 -0
  177. /package/model-function/generate-text/{prompt-format → prompt-template}/InvalidPromptError.cjs +0 -0
  178. /package/model-function/generate-text/{prompt-format → prompt-template}/InvalidPromptError.d.ts +0 -0
  179. /package/model-function/generate-text/{prompt-format → prompt-template}/InvalidPromptError.js +0 -0
  180. /package/model-function/generate-text/{prompt-format/VicunaPromptFormat.cjs → prompt-template/VicunaPromptTemplate.cjs} +0 -0
  181. /package/model-function/generate-text/{prompt-format/VicunaPromptFormat.js → prompt-template/VicunaPromptTemplate.js} +0 -0
  182. /package/model-function/generate-text/{prompt-format → prompt-template}/trimChatPrompt.cjs +0 -0
  183. /package/model-function/generate-text/{prompt-format → prompt-template}/trimChatPrompt.d.ts +0 -0
  184. /package/model-function/generate-text/{prompt-format → prompt-template}/trimChatPrompt.js +0 -0
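The central change in 0.93.0 runs through the entire file list above: PromptFormat is renamed to PromptTemplate (the prompt-format directories move to prompt-template, withPromptFormat becomes withPromptTemplate, TextGenerationPromptFormat becomes TextGenerationPromptTemplate, and so on). A minimal before/after sketch follows; it assumes the model class is re-exported from the package root as in the test files below, and the inline template object is illustrative — only its shape (format plus stopSequences) is taken from the withPromptTemplate bodies in this diff.

import { OllamaTextGenerationModel } from "modelfusion";

const model = new OllamaTextGenerationModel({ model: "llama2" });

// Illustrative template: format() maps the input prompt to the model's text
// prompt; stopSequences get merged into the model settings (see the
// withPromptTemplate implementations in the hunks below).
const template = {
  format: (instruction: string) =>
    `### Instruction:\n${instruction}\n\n### Response:\n`,
  stopSequences: ["### Instruction:"],
};

// 0.92.x: const promptedModel = model.withPromptFormat(template);
// 0.93.0:
const promptedModel = model.withPromptTemplate(template);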
package/model-provider/ollama/OllamaTextGenerationModel.js
@@ -1,9 +1,11 @@
  import { z } from "zod";
+ import { ApiCallError } from "../../core/api/ApiCallError.js";
  import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
- import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+ import { postJsonToApi } from "../../core/api/postToApi.js";
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
+ import { safeParseJSON } from "../../core/schema/parseJSON.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
- import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
+ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
  import { TextGenerationToolCallsOrGenerateTextModel, } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
@@ -93,27 +95,27 @@ export class OllamaTextGenerationModel extends AbstractModel {
  responseFormat: OllamaTextGenerationResponseFormat.deltaIterable,
  });
  }
- asToolCallGenerationModel(promptFormat) {
+ asToolCallGenerationModel(promptTemplate) {
  return new TextGenerationToolCallModel({
  model: this,
- format: promptFormat,
+ format: promptTemplate,
  });
  }
- asToolCallsOrTextGenerationModel(promptFormat) {
+ asToolCallsOrTextGenerationModel(promptTemplate) {
  return new TextGenerationToolCallsOrGenerateTextModel({
  model: this,
- format: promptFormat,
+ template: promptTemplate,
  });
  }
- withPromptFormat(promptFormat) {
- return new PromptFormatTextStreamingModel({
+ withPromptTemplate(promptTemplate) {
+ return new PromptTemplateTextStreamingModel({
  model: this.withSettings({
  stopSequences: [
  ...(this.settings.stopSequences ?? []),
- ...promptFormat.stopSequences,
+ ...promptTemplate.stopSequences,
  ],
  }),
- promptFormat,
+ promptTemplate,
  });
  }
  withSettings(additionalSettings) {
@@ -234,7 +236,42 @@ export const OllamaTextGenerationResponseFormat = {
  */
  json: {
  stream: false,
- handler: createJsonResponseHandler(ollamaTextGenerationResponseSchema),
+ handler: (async ({ response, url, requestBodyValues }) => {
+ const responseBody = await response.text();
+ const parsedResult = safeParseJSON({
+ text: responseBody,
+ schema: new ZodSchema(z.union([
+ ollamaTextGenerationResponseSchema,
+ z.object({
+ done: z.literal(false),
+ model: z.string(),
+ created_at: z.string(),
+ response: z.string(),
+ }),
+ ])),
+ });
+ if (!parsedResult.success) {
+ throw new ApiCallError({
+ message: "Invalid JSON response",
+ cause: parsedResult.error,
+ statusCode: response.status,
+ responseBody,
+ url,
+ requestBodyValues,
+ });
+ }
+ if (parsedResult.data.done === false) {
+ throw new ApiCallError({
+ message: "Incomplete Ollama response received",
+ statusCode: response.status,
+ responseBody,
+ url,
+ requestBodyValues,
+ isRetryable: true,
+ });
+ }
+ return parsedResult.data;
+ }),
  },
  /**
  * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
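The hunk above replaces createJsonResponseHandler with a hand-rolled handler: the body is parsed with safeParseJSON against a union schema, so a done: false body (which Ollama returns when it is overloaded) is detected and rethrown as an ApiCallError marked isRetryable: true instead of failing schema validation. The two test files that follow pin this behavior down. A usage sketch, assuming retryWithExponentialBackoff is exported alongside retryNever (the tests below import retryNever from core/api; the maxTries option name is an assumption):

import {
  generateText,
  OllamaApiConfiguration,
  OllamaTextGenerationModel,
  retryWithExponentialBackoff,
} from "modelfusion";

async function main() {
  // Because the incomplete-response error is retryable, a backoff policy
  // re-issues the request transparently instead of failing fast.
  const text = await generateText(
    new OllamaTextGenerationModel({
      api: new OllamaApiConfiguration({
        retry: retryWithExponentialBackoff({ maxTries: 5 }), // assumed option name
      }),
      model: "llama2",
    }),
    "test prompt"
  );
  console.log(text);
}

main();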
package/model-provider/ollama/OllamaTextGenerationModel.test.cjs
@@ -0,0 +1,63 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const assert_1 = require("assert");
+ const msw_1 = require("msw");
+ const node_1 = require("msw/node");
+ const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
+ const retryNever_js_1 = require("../../core/api/retryNever.cjs");
+ const generateText_js_1 = require("../../model-function/generate-text/generateText.cjs");
+ const OllamaApiConfiguration_js_1 = require("./OllamaApiConfiguration.cjs");
+ const OllamaTextGenerationModel_js_1 = require("./OllamaTextGenerationModel.cjs");
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ let responseBodyJson = {};
+ const server = (0, node_1.setupServer)(msw_1.http.post("http://127.0.0.1:11434/api/generate", () => msw_1.HttpResponse.json(responseBodyJson)));
+ beforeAll(() => server.listen());
+ beforeEach(() => {
+ responseBodyJson = {};
+ });
+ afterEach(() => server.resetHandlers());
+ afterAll(() => server.close());
+ describe("generateText", () => {
+ it("should return the generated text", async () => {
+ responseBodyJson = {
+ model: "test-model",
+ created_at: "2023-08-04T19:22:45.499127Z",
+ response: "test response",
+ context: [1, 2, 3],
+ done: true,
+ total_duration: 5589157167,
+ load_duration: 3013701500,
+ sample_count: 114,
+ sample_duration: 81442000,
+ prompt_eval_count: 46,
+ prompt_eval_duration: 1160282000,
+ eval_count: 113,
+ eval_duration: 1325948000,
+ };
+ const result = await (0, generateText_js_1.generateText)(new OllamaTextGenerationModel_js_1.OllamaTextGenerationModel({
+ model: "test-model",
+ }), "test prompt");
+ expect(result).toEqual("test response");
+ });
+ it("should throw retryable ApiCallError when Ollama is overloaded", async () => {
+ responseBodyJson = {
+ model: "",
+ created_at: "0001-01-01T00:00:00Z",
+ response: "",
+ done: false,
+ };
+ try {
+ await (0, generateText_js_1.generateText)(new OllamaTextGenerationModel_js_1.OllamaTextGenerationModel({
+ api: new OllamaApiConfiguration_js_1.OllamaApiConfiguration({
+ retry: (0, retryNever_js_1.retryNever)(),
+ }),
+ model: "test-model",
+ }), "test prompt");
+ (0, assert_1.fail)("Should have thrown ApiCallError");
+ }
+ catch (expectedError) {
+ expect(expectedError).toBeInstanceOf(ApiCallError_js_1.ApiCallError);
+ expect(expectedError.isRetryable).toBe(true);
+ }
+ });
+ });
package/model-provider/ollama/OllamaTextGenerationModel.test.js
@@ -0,0 +1,61 @@
+ import { fail } from "assert";
+ import { HttpResponse, http } from "msw";
+ import { setupServer } from "msw/node";
+ import { ApiCallError } from "../../core/api/ApiCallError.js";
+ import { retryNever } from "../../core/api/retryNever.js";
+ import { generateText } from "../../model-function/generate-text/generateText.js";
+ import { OllamaApiConfiguration } from "./OllamaApiConfiguration.js";
+ import { OllamaTextGenerationModel } from "./OllamaTextGenerationModel.js";
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ let responseBodyJson = {};
+ const server = setupServer(http.post("http://127.0.0.1:11434/api/generate", () => HttpResponse.json(responseBodyJson)));
+ beforeAll(() => server.listen());
+ beforeEach(() => {
+ responseBodyJson = {};
+ });
+ afterEach(() => server.resetHandlers());
+ afterAll(() => server.close());
+ describe("generateText", () => {
+ it("should return the generated text", async () => {
+ responseBodyJson = {
+ model: "test-model",
+ created_at: "2023-08-04T19:22:45.499127Z",
+ response: "test response",
+ context: [1, 2, 3],
+ done: true,
+ total_duration: 5589157167,
+ load_duration: 3013701500,
+ sample_count: 114,
+ sample_duration: 81442000,
+ prompt_eval_count: 46,
+ prompt_eval_duration: 1160282000,
+ eval_count: 113,
+ eval_duration: 1325948000,
+ };
+ const result = await generateText(new OllamaTextGenerationModel({
+ model: "test-model",
+ }), "test prompt");
+ expect(result).toEqual("test response");
+ });
+ it("should throw retryable ApiCallError when Ollama is overloaded", async () => {
+ responseBodyJson = {
+ model: "",
+ created_at: "0001-01-01T00:00:00Z",
+ response: "",
+ done: false,
+ };
+ try {
+ await generateText(new OllamaTextGenerationModel({
+ api: new OllamaApiConfiguration({
+ retry: retryNever(),
+ }),
+ model: "test-model",
+ }), "test prompt");
+ fail("Should have thrown ApiCallError");
+ }
+ catch (expectedError) {
+ expect(expectedError).toBeInstanceOf(ApiCallError);
+ expect(expectedError.isRetryable).toBe(true);
+ }
+ });
+ });
package/model-provider/ollama/index.cjs
@@ -26,10 +26,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
  return result;
  };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.ollama = exports.OllamaError = void 0;
+ exports.ollama = void 0;
  __exportStar(require("./OllamaApiConfiguration.cjs"), exports);
- var OllamaError_js_1 = require("./OllamaError.cjs");
- Object.defineProperty(exports, "OllamaError", { enumerable: true, get: function () { return OllamaError_js_1.OllamaError; } });
  exports.ollama = __importStar(require("./OllamaFacade.cjs"));
  __exportStar(require("./OllamaTextEmbeddingModel.cjs"), exports);
  __exportStar(require("./OllamaTextGenerationModel.cjs"), exports);
package/model-provider/ollama/index.d.ts
@@ -1,5 +1,5 @@
  export * from "./OllamaApiConfiguration.js";
- export { OllamaError } from "./OllamaError.js";
+ export { OllamaErrorData } from "./OllamaError.js";
  export * as ollama from "./OllamaFacade.js";
  export * from "./OllamaTextEmbeddingModel.js";
  export * from "./OllamaTextGenerationModel.js";
package/model-provider/ollama/index.js
@@ -1,5 +1,4 @@
  export * from "./OllamaApiConfiguration.js";
- export { OllamaError } from "./OllamaError.js";
  export * as ollama from "./OllamaFacade.js";
  export * from "./OllamaTextEmbeddingModel.js";
  export * from "./OllamaTextGenerationModel.js";
package/model-provider/openai/OpenAICompletionModel.cjs
@@ -7,8 +7,8 @@ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
  const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
- const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
- const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
+ const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+ const TextPromptTemplate_js_1 = require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs");
  const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
  const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
  const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
@@ -239,26 +239,26 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
  });
  }
  /**
- * Returns this model with an instruction prompt format.
+ * Returns this model with an instruction prompt template.
  */
  withInstructionPrompt() {
- return this.withPromptFormat((0, TextPromptFormat_js_1.instruction)());
+ return this.withPromptTemplate((0, TextPromptTemplate_js_1.instruction)());
  }
  /**
- * Returns this model with a chat prompt format.
+ * Returns this model with a chat prompt template.
  */
  withChatPrompt(options) {
- return this.withPromptFormat((0, TextPromptFormat_js_1.chat)(options));
+ return this.withPromptTemplate((0, TextPromptTemplate_js_1.chat)(options));
  }
- withPromptFormat(promptFormat) {
- return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
+ withPromptTemplate(promptTemplate) {
+ return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
  model: this.withSettings({
  stopSequences: [
  ...(this.settings.stopSequences ?? []),
- ...promptFormat.stopSequences,
+ ...promptTemplate.stopSequences,
  ],
  }),
- promptFormat,
+ promptTemplate,
  });
  }
  withSettings(additionalSettings) {
package/model-provider/openai/OpenAICompletionModel.d.ts
@@ -4,9 +4,9 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { ResponseHandler } from "../../core/api/postToApi.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { Delta } from "../../model-function/Delta.js";
- import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
+ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
  import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
- import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+ import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  /**
  * @see https://platform.openai.com/docs/models/
@@ -174,17 +174,17 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
  }>;
  doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  /**
- * Returns this model with an instruction prompt format.
+ * Returns this model with an instruction prompt template.
  */
- withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").TextInstructionPrompt, string, OpenAICompletionModelSettings, this>;
+ withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").TextInstructionPrompt, string, OpenAICompletionModelSettings, this>;
  /**
- * Returns this model with a chat prompt format.
+ * Returns this model with a chat prompt template.
  */
  withChatPrompt(options?: {
  user?: string;
  assistant?: string;
- }): PromptFormatTextStreamingModel<import("../../index.js").TextChatPrompt, string, OpenAICompletionModelSettings, this>;
- withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
+ }): PromptTemplateTextStreamingModel<import("../../index.js").TextChatPrompt, string, OpenAICompletionModelSettings, this>;
+ withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
  withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
  }
  declare const OpenAICompletionResponseSchema: z.ZodObject<{
package/model-provider/openai/OpenAICompletionModel.js
@@ -4,8 +4,8 @@ import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postTo
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { parseJSON } from "../../core/schema/parseJSON.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
- import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
- import { chat, instruction, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
+ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+ import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
  import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
  import { AsyncQueue } from "../../util/AsyncQueue.js";
  import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
@@ -233,26 +233,26 @@ export class OpenAICompletionModel extends AbstractModel {
  });
  }
  /**
- * Returns this model with an instruction prompt format.
+ * Returns this model with an instruction prompt template.
  */
  withInstructionPrompt() {
- return this.withPromptFormat(instruction());
+ return this.withPromptTemplate(instruction());
  }
  /**
- * Returns this model with a chat prompt format.
+ * Returns this model with a chat prompt template.
  */
  withChatPrompt(options) {
- return this.withPromptFormat(chat(options));
+ return this.withPromptTemplate(chat(options));
  }
- withPromptFormat(promptFormat) {
- return new PromptFormatTextStreamingModel({
+ withPromptTemplate(promptTemplate) {
+ return new PromptTemplateTextStreamingModel({
  model: this.withSettings({
  stopSequences: [
  ...(this.settings.stopSequences ?? []),
- ...promptFormat.stopSequences,
+ ...promptTemplate.stopSequences,
  ],
  }),
- promptFormat,
+ promptTemplate,
  });
  }
  withSettings(additionalSettings) {
package/model-provider/openai/OpenAIError.cjs
@@ -1,11 +1,11 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.failedOpenAICallResponseHandler = exports.OpenAIError = exports.openAIErrorDataSchema = void 0;
+ exports.failedOpenAICallResponseHandler = void 0;
  const zod_1 = require("zod");
  const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
  const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
- exports.openAIErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
+ const openAIErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
  error: zod_1.z.object({
  message: zod_1.z.string(),
  type: zod_1.z.string(),
@@ -13,50 +13,34 @@ exports.openAIErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
  code: zod_1.z.string().nullable(),
  }),
  }));
- class OpenAIError extends ApiCallError_js_1.ApiCallError {
- constructor({ data, statusCode, url, requestBodyValues, message, }) {
- super({
- message,
- statusCode,
- requestBodyValues,
- url,
- isRetryable: (statusCode === 429 &&
- // insufficient_quota is also reported as a 429, but it's not retryable:
- data?.error.type !== "insufficient_quota") ||
- statusCode >= 500,
- });
- Object.defineProperty(this, "data", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- this.data = data;
- }
- }
- exports.OpenAIError = OpenAIError;
  const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
  const responseBody = await response.text();
  // resilient parsing in case the response is not JSON or does not match the schema:
  try {
  const parsedError = (0, parseJSON_js_1.parseJSON)({
  text: responseBody,
- schema: exports.openAIErrorDataSchema,
+ schema: openAIErrorDataSchema,
  });
- return new OpenAIError({
+ return new ApiCallError_js_1.ApiCallError({
+ message: parsedError.error.message,
  url,
  requestBodyValues,
  statusCode: response.status,
- message: parsedError.error.message,
+ responseBody,
  data: parsedError,
+ isRetryable: (response.status === 429 &&
+ // insufficient_quota is also reported as a 429, but it's not retryable:
+ parsedError?.error.type !== "insufficient_quota") ||
+ response.status >= 500,
  });
  }
  catch (parseError) {
- return new OpenAIError({
+ return new ApiCallError_js_1.ApiCallError({
+ message: responseBody.trim() !== "" ? responseBody : response.statusText,
  url,
  requestBodyValues,
  statusCode: response.status,
- message: responseBody.trim() !== "" ? responseBody : response.statusText,
+ responseBody,
  });
  }
  };
package/model-provider/openai/OpenAIError.d.ts
@@ -1,7 +1,7 @@
  import { ApiCallError } from "../../core/api/ApiCallError.js";
  import { ResponseHandler } from "../../core/api/postToApi.js";
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
- export declare const openAIErrorDataSchema: ZodSchema<{
+ declare const openAIErrorDataSchema: ZodSchema<{
  error: {
  message: string;
  code: string | null;
@@ -10,14 +10,5 @@ export declare const openAIErrorDataSchema: ZodSchema<{
  };
  }>;
  export type OpenAIErrorData = (typeof openAIErrorDataSchema)["_type"];
- export declare class OpenAIError extends ApiCallError {
- readonly data?: OpenAIErrorData;
- constructor({ data, statusCode, url, requestBodyValues, message, }: {
- message: string;
- statusCode: number;
- url: string;
- requestBodyValues: unknown;
- data?: OpenAIErrorData;
- });
- }
  export declare const failedOpenAICallResponseHandler: ResponseHandler<ApiCallError>;
+ export {};
package/model-provider/openai/OpenAIError.js
@@ -2,7 +2,7 @@ import { z } from "zod";
  import { ApiCallError } from "../../core/api/ApiCallError.js";
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
  import { parseJSON } from "../../core/schema/parseJSON.js";
- export const openAIErrorDataSchema = new ZodSchema(z.object({
+ const openAIErrorDataSchema = new ZodSchema(z.object({
  error: z.object({
  message: z.string(),
  type: z.string(),
@@ -10,27 +10,6 @@ export const openAIErrorDataSchema = new ZodSchema(z.object({
  code: z.string().nullable(),
  }),
  }));
- export class OpenAIError extends ApiCallError {
- constructor({ data, statusCode, url, requestBodyValues, message, }) {
- super({
- message,
- statusCode,
- requestBodyValues,
- url,
- isRetryable: (statusCode === 429 &&
- // insufficient_quota is also reported as a 429, but it's not retryable:
- data?.error.type !== "insufficient_quota") ||
- statusCode >= 500,
- });
- Object.defineProperty(this, "data", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- this.data = data;
- }
- }
  export const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
  const responseBody = await response.text();
  // resilient parsing in case the response is not JSON or does not match the schema:
@@ -39,20 +18,26 @@ export const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
  text: responseBody,
  schema: openAIErrorDataSchema,
  });
- return new OpenAIError({
+ return new ApiCallError({
+ message: parsedError.error.message,
  url,
  requestBodyValues,
  statusCode: response.status,
- message: parsedError.error.message,
+ responseBody,
  data: parsedError,
+ isRetryable: (response.status === 429 &&
+ // insufficient_quota is also reported as a 429, but it's not retryable:
+ parsedError?.error.type !== "insufficient_quota") ||
+ response.status >= 500,
  });
  }
  catch (parseError) {
- return new OpenAIError({
+ return new ApiCallError({
+ message: responseBody.trim() !== "" ? responseBody : response.statusText,
  url,
  requestBodyValues,
  statusCode: response.status,
- message: responseBody.trim() !== "" ? responseBody : response.statusText,
+ responseBody,
  });
  }
  };
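With the OpenAIError class deleted, failedOpenAICallResponseHandler now returns plain ApiCallError instances that carry the parsed payload (data), the raw responseBody, and a precomputed isRetryable flag (429 except insufficient_quota, or any 5xx). Code that checked instanceof OpenAIError should check ApiCallError instead. A hedged sketch — the root re-export of ApiCallError is an assumption, and the error fields follow the constructor arguments visible above:

import { ApiCallError, OpenAICompletionModel, generateText } from "modelfusion";

async function main() {
  try {
    await generateText(
      new OpenAICompletionModel({ model: "gpt-3.5-turbo-instruct" }),
      "test prompt"
    );
  } catch (error) {
    // 0.92.x: if (error instanceof OpenAIError) { ... error.data ... }
    if (error instanceof ApiCallError) {
      console.log(error.isRetryable, error.responseBody, error.data);
    }
  }
}

main();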
package/model-provider/openai/OpenAIImageGenerationModel.cjs
@@ -5,7 +5,7 @@ const zod_1 = require("zod");
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
- const PromptFormatImageGenerationModel_js_1 = require("../../model-function/generate-image/PromptFormatImageGenerationModel.cjs");
+ const PromptTemplateImageGenerationModel_js_1 = require("../../model-function/generate-image/PromptTemplateImageGenerationModel.cjs");
  const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
  const OpenAIError_js_1 = require("./OpenAIError.cjs");
  exports.OPENAI_IMAGE_MODELS = {
@@ -122,10 +122,10 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
  base64Image: response.data[0].b64_json,
  };
  }
- withPromptFormat(promptFormat) {
- return new PromptFormatImageGenerationModel_js_1.PromptFormatImageGenerationModel({
+ withPromptTemplate(promptTemplate) {
+ return new PromptTemplateImageGenerationModel_js_1.PromptTemplateImageGenerationModel({
  model: this,
- promptFormat,
+ promptTemplate,
  });
  }
  withSettings(additionalSettings) {
package/model-provider/openai/OpenAIImageGenerationModel.d.ts
@@ -3,9 +3,9 @@ import { FunctionOptions } from "../../core/FunctionOptions.js";
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { ResponseHandler } from "../../core/api/postToApi.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
- import { PromptFormat } from "../../model-function/PromptFormat.js";
+ import { PromptTemplate } from "../../model-function/PromptTemplate.js";
  import { ImageGenerationModel, ImageGenerationModelSettings } from "../../model-function/generate-image/ImageGenerationModel.js";
- import { PromptFormatImageGenerationModel } from "../../model-function/generate-image/PromptFormatImageGenerationModel.js";
+ import { PromptTemplateImageGenerationModel } from "../../model-function/generate-image/PromptTemplateImageGenerationModel.js";
  export declare const OPENAI_IMAGE_MODELS: {
  "dall-e-2": {
  getCost(settings: OpenAIImageGenerationSettings): 2000 | 1800 | 1600 | null;
@@ -61,7 +61,7 @@ export declare class OpenAIImageGenerationModel extends AbstractModel<OpenAIImag
  };
  base64Image: string;
  }>;
- withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatImageGenerationModel<INPUT_PROMPT, string, OpenAIImageGenerationSettings, this>;
+ withPromptTemplate<INPUT_PROMPT>(promptTemplate: PromptTemplate<INPUT_PROMPT, string>): PromptTemplateImageGenerationModel<INPUT_PROMPT, string, OpenAIImageGenerationSettings, this>;
  withSettings(additionalSettings: Partial<OpenAIImageGenerationSettings>): this;
  }
  export type OpenAIImageGenerationResponseFormatType<T> = {
package/model-provider/openai/OpenAIImageGenerationModel.js
@@ -2,7 +2,7 @@ import { z } from "zod";
  import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
  import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
- import { PromptFormatImageGenerationModel } from "../../model-function/generate-image/PromptFormatImageGenerationModel.js";
+ import { PromptTemplateImageGenerationModel } from "../../model-function/generate-image/PromptTemplateImageGenerationModel.js";
  import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
  import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
  export const OPENAI_IMAGE_MODELS = {
@@ -118,10 +118,10 @@ export class OpenAIImageGenerationModel extends AbstractModel {
  base64Image: response.data[0].b64_json,
  };
  }
- withPromptFormat(promptFormat) {
- return new PromptFormatImageGenerationModel({
+ withPromptTemplate(promptTemplate) {
+ return new PromptTemplateImageGenerationModel({
  model: this,
- promptFormat,
+ promptTemplate,
  });
  }
  withSettings(additionalSettings) {