modelfusion 0.94.0 → 0.96.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. package/README.md +3 -3
  2. package/core/api/postToApi.cjs +30 -1
  3. package/core/api/postToApi.d.ts +7 -1
  4. package/core/api/postToApi.js +29 -1
  5. package/model-provider/index.cjs +1 -0
  6. package/model-provider/index.d.ts +1 -0
  7. package/model-provider/index.js +1 -0
  8. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +3 -1
  9. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +4 -0
  10. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +3 -1
  11. package/model-provider/mistral/MistralApiConfiguration.cjs +22 -0
  12. package/model-provider/mistral/MistralApiConfiguration.d.ts +12 -0
  13. package/model-provider/mistral/MistralApiConfiguration.js +18 -0
  14. package/model-provider/mistral/MistralError.cjs +17 -0
  15. package/model-provider/mistral/MistralError.d.ts +13 -0
  16. package/model-provider/mistral/MistralError.js +14 -0
  17. package/model-provider/mistral/MistralFacade.cjs +18 -0
  18. package/model-provider/mistral/MistralFacade.d.ts +6 -0
  19. package/model-provider/mistral/MistralFacade.js +12 -0
  20. package/model-provider/mistral/MistralPromptTemplate.cjs +64 -0
  21. package/model-provider/mistral/MistralPromptTemplate.d.ts +16 -0
  22. package/model-provider/mistral/MistralPromptTemplate.js +58 -0
  23. package/model-provider/mistral/MistralTextEmbeddingModel.cjs +100 -0
  24. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +106 -0
  25. package/model-provider/mistral/MistralTextEmbeddingModel.js +96 -0
  26. package/model-provider/mistral/MistralTextGenerationModel.cjs +254 -0
  27. package/model-provider/mistral/MistralTextGenerationModel.d.ts +231 -0
  28. package/model-provider/mistral/MistralTextGenerationModel.js +250 -0
  29. package/model-provider/mistral/index.cjs +34 -0
  30. package/model-provider/mistral/index.d.ts +6 -0
  31. package/model-provider/mistral/index.js +5 -0
  32. package/model-provider/ollama/OllamaError.cjs +5 -30
  33. package/model-provider/ollama/OllamaError.js +5 -29
  34. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +1 -7
  35. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +0 -1
  36. package/model-provider/ollama/OllamaTextEmbeddingModel.js +1 -7
  37. package/model-provider/openai/OpenAICompletionModel.d.ts +4 -4
  38. package/model-provider/openai/OpenAIError.cjs +9 -34
  39. package/model-provider/openai/OpenAIError.d.ts +1 -3
  40. package/model-provider/openai/OpenAIError.js +9 -33
  41. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +6 -6
  42. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  43. package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +2 -1
  44. package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +2 -1
  45. package/package.json +1 -1
@@ -0,0 +1,250 @@
1
+ import { z } from "zod";
2
+ import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
3
+ import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
4
+ import { ZodSchema } from "../../core/schema/ZodSchema.js";
5
+ import { safeParseJSON } from "../../core/schema/parseJSON.js";
6
+ import { AbstractModel } from "../../model-function/AbstractModel.js";
7
+ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
8
+ import { AsyncQueue } from "../../util/AsyncQueue.js";
9
+ import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
10
+ import { MistralApiConfiguration } from "./MistralApiConfiguration.js";
11
+ import { failedMistralCallResponseHandler } from "./MistralError.js";
12
+ import { chat, instruction, text } from "./MistralPromptTemplate.js";
13
+ export class MistralTextGenerationModel extends AbstractModel {
14
+ constructor(settings) {
15
+ super({ settings });
16
+ Object.defineProperty(this, "provider", {
17
+ enumerable: true,
18
+ configurable: true,
19
+ writable: true,
20
+ value: "mistral"
21
+ });
22
+ Object.defineProperty(this, "contextWindowSize", {
23
+ enumerable: true,
24
+ configurable: true,
25
+ writable: true,
26
+ value: undefined
27
+ });
28
+ Object.defineProperty(this, "tokenizer", {
29
+ enumerable: true,
30
+ configurable: true,
31
+ writable: true,
32
+ value: undefined
33
+ });
34
+ Object.defineProperty(this, "countPromptTokens", {
35
+ enumerable: true,
36
+ configurable: true,
37
+ writable: true,
38
+ value: undefined
39
+ });
40
+ }
41
+ get modelName() {
42
+ return this.settings.model;
43
+ }
44
+ async callAPI(prompt, options) {
45
+ const { model, temperature, topP, safeMode, randomSeed, maxCompletionTokens, } = this.settings;
46
+ const api = this.settings.api ?? new MistralApiConfiguration();
47
+ const abortSignal = options.run?.abortSignal;
48
+ const stream = options.responseFormat.stream;
49
+ const successfulResponseHandler = options.responseFormat.handler;
50
+ return callWithRetryAndThrottle({
51
+ retry: api.retry,
52
+ throttle: api.throttle,
53
+ call: async () => postJsonToApi({
54
+ url: api.assembleUrl(`/chat/completions`),
55
+ headers: api.headers,
56
+ body: {
57
+ stream,
58
+ messages: prompt,
59
+ model,
60
+ temperature,
61
+ top_p: topP,
62
+ max_tokens: maxCompletionTokens,
63
+ safe_mode: safeMode,
64
+ random_seed: randomSeed,
65
+ },
66
+ failedResponseHandler: failedMistralCallResponseHandler,
67
+ successfulResponseHandler,
68
+ abortSignal,
69
+ }),
70
+ });
71
+ }
72
+ get settingsForEvent() {
73
+ const eventSettingProperties = [
74
+ "maxCompletionTokens",
75
+ "temperature",
76
+ "topP",
77
+ "safeMode",
78
+ "randomSeed",
79
+ ];
80
+ return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
81
+ }
82
+ async doGenerateText(prompt, options) {
83
+ const response = await this.callAPI(prompt, {
84
+ ...options,
85
+ responseFormat: MistralTextGenerationResponseFormat.json,
86
+ });
87
+ return {
88
+ response,
89
+ text: response.choices[0].message.content,
90
+ };
91
+ }
92
+ doStreamText(prompt, options) {
93
+ return this.callAPI(prompt, {
94
+ ...options,
95
+ responseFormat: MistralTextGenerationResponseFormat.textDeltaIterable,
96
+ });
97
+ }
98
+ /**
99
+ * Returns this model with a text prompt template.
100
+ */
101
+ withTextPrompt() {
102
+ return this.withPromptTemplate(text());
103
+ }
104
+ /**
105
+ * Returns this model with an instruction prompt template.
106
+ */
107
+ withInstructionPrompt() {
108
+ return this.withPromptTemplate(instruction());
109
+ }
110
+ /**
111
+ * Returns this model with a chat prompt template.
112
+ */
113
+ withChatPrompt() {
114
+ return this.withPromptTemplate(chat());
115
+ }
116
+ withPromptTemplate(promptTemplate) {
117
+ return new PromptTemplateTextStreamingModel({
118
+ model: this,
119
+ promptTemplate,
120
+ });
121
+ }
122
+ withSettings(additionalSettings) {
123
+ return new MistralTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
124
+ }
125
+ }
126
+ const mistralTextGenerationResponseSchema = z.object({
127
+ id: z.string(),
128
+ object: z.string(),
129
+ created: z.number(),
130
+ model: z.string(),
131
+ choices: z.array(z.object({
132
+ index: z.number(),
133
+ message: z.object({
134
+ role: z.enum(["user", "assistant"]),
135
+ content: z.string(),
136
+ }),
137
+ finish_reason: z.enum(["stop", "length", "model_length"]),
138
+ })),
139
+ usage: z.object({
140
+ prompt_tokens: z.number(),
141
+ completion_tokens: z.number(),
142
+ total_tokens: z.number(),
143
+ }),
144
+ });
145
+ export const MistralTextGenerationResponseFormat = {
146
+ /**
147
+ * Returns the response as a JSON object.
148
+ */
149
+ json: {
150
+ stream: false,
151
+ handler: createJsonResponseHandler(mistralTextGenerationResponseSchema),
152
+ },
153
+ /**
154
+ * Returns an async iterable over the text deltas (only the text delta of the first choice).
155
+ */
156
+ textDeltaIterable: {
157
+ stream: true,
158
+ handler: async ({ response }) => createMistralTextGenerationDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
159
+ },
160
+ };
161
+ const mistralTextGenerationChunkSchema = new ZodSchema(z.object({
162
+ id: z.string(),
163
+ object: z.string().optional(),
164
+ created: z.number().optional(),
165
+ model: z.string(),
166
+ choices: z.array(z.object({
167
+ index: z.number(),
168
+ delta: z.object({
169
+ role: z.enum(["assistant", "user"]).optional().nullable(),
170
+ content: z.string().nullable().optional(),
171
+ }),
172
+ finish_reason: z
173
+ .enum(["stop", "length", "model_length"])
174
+ .nullable()
175
+ .optional(),
176
+ })),
177
+ }));
178
+ async function createMistralTextGenerationDeltaIterableQueue(stream, extractDeltaValue) {
179
+ const queue = new AsyncQueue();
180
+ const streamDelta = [];
181
+ // process the stream asynchronously (no 'await' on purpose):
182
+ parseEventSourceStream({ stream })
183
+ .then(async (events) => {
184
+ try {
185
+ for await (const event of events) {
186
+ const data = event.data;
187
+ if (data === "[DONE]") {
188
+ queue.close();
189
+ return;
190
+ }
191
+ const parseResult = safeParseJSON({
192
+ text: data,
193
+ schema: mistralTextGenerationChunkSchema,
194
+ });
195
+ if (!parseResult.success) {
196
+ queue.push({
197
+ type: "error",
198
+ error: parseResult.error,
199
+ });
200
+ // Note: the queue is not closed on purpose. Some providers might add additional
201
+ // chunks that are not parsable, and ModelFusion should be resilient to that.
202
+ continue;
203
+ }
204
+ const completionChunk = parseResult.data;
205
+ for (let i = 0; i < completionChunk.choices.length; i++) {
206
+ const eventChoice = completionChunk.choices[i];
207
+ const delta = eventChoice.delta;
208
+ if (streamDelta[i] == null) {
209
+ streamDelta[i] = {
210
+ role: undefined,
211
+ content: "",
212
+ isComplete: false,
213
+ delta,
214
+ };
215
+ }
216
+ const choice = streamDelta[i];
217
+ choice.delta = delta;
218
+ if (eventChoice.finish_reason != null) {
219
+ choice.isComplete = true;
220
+ }
221
+ if (delta.content != undefined) {
222
+ choice.content += delta.content;
223
+ }
224
+ if (delta.role != undefined) {
225
+ choice.role = delta.role;
226
+ }
227
+ }
228
+ // Since we're mutating the choices array in an async scenario,
229
+ // we need to make a deep copy:
230
+ const streamDeltaDeepCopy = JSON.parse(JSON.stringify(streamDelta));
231
+ queue.push({
232
+ type: "delta",
233
+ fullDelta: streamDeltaDeepCopy,
234
+ valueDelta: extractDeltaValue(streamDeltaDeepCopy),
235
+ });
236
+ }
237
+ }
238
+ catch (error) {
239
+ queue.push({ type: "error", error });
240
+ queue.close();
241
+ return;
242
+ }
243
+ })
244
+ .catch((error) => {
245
+ queue.push({ type: "error", error });
246
+ queue.close();
247
+ return;
248
+ });
249
+ return queue;
250
+ }
@@ -0,0 +1,34 @@
1
+ "use strict";
2
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
3
+ if (k2 === undefined) k2 = k;
4
+ var desc = Object.getOwnPropertyDescriptor(m, k);
5
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
6
+ desc = { enumerable: true, get: function() { return m[k]; } };
7
+ }
8
+ Object.defineProperty(o, k2, desc);
9
+ }) : (function(o, m, k, k2) {
10
+ if (k2 === undefined) k2 = k;
11
+ o[k2] = m[k];
12
+ }));
13
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
14
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
15
+ }) : function(o, v) {
16
+ o["default"] = v;
17
+ });
18
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
19
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
20
+ };
21
+ var __importStar = (this && this.__importStar) || function (mod) {
22
+ if (mod && mod.__esModule) return mod;
23
+ var result = {};
24
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
25
+ __setModuleDefault(result, mod);
26
+ return result;
27
+ };
28
+ Object.defineProperty(exports, "__esModule", { value: true });
29
+ exports.MistralPrompt = exports.mistral = void 0;
30
+ __exportStar(require("./MistralApiConfiguration.cjs"), exports);
31
+ exports.mistral = __importStar(require("./MistralFacade.cjs"));
32
+ exports.MistralPrompt = __importStar(require("./MistralPromptTemplate.cjs"));
33
+ __exportStar(require("./MistralTextEmbeddingModel.cjs"), exports);
34
+ __exportStar(require("./MistralTextGenerationModel.cjs"), exports);
@@ -0,0 +1,6 @@
1
+ export * from "./MistralApiConfiguration.js";
2
+ export { MistralErrorData } from "./MistralError.js";
3
+ export * as mistral from "./MistralFacade.js";
4
+ export * as MistralPrompt from "./MistralPromptTemplate.js";
5
+ export * from "./MistralTextEmbeddingModel.js";
6
+ export * from "./MistralTextGenerationModel.js";
@@ -0,0 +1,5 @@
1
+ export * from "./MistralApiConfiguration.js";
2
+ export * as mistral from "./MistralFacade.js";
3
+ export * as MistralPrompt from "./MistralPromptTemplate.js";
4
+ export * from "./MistralTextEmbeddingModel.js";
5
+ export * from "./MistralTextGenerationModel.js";
@@ -2,37 +2,12 @@
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.failedOllamaCallResponseHandler = void 0;
4
4
  const zod_1 = require("zod");
5
- const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
5
+ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
6
6
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
7
- const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
8
7
  const ollamaErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
9
8
  error: zod_1.z.string(),
10
9
  }));
11
- const failedOllamaCallResponseHandler = async ({ response, url, requestBodyValues }) => {
12
- const responseBody = await response.text();
13
- // resilient parsing in case the response is not JSON or does not match the schema:
14
- try {
15
- const parsedError = (0, parseJSON_js_1.parseJSON)({
16
- text: responseBody,
17
- schema: ollamaErrorDataSchema,
18
- });
19
- return new ApiCallError_js_1.ApiCallError({
20
- message: parsedError.error,
21
- url,
22
- requestBodyValues,
23
- statusCode: response.status,
24
- responseBody,
25
- data: parsedError,
26
- });
27
- }
28
- catch (parseError) {
29
- return new ApiCallError_js_1.ApiCallError({
30
- message: responseBody.trim() !== "" ? responseBody : response.statusText,
31
- url,
32
- requestBodyValues,
33
- statusCode: response.status,
34
- responseBody,
35
- });
36
- }
37
- };
38
- exports.failedOllamaCallResponseHandler = failedOllamaCallResponseHandler;
10
+ exports.failedOllamaCallResponseHandler = (0, postToApi_js_1.createJsonErrorResponseHandler)({
11
+ errorSchema: ollamaErrorDataSchema,
12
+ errorToMessage: (error) => error.error,
13
+ });
@@ -1,34 +1,10 @@
1
1
  import { z } from "zod";
2
- import { ApiCallError } from "../../core/api/ApiCallError.js";
2
+ import { createJsonErrorResponseHandler, } from "../../core/api/postToApi.js";
3
3
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
4
- import { parseJSON } from "../../core/schema/parseJSON.js";
5
4
  const ollamaErrorDataSchema = new ZodSchema(z.object({
6
5
  error: z.string(),
7
6
  }));
8
- export const failedOllamaCallResponseHandler = async ({ response, url, requestBodyValues }) => {
9
- const responseBody = await response.text();
10
- // resilient parsing in case the response is not JSON or does not match the schema:
11
- try {
12
- const parsedError = parseJSON({
13
- text: responseBody,
14
- schema: ollamaErrorDataSchema,
15
- });
16
- return new ApiCallError({
17
- message: parsedError.error,
18
- url,
19
- requestBodyValues,
20
- statusCode: response.status,
21
- responseBody,
22
- data: parsedError,
23
- });
24
- }
25
- catch (parseError) {
26
- return new ApiCallError({
27
- message: responseBody.trim() !== "" ? responseBody : response.statusText,
28
- url,
29
- requestBodyValues,
30
- statusCode: response.status,
31
- responseBody,
32
- });
33
- }
34
- };
7
+ export const failedOllamaCallResponseHandler = createJsonErrorResponseHandler({
8
+ errorSchema: ollamaErrorDataSchema,
9
+ errorToMessage: (error) => error.error,
10
+ });
@@ -22,12 +22,6 @@ class OllamaTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
22
22
  writable: true,
23
23
  value: 1
24
24
  });
25
- Object.defineProperty(this, "contextWindowSize", {
26
- enumerable: true,
27
- configurable: true,
28
- writable: true,
29
- value: undefined
30
- });
31
25
  }
32
26
  get modelName() {
33
27
  return null;
@@ -40,7 +34,7 @@ class OllamaTextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
40
34
  }
41
35
  async callAPI(texts, options) {
42
36
  if (texts.length > this.maxValuesPerCall) {
43
- throw new Error(`The Llama.cpp embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
37
+ throw new Error(`The Ollama embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
44
38
  }
45
39
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
46
40
  retry: this.settings.api?.retry,
@@ -15,7 +15,6 @@ export declare class OllamaTextEmbeddingModel extends AbstractModel<OllamaTextEm
15
15
  get modelName(): null;
16
16
  readonly maxValuesPerCall = 1;
17
17
  get isParallelizable(): boolean;
18
- readonly contextWindowSize: undefined;
19
18
  get embeddingDimensions(): number | undefined;
20
19
  callAPI(texts: Array<string>, options?: FunctionOptions): Promise<OllamaTextEmbeddingResponse>;
21
20
  get settingsForEvent(): Partial<OllamaTextEmbeddingModelSettings>;
@@ -19,12 +19,6 @@ export class OllamaTextEmbeddingModel extends AbstractModel {
19
19
  writable: true,
20
20
  value: 1
21
21
  });
22
- Object.defineProperty(this, "contextWindowSize", {
23
- enumerable: true,
24
- configurable: true,
25
- writable: true,
26
- value: undefined
27
- });
28
22
  }
29
23
  get modelName() {
30
24
  return null;
@@ -37,7 +31,7 @@ export class OllamaTextEmbeddingModel extends AbstractModel {
37
31
  }
38
32
  async callAPI(texts, options) {
39
33
  if (texts.length > this.maxValuesPerCall) {
40
- throw new Error(`The Llama.cpp embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
34
+ throw new Error(`The Ollama embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
41
35
  }
42
36
  return callWithRetryAndThrottle({
43
37
  retry: this.settings.api?.retry,
@@ -156,13 +156,13 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
156
156
  };
157
157
  model: string;
158
158
  id: string;
159
+ created: number;
159
160
  choices: {
160
161
  text: string;
161
162
  index: number;
162
163
  finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
163
164
  logprobs?: any;
164
165
  }[];
165
- created: number;
166
166
  system_fingerprint?: string | undefined;
167
167
  };
168
168
  text: string;
@@ -231,13 +231,13 @@ declare const OpenAICompletionResponseSchema: z.ZodObject<{
231
231
  };
232
232
  model: string;
233
233
  id: string;
234
+ created: number;
234
235
  choices: {
235
236
  text: string;
236
237
  index: number;
237
238
  finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
238
239
  logprobs?: any;
239
240
  }[];
240
- created: number;
241
241
  system_fingerprint?: string | undefined;
242
242
  }, {
243
243
  object: "text_completion";
@@ -248,13 +248,13 @@ declare const OpenAICompletionResponseSchema: z.ZodObject<{
248
248
  };
249
249
  model: string;
250
250
  id: string;
251
+ created: number;
251
252
  choices: {
252
253
  text: string;
253
254
  index: number;
254
255
  finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
255
256
  logprobs?: any;
256
257
  }[];
257
- created: number;
258
258
  system_fingerprint?: string | undefined;
259
259
  }>;
260
260
  export type OpenAICompletionResponse = z.infer<typeof OpenAICompletionResponseSchema>;
@@ -277,13 +277,13 @@ export declare const OpenAITextResponseFormat: {
277
277
  };
278
278
  model: string;
279
279
  id: string;
280
+ created: number;
280
281
  choices: {
281
282
  text: string;
282
283
  index: number;
283
284
  finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
284
285
  logprobs?: any;
285
286
  }[];
286
- created: number;
287
287
  system_fingerprint?: string | undefined;
288
288
  }>;
289
289
  };
@@ -2,9 +2,8 @@
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.failedOpenAICallResponseHandler = void 0;
4
4
  const zod_1 = require("zod");
5
- const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
5
+ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
6
6
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
7
- const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
8
7
  const openAIErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
9
8
  error: zod_1.z.object({
10
9
  message: zod_1.z.string(),
@@ -13,35 +12,11 @@ const openAIErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
13
12
  code: zod_1.z.string().nullable(),
14
13
  }),
15
14
  }));
16
- const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
17
- const responseBody = await response.text();
18
- // resilient parsing in case the response is not JSON or does not match the schema:
19
- try {
20
- const parsedError = (0, parseJSON_js_1.parseJSON)({
21
- text: responseBody,
22
- schema: openAIErrorDataSchema,
23
- });
24
- return new ApiCallError_js_1.ApiCallError({
25
- message: parsedError.error.message,
26
- url,
27
- requestBodyValues,
28
- statusCode: response.status,
29
- responseBody,
30
- data: parsedError,
31
- isRetryable: (response.status === 429 &&
32
- // insufficient_quota is also reported as a 429, but it's not retryable:
33
- parsedError?.error.type !== "insufficient_quota") ||
34
- response.status >= 500,
35
- });
36
- }
37
- catch (parseError) {
38
- return new ApiCallError_js_1.ApiCallError({
39
- message: responseBody.trim() !== "" ? responseBody : response.statusText,
40
- url,
41
- requestBodyValues,
42
- statusCode: response.status,
43
- responseBody,
44
- });
45
- }
46
- };
47
- exports.failedOpenAICallResponseHandler = failedOpenAICallResponseHandler;
15
+ exports.failedOpenAICallResponseHandler = (0, postToApi_js_1.createJsonErrorResponseHandler)({
16
+ errorSchema: openAIErrorDataSchema,
17
+ errorToMessage: (error) => error.error.message,
18
+ isRetryable: (error, response) => (response.status === 429 &&
19
+ // insufficient_quota is also reported as a 429, but it's not retryable:
20
+ error.error.type !== "insufficient_quota") ||
21
+ response.status >= 500,
22
+ });
@@ -1,5 +1,3 @@
1
- import { ApiCallError } from "../../core/api/ApiCallError.js";
2
- import { ResponseHandler } from "../../core/api/postToApi.js";
3
1
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
4
2
  declare const openAIErrorDataSchema: ZodSchema<{
5
3
  error: {
@@ -10,5 +8,5 @@ declare const openAIErrorDataSchema: ZodSchema<{
10
8
  };
11
9
  }>;
12
10
  export type OpenAIErrorData = (typeof openAIErrorDataSchema)["_type"];
13
- export declare const failedOpenAICallResponseHandler: ResponseHandler<ApiCallError>;
11
+ export declare const failedOpenAICallResponseHandler: import("../../core/api/postToApi.js").ResponseHandler<import("../../index.js").ApiCallError>;
14
12
  export {};
@@ -1,7 +1,6 @@
1
1
  import { z } from "zod";
2
- import { ApiCallError } from "../../core/api/ApiCallError.js";
2
+ import { createJsonErrorResponseHandler } from "../../core/api/postToApi.js";
3
3
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
4
- import { parseJSON } from "../../core/schema/parseJSON.js";
5
4
  const openAIErrorDataSchema = new ZodSchema(z.object({
6
5
  error: z.object({
7
6
  message: z.string(),
@@ -10,34 +9,11 @@ const openAIErrorDataSchema = new ZodSchema(z.object({
10
9
  code: z.string().nullable(),
11
10
  }),
12
11
  }));
13
- export const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
14
- const responseBody = await response.text();
15
- // resilient parsing in case the response is not JSON or does not match the schema:
16
- try {
17
- const parsedError = parseJSON({
18
- text: responseBody,
19
- schema: openAIErrorDataSchema,
20
- });
21
- return new ApiCallError({
22
- message: parsedError.error.message,
23
- url,
24
- requestBodyValues,
25
- statusCode: response.status,
26
- responseBody,
27
- data: parsedError,
28
- isRetryable: (response.status === 429 &&
29
- // insufficient_quota is also reported as a 429, but it's not retryable:
30
- parsedError?.error.type !== "insufficient_quota") ||
31
- response.status >= 500,
32
- });
33
- }
34
- catch (parseError) {
35
- return new ApiCallError({
36
- message: responseBody.trim() !== "" ? responseBody : response.statusText,
37
- url,
38
- requestBodyValues,
39
- statusCode: response.status,
40
- responseBody,
41
- });
42
- }
43
- };
12
+ export const failedOpenAICallResponseHandler = createJsonErrorResponseHandler({
13
+ errorSchema: openAIErrorDataSchema,
14
+ errorToMessage: (error) => error.error.message,
15
+ isRetryable: (error, response) => (response.status === 429 &&
16
+ // insufficient_quota is also reported as a 429, but it's not retryable:
17
+ error.error.type !== "insufficient_quota") ||
18
+ response.status >= 500,
19
+ });