modelfusion 0.95.0 → 0.97.0

This diff shows the content of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Files changed (47)
  1. package/README.md +3 -3
  2. package/core/api/postToApi.cjs +30 -1
  3. package/core/api/postToApi.d.ts +7 -1
  4. package/core/api/postToApi.js +29 -1
  5. package/model-provider/index.cjs +1 -0
  6. package/model-provider/index.d.ts +1 -0
  7. package/model-provider/index.js +1 -0
  8. package/model-provider/mistral/MistralApiConfiguration.cjs +22 -0
  9. package/model-provider/mistral/MistralApiConfiguration.d.ts +12 -0
  10. package/model-provider/mistral/MistralApiConfiguration.js +18 -0
  11. package/model-provider/mistral/MistralError.cjs +17 -0
  12. package/model-provider/mistral/MistralError.d.ts +13 -0
  13. package/model-provider/mistral/MistralError.js +14 -0
  14. package/model-provider/mistral/MistralFacade.cjs +18 -0
  15. package/model-provider/mistral/MistralFacade.d.ts +6 -0
  16. package/model-provider/mistral/MistralFacade.js +12 -0
  17. package/model-provider/mistral/MistralPromptTemplate.cjs +64 -0
  18. package/model-provider/mistral/MistralPromptTemplate.d.ts +16 -0
  19. package/model-provider/mistral/MistralPromptTemplate.js +58 -0
  20. package/model-provider/mistral/MistralTextEmbeddingModel.cjs +100 -0
  21. package/model-provider/mistral/MistralTextEmbeddingModel.d.ts +106 -0
  22. package/model-provider/mistral/MistralTextEmbeddingModel.js +96 -0
  23. package/model-provider/mistral/MistralTextGenerationModel.cjs +254 -0
  24. package/model-provider/mistral/MistralTextGenerationModel.d.ts +231 -0
  25. package/model-provider/mistral/MistralTextGenerationModel.js +250 -0
  26. package/model-provider/mistral/index.cjs +34 -0
  27. package/model-provider/mistral/index.d.ts +6 -0
  28. package/model-provider/mistral/index.js +5 -0
  29. package/model-provider/ollama/OllamaError.cjs +5 -30
  30. package/model-provider/ollama/OllamaError.js +5 -29
  31. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +1 -7
  32. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +0 -1
  33. package/model-provider/ollama/OllamaTextEmbeddingModel.js +1 -7
  34. package/model-provider/ollama/OllamaTextGenerationModel.cjs +60 -57
  35. package/model-provider/ollama/OllamaTextGenerationModel.d.ts +33 -22
  36. package/model-provider/ollama/OllamaTextGenerationModel.js +60 -57
  37. package/model-provider/ollama/OllamaTextGenerationModel.test.cjs +2 -2
  38. package/model-provider/ollama/OllamaTextGenerationModel.test.js +2 -2
  39. package/model-provider/openai/OpenAICompletionModel.d.ts +4 -4
  40. package/model-provider/openai/OpenAIError.cjs +9 -34
  41. package/model-provider/openai/OpenAIError.d.ts +1 -3
  42. package/model-provider/openai/OpenAIError.js +9 -33
  43. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +6 -6
  44. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
  45. package/model-provider/whispercpp/WhisperCppTranscriptionModel.cjs +2 -1
  46. package/model-provider/whispercpp/WhisperCppTranscriptionModel.js +2 -1
  47. package/package.json +1 -1
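
The bulk of this release is the new Mistral provider (files 8–28 above): an API configuration, error handling, a prompt template, and text generation and embedding models. A minimal usage sketch, assuming the provider classes are re-exported from the package root like the existing providers and that the prompt is chat-message shaped; neither detail is shown in this diff, so treat everything below as hypothetical:

```ts
import { generateText, MistralTextGenerationModel } from "modelfusion";

// Hypothetical sketch: the model id, settings, and prompt shape are assumptions,
// not taken from this diff. API key handling comes from MistralApiConfiguration
// (files 8-10 above), presumably via an environment variable.
const text = await generateText(
  new MistralTextGenerationModel({
    model: "mistral-tiny",
  }),
  [{ role: "user", content: "Summarize the 0.97.0 changes in one sentence." }]
);
```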
package/model-provider/ollama/OllamaTextGenerationModel.d.ts
@@ -40,39 +40,39 @@ export interface OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends n
  * A lower learning rate will result in slower adjustments,
  * while a higher learning rate will make the algorithm more responsive. (Default: 0.1)
  */
- mirostat_eta?: number;
+ mirostatEta?: number;
  /**
  * Controls the balance between coherence and diversity of the output.
  * A lower value will result in more focused and coherent text. (Default: 5.0)
  */
- mirostat_tau?: number;
+ mirostatTau?: number;
  /**
  * The number of GQA groups in the transformer layer. Required for some models,
  * for example it is 8 for llama2:70b
  */
- num_gqa?: number;
+ numGqa?: number;
  /**
  * The number of layers to send to the GPU(s). On macOS it defaults to 1 to
  * enable metal support, 0 to disable.
  */
- num_gpu?: number;
+ numGpu?: number;
  /**
  * Sets the number of threads to use during computation. By default, Ollama will
  * detect this for optimal performance. It is recommended to set this value to the
  * number of physical CPU cores your system has (as opposed to the logical number of cores).
  */
- num_threads?: number;
+ numThreads?: number;
  /**
  * Sets how far back for the model to look back to prevent repetition.
  * (Default: 64, 0 = disabled, -1 = num_ctx)
  */
- repeat_last_n?: number;
+ repeatLastN?: number;
  /**
  * Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
  * will penalize repetitions more strongly, while a lower value (e.g., 0.9)
  * will be more lenient. (Default: 1.1)
  */
- repeat_penalty?: number;
+ repeatPenalty?: number;
  /**
  * Sets the random number seed to use for generation. Setting this to a
  * specific number will make the model generate the same text for the same prompt.
@@ -84,19 +84,19 @@ export interface OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends n
  * from the output. A higher value (e.g., 2.0) will reduce the impact more,
  * while a value of 1.0 disables this setting. (default: 1)
  */
- tfs_z?: number;
+ tfsZ?: number;
  /**
  * Reduces the probability of generating nonsense. A higher value (e.g. 100)
  * will give more diverse answers, while a lower value (e.g. 10) will be more
  * conservative. (Default: 40)
  */
- top_k?: number;
+ topK?: number;
  /**
  * Works together with top-k. A higher value (e.g., 0.95) will lead to more
  * diverse text, while a lower value (e.g., 0.5) will generate more focused
  * and conservative text. (Default: 0.9)
  */
- top_p?: number;
+ topP?: number;
  /**
  * When set to true, no formatting will be applied to the prompt and no context
  * will be returned.
@@ -111,35 +111,46 @@ export interface OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends n
  template?: string;
  context?: number[];
  }
- export declare class OllamaTextGenerationModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<string, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> {
+ export interface OllamaTextGenerationPrompt {
+ /**
+ * Text prompt.
+ */
+ prompt: string;
+ /**
+ Images. Supports base64-encoded `png` and `jpeg` images up to 100MB in size.
+ */
+ images?: Record<number, string>;
+ }
+ export declare class OllamaTextGenerationModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<OllamaTextGenerationPrompt, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> {
  constructor(settings: OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>);
  readonly provider = "ollama";
  get modelName(): string;
  readonly tokenizer: undefined;
  readonly countPromptTokens: undefined;
  get contextWindowSize(): CONTEXT_WINDOW_SIZE;
- callAPI<RESPONSE>(prompt: string, options: {
+ callAPI<RESPONSE>(prompt: OllamaTextGenerationPrompt, options: {
  responseFormat: OllamaTextGenerationResponseFormatType<RESPONSE>;
  } & FunctionOptions): Promise<RESPONSE>;
  get settingsForEvent(): Partial<OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>;
- doGenerateText(prompt: string, options?: FunctionOptions): Promise<{
+ doGenerateText(prompt: OllamaTextGenerationPrompt, options?: FunctionOptions): Promise<{
  response: {
  response: string;
  model: string;
  done: true;
  total_duration: number;
- load_duration: number;
  prompt_eval_count: number;
  eval_count: number;
  eval_duration: number;
+ load_duration?: number | undefined;
  context?: number[] | undefined;
  };
  text: string;
  }>;
- doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
- asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, string>): TextGenerationToolCallModel<INPUT_PROMPT, string, this>;
- asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsOrGenerateTextPromptTemplate<INPUT_PROMPT, string>): TextGenerationToolCallsOrGenerateTextModel<INPUT_PROMPT, string, this>;
- withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+ doStreamText(prompt: OllamaTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+ asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaTextGenerationPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaTextGenerationPrompt, this>;
+ asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsOrGenerateTextPromptTemplate<INPUT_PROMPT, OllamaTextGenerationPrompt>): TextGenerationToolCallsOrGenerateTextModel<INPUT_PROMPT, OllamaTextGenerationPrompt, this>;
+ withTextPrompt(): PromptTemplateTextStreamingModel<string, OllamaTextGenerationPrompt, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+ withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OllamaTextGenerationPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OllamaTextGenerationPrompt, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
  withSettings(additionalSettings: Partial<OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
  }
  declare const ollamaTextGenerationResponseSchema: z.ZodObject<{
@@ -147,7 +158,7 @@ declare const ollamaTextGenerationResponseSchema: z.ZodObject<{
  model: z.ZodString;
  response: z.ZodString;
  total_duration: z.ZodNumber;
- load_duration: z.ZodNumber;
+ load_duration: z.ZodOptional<z.ZodNumber>;
  prompt_eval_count: z.ZodNumber;
  eval_count: z.ZodNumber;
  eval_duration: z.ZodNumber;
@@ -157,20 +168,20 @@ declare const ollamaTextGenerationResponseSchema: z.ZodObject<{
  model: string;
  done: true;
  total_duration: number;
- load_duration: number;
  prompt_eval_count: number;
  eval_count: number;
  eval_duration: number;
+ load_duration?: number | undefined;
  context?: number[] | undefined;
  }, {
  response: string;
  model: string;
  done: true;
  total_duration: number;
- load_duration: number;
  prompt_eval_count: number;
  eval_count: number;
  eval_duration: number;
+ load_duration?: number | undefined;
  context?: number[] | undefined;
  }>;
  export type OllamaTextGenerationResponse = z.infer<typeof ollamaTextGenerationResponseSchema>;
@@ -198,10 +209,10 @@ export declare const OllamaTextGenerationResponseFormat: {
  model: string;
  done: true;
  total_duration: number;
- load_duration: number;
  prompt_eval_count: number;
  eval_count: number;
  eval_duration: number;
+ load_duration?: number | undefined;
  context?: number[] | undefined;
  }>;
  };
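
As the declarations above show, Ollama text generation prompts are now structured objects (`OllamaTextGenerationPrompt`: a text `prompt` plus optional base64-encoded `images`), and `withTextPrompt()` restores the old plain-string calling style. A short sketch of both call styles, assuming `generateText` and the provider class are imported from the package root as in the tests further down:

```ts
import { generateText, OllamaTextGenerationModel } from "modelfusion";

declare const imageAsBase64: string; // base64-encoded png/jpeg you provide

// Structured prompt: text plus optional images keyed by index
// (OllamaTextGenerationPrompt above). Model name is illustrative.
const description = await generateText(
  new OllamaTextGenerationModel({ model: "llava" }),
  {
    prompt: "Describe this image:",
    images: { 0: imageAsBase64 },
  }
);

// Plain string prompts still work after opting in via withTextPrompt():
const answer = await generateText(
  new OllamaTextGenerationModel({ model: "llama2" }).withTextPrompt(),
  "Why is the sky blue?"
);
```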
package/model-provider/ollama/OllamaTextGenerationModel.js
@@ -41,15 +41,47 @@ export class OllamaTextGenerationModel extends AbstractModel {
  return this.settings.contextWindowSize;
  }
  async callAPI(prompt, options) {
+ const { responseFormat } = options;
+ const api = this.settings.api ?? new OllamaApiConfiguration();
+ const abortSignal = options.run?.abortSignal;
  return callWithRetryAndThrottle({
- retry: this.settings.api?.retry,
- throttle: this.settings.api?.throttle,
- call: async () => callOllamaTextGenerationAPI({
- ...this.settings,
- // other
- abortSignal: options.run?.abortSignal,
- prompt,
- responseFormat: options.responseFormat,
+ retry: api.retry,
+ throttle: api.throttle,
+ call: async () => postJsonToApi({
+ url: api.assembleUrl(`/api/generate`),
+ headers: api.headers,
+ body: {
+ stream: responseFormat.stream,
+ model: this.settings.model,
+ prompt: prompt.prompt,
+ images: prompt.images,
+ format: this.settings.format,
+ options: {
+ mirostat: this.settings.mirostat,
+ mirostat_eta: this.settings.mirostatEta,
+ mirostat_tau: this.settings.mirostatTau,
+ num_ctx: this.settings.contextWindowSize,
+ num_gpu: this.settings.numGpu,
+ num_gqa: this.settings.numGqa,
+ num_predict: this.settings.maxCompletionTokens,
+ num_threads: this.settings.numThreads,
+ repeat_last_n: this.settings.repeatLastN,
+ repeat_penalty: this.settings.repeatPenalty,
+ seed: this.settings.seed,
+ stop: this.settings.stopSequences,
+ temperature: this.settings.temperature,
+ tfs_z: this.settings.tfsZ,
+ top_k: this.settings.topK,
+ top_p: this.settings.topP,
+ },
+ system: this.settings.system,
+ template: this.settings.template,
+ context: this.settings.context,
+ raw: this.settings.raw,
+ },
+ failedResponseHandler: failedOllamaCallResponseHandler,
+ successfulResponseHandler: responseFormat.handler,
+ abortSignal,
  }),
  });
  }
@@ -60,17 +92,17 @@ export class OllamaTextGenerationModel extends AbstractModel {
  "contextWindowSize",
  "temperature",
  "mirostat",
- "mirostat_eta",
- "mirostat_tau",
- "num_gqa",
- "num_gpu",
- "num_threads",
- "repeat_last_n",
- "repeat_penalty",
+ "mirostatEta",
+ "mirostatTau",
+ "numGqa",
+ "numGpu",
+ "numThreads",
+ "repeatLastN",
+ "repeatPenalty",
  "seed",
- "tfs_z",
- "top_k",
- "top_p",
+ "tfsZ",
+ "topK",
+ "topP",
  "system",
  "template",
  "context",
@@ -107,6 +139,14 @@ export class OllamaTextGenerationModel extends AbstractModel {
  template: promptTemplate,
  });
  }
+ withTextPrompt() {
+ return this.withPromptTemplate({
+ format(prompt) {
+ return { prompt: prompt };
+ },
+ stopSequences: [],
+ });
+ }
  withPromptTemplate(promptTemplate) {
  return new PromptTemplateTextStreamingModel({
  model: this.withSettings({
@@ -127,7 +167,7 @@ const ollamaTextGenerationResponseSchema = z.object({
  model: z.string(),
  response: z.string(),
  total_duration: z.number(),
- load_duration: z.number(),
+ load_duration: z.number().optional(),
  prompt_eval_count: z.number(),
  eval_count: z.number(),
  eval_duration: z.number(),
@@ -145,7 +185,7 @@ const ollamaTextStreamingResponseSchema = new ZodSchema(z.discriminatedUnion("do
  model: z.string(),
  created_at: z.string(),
  total_duration: z.number(),
- load_duration: z.number(),
+ load_duration: z.number().optional(),
  sample_count: z.number().optional(),
  sample_duration: z.number().optional(),
  prompt_eval_count: z.number(),
@@ -155,43 +195,6 @@ const ollamaTextStreamingResponseSchema = new ZodSchema(z.discriminatedUnion("do
  context: z.array(z.number()).optional(),
  }),
  ]));
- async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration(), abortSignal, responseFormat, prompt, model, format, contextWindowSize, maxCompletionTokens, mirostat, mirostat_eta, mirostat_tau, num_gpu, num_gqa, num_threads, repeat_last_n, repeat_penalty, seed, stopSequences, temperature, tfs_z, top_k, top_p, system, template, context, raw, }) {
- return postJsonToApi({
- url: api.assembleUrl(`/api/generate`),
- headers: api.headers,
- body: {
- stream: responseFormat.stream,
- model,
- prompt,
- format,
- options: {
- mirostat,
- mirostat_eta,
- mirostat_tau,
- num_ctx: contextWindowSize,
- num_gpu,
- num_gqa,
- num_predict: maxCompletionTokens,
- num_threads,
- repeat_last_n,
- repeat_penalty,
- seed,
- stop: stopSequences,
- temperature,
- tfs_z,
- top_k,
- top_p,
- },
- system,
- template,
- context,
- raw,
- },
- failedResponseHandler: failedOllamaCallResponseHandler,
- successfulResponseHandler: responseFormat.handler,
- abortSignal,
- });
- }
  async function createOllamaFullDeltaIterableQueue(stream) {
  const queue = new AsyncQueue();
  let accumulatedText = "";
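
The standalone `callOllamaTextGenerationAPI` helper above is removed without replacement; its body now lives inline in `callAPI` (earlier hunk). For users, the visible change is the settings rename from snake_case to camelCase: `callAPI` translates the names back to Ollama's snake_case request options, so the body sent to `POST /api/generate` stays the same. A before/after sketch for user code:

```ts
import { OllamaTextGenerationModel } from "modelfusion";

// Before this diff (0.95.0): snake_case settings
new OllamaTextGenerationModel({
  model: "llama2",
  top_k: 40,
  repeat_penalty: 1.1,
});

// After (0.97.0): camelCase settings; the request sent to Ollama is identical
new OllamaTextGenerationModel({
  model: "llama2",
  topK: 40,
  repeatPenalty: 1.1,
});
```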
package/model-provider/ollama/OllamaTextGenerationModel.test.cjs
@@ -36,7 +36,7 @@ describe("generateText", () => {
  };
  const result = await (0, generateText_js_1.generateText)(new OllamaTextGenerationModel_js_1.OllamaTextGenerationModel({
  model: "test-model",
- }), "test prompt");
+ }).withTextPrompt(), "test prompt");
  expect(result).toEqual("test response");
  });
  it("should throw retryable ApiCallError when Ollama is overloaded", async () => {
@@ -52,7 +52,7 @@ describe("generateText", () => {
  retry: (0, retryNever_js_1.retryNever)(),
  }),
  model: "test-model",
- }), "test prompt");
+ }).withTextPrompt(), "test prompt");
  (0, assert_1.fail)("Should have thrown ApiCallError");
  }
  catch (expectedError) {
package/model-provider/ollama/OllamaTextGenerationModel.test.js
@@ -34,7 +34,7 @@ describe("generateText", () => {
  };
  const result = await generateText(new OllamaTextGenerationModel({
  model: "test-model",
- }), "test prompt");
+ }).withTextPrompt(), "test prompt");
  expect(result).toEqual("test response");
  });
  it("should throw retryable ApiCallError when Ollama is overloaded", async () => {
@@ -50,7 +50,7 @@ describe("generateText", () => {
  retry: retryNever(),
  }),
  model: "test-model",
- }), "test prompt");
+ }).withTextPrompt(), "test prompt");
  fail("Should have thrown ApiCallError");
  }
  catch (expectedError) {
package/model-provider/openai/OpenAICompletionModel.d.ts
@@ -156,13 +156,13 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  text: string;
  index: number;
  finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
  logprobs?: any;
  }[];
- created: number;
  system_fingerprint?: string | undefined;
  };
  text: string;
@@ -231,13 +231,13 @@ declare const OpenAICompletionResponseSchema: z.ZodObject<{
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  text: string;
  index: number;
  finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
  logprobs?: any;
  }[];
- created: number;
  system_fingerprint?: string | undefined;
  }, {
  object: "text_completion";
@@ -248,13 +248,13 @@ declare const OpenAICompletionResponseSchema: z.ZodObject<{
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  text: string;
  index: number;
  finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
  logprobs?: any;
  }[];
- created: number;
  system_fingerprint?: string | undefined;
  }>;
  export type OpenAICompletionResponse = z.infer<typeof OpenAICompletionResponseSchema>;
@@ -277,13 +277,13 @@ export declare const OpenAITextResponseFormat: {
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  text: string;
  index: number;
  finish_reason?: "length" | "stop" | "content_filter" | null | undefined;
  logprobs?: any;
  }[];
- created: number;
  system_fingerprint?: string | undefined;
  }>;
  };
package/model-provider/openai/OpenAIError.cjs
@@ -2,9 +2,8 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.failedOpenAICallResponseHandler = void 0;
  const zod_1 = require("zod");
- const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
+ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
- const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
  const openAIErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
  error: zod_1.z.object({
  message: zod_1.z.string(),
@@ -13,35 +12,11 @@ const openAIErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
  code: zod_1.z.string().nullable(),
  }),
  }));
- const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
- const responseBody = await response.text();
- // resilient parsing in case the response is not JSON or does not match the schema:
- try {
- const parsedError = (0, parseJSON_js_1.parseJSON)({
- text: responseBody,
- schema: openAIErrorDataSchema,
- });
- return new ApiCallError_js_1.ApiCallError({
- message: parsedError.error.message,
- url,
- requestBodyValues,
- statusCode: response.status,
- responseBody,
- data: parsedError,
- isRetryable: (response.status === 429 &&
- // insufficient_quota is also reported as a 429, but it's not retryable:
- parsedError?.error.type !== "insufficient_quota") ||
- response.status >= 500,
- });
- }
- catch (parseError) {
- return new ApiCallError_js_1.ApiCallError({
- message: responseBody.trim() !== "" ? responseBody : response.statusText,
- url,
- requestBodyValues,
- statusCode: response.status,
- responseBody,
- });
- }
- };
- exports.failedOpenAICallResponseHandler = failedOpenAICallResponseHandler;
+ exports.failedOpenAICallResponseHandler = (0, postToApi_js_1.createJsonErrorResponseHandler)({
+ errorSchema: openAIErrorDataSchema,
+ errorToMessage: (error) => error.error.message,
+ isRetryable: (error, response) => (response.status === 429 &&
+ // insufficient_quota is also reported as a 429, but it's not retryable:
+ error.error.type !== "insufficient_quota") ||
+ response.status >= 500,
+ });
package/model-provider/openai/OpenAIError.d.ts
@@ -1,5 +1,3 @@
- import { ApiCallError } from "../../core/api/ApiCallError.js";
- import { ResponseHandler } from "../../core/api/postToApi.js";
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
  declare const openAIErrorDataSchema: ZodSchema<{
  error: {
@@ -10,5 +8,5 @@ declare const openAIErrorDataSchema: ZodSchema<{
  };
  }>;
  export type OpenAIErrorData = (typeof openAIErrorDataSchema)["_type"];
- export declare const failedOpenAICallResponseHandler: ResponseHandler<ApiCallError>;
+ export declare const failedOpenAICallResponseHandler: import("../../core/api/postToApi.js").ResponseHandler<import("../../index.js").ApiCallError>;
  export {};
package/model-provider/openai/OpenAIError.js
@@ -1,7 +1,6 @@
  import { z } from "zod";
- import { ApiCallError } from "../../core/api/ApiCallError.js";
+ import { createJsonErrorResponseHandler } from "../../core/api/postToApi.js";
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
- import { parseJSON } from "../../core/schema/parseJSON.js";
  const openAIErrorDataSchema = new ZodSchema(z.object({
  error: z.object({
  message: z.string(),
@@ -10,34 +9,11 @@ const openAIErrorDataSchema = new ZodSchema(z.object({
  code: z.string().nullable(),
  }),
  }));
- export const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
- const responseBody = await response.text();
- // resilient parsing in case the response is not JSON or does not match the schema:
- try {
- const parsedError = parseJSON({
- text: responseBody,
- schema: openAIErrorDataSchema,
- });
- return new ApiCallError({
- message: parsedError.error.message,
- url,
- requestBodyValues,
- statusCode: response.status,
- responseBody,
- data: parsedError,
- isRetryable: (response.status === 429 &&
- // insufficient_quota is also reported as a 429, but it's not retryable:
- parsedError?.error.type !== "insufficient_quota") ||
- response.status >= 500,
- });
- }
- catch (parseError) {
- return new ApiCallError({
- message: responseBody.trim() !== "" ? responseBody : response.statusText,
- url,
- requestBodyValues,
- statusCode: response.status,
- responseBody,
- });
- }
- };
+ export const failedOpenAICallResponseHandler = createJsonErrorResponseHandler({
+ errorSchema: openAIErrorDataSchema,
+ errorToMessage: (error) => error.error.message,
+ isRetryable: (error, response) => (response.status === 429 &&
+ // insufficient_quota is also reported as a 429, but it's not retryable:
+ error.error.type !== "insufficient_quota") ||
+ response.status >= 500,
+ });
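
The hand-written OpenAI error handler is replaced by `createJsonErrorResponseHandler`, a helper added to `core/api/postToApi` in this release (see the postToApi changes in the file list). A sketch of building an error handler for a hypothetical provider with the same call shape; the option names and the ZodSchema wrapper are taken from the diff above, everything else (import paths, schema fields, handler name) is illustrative:

```ts
import { z } from "zod";
import { createJsonErrorResponseHandler } from "../../core/api/postToApi.js";
import { ZodSchema } from "../../core/schema/ZodSchema.js";

// Illustrative error payload schema for a hypothetical provider.
const myProviderErrorDataSchema = new ZodSchema(
  z.object({
    error: z.object({
      message: z.string(),
      code: z.string().nullable(),
    }),
  })
);

// The helper parses the JSON error body against the schema and turns it into
// an ApiCallError (as the removed inline handler did); only message extraction
// and the retry policy remain provider-specific.
export const failedMyProviderCallResponseHandler = createJsonErrorResponseHandler({
  errorSchema: myProviderErrorDataSchema,
  errorToMessage: (error) => error.error.message,
  isRetryable: (error, response) =>
    response.status === 429 || response.status >= 500,
});
```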
package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts
@@ -74,6 +74,7 @@ export declare abstract class AbstractOpenAIChatModel<SETTINGS extends AbstractO
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  message: {
  role: "assistant";
@@ -95,7 +96,6 @@ export declare abstract class AbstractOpenAIChatModel<SETTINGS extends AbstractO
  logprobs?: any;
  finish_reason?: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | null | undefined;
  }[];
- created: number;
  system_fingerprint?: string | null | undefined;
  };
  text: string;
@@ -116,6 +116,7 @@ export declare abstract class AbstractOpenAIChatModel<SETTINGS extends AbstractO
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  message: {
  role: "assistant";
@@ -137,7 +138,6 @@ export declare abstract class AbstractOpenAIChatModel<SETTINGS extends AbstractO
  logprobs?: any;
  finish_reason?: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | null | undefined;
  }[];
- created: number;
  system_fingerprint?: string | null | undefined;
  };
  toolCall: {
@@ -160,6 +160,7 @@ export declare abstract class AbstractOpenAIChatModel<SETTINGS extends AbstractO
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  message: {
  role: "assistant";
@@ -181,7 +182,6 @@ export declare abstract class AbstractOpenAIChatModel<SETTINGS extends AbstractO
  logprobs?: any;
  finish_reason?: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | null | undefined;
  }[];
- created: number;
  system_fingerprint?: string | null | undefined;
  };
  text: string | null;
@@ -347,6 +347,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  message: {
  role: "assistant";
@@ -368,7 +369,6 @@ declare const openAIChatResponseSchema: z.ZodObject<{
  logprobs?: any;
  finish_reason?: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | null | undefined;
  }[];
- created: number;
  system_fingerprint?: string | null | undefined;
  }, {
  object: "chat.completion";
@@ -379,6 +379,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  message: {
  role: "assistant";
@@ -400,7 +401,6 @@ declare const openAIChatResponseSchema: z.ZodObject<{
  logprobs?: any;
  finish_reason?: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | null | undefined;
  }[];
- created: number;
  system_fingerprint?: string | null | undefined;
  }>;
  export type OpenAIChatResponse = z.infer<typeof openAIChatResponseSchema>;
@@ -423,6 +423,7 @@ export declare const OpenAIChatResponseFormat: {
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  message: {
  role: "assistant";
@@ -444,7 +445,6 @@ export declare const OpenAIChatResponseFormat: {
  logprobs?: any;
  finish_reason?: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | null | undefined;
  }[];
- created: number;
  system_fingerprint?: string | null | undefined;
  }>;
  };
package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts
@@ -52,6 +52,7 @@ OpenAIChatSettings> {
  };
  model: string;
  id: string;
+ created: number;
  choices: {
  message: {
  role: "assistant";
@@ -73,7 +74,6 @@ OpenAIChatSettings> {
  logprobs?: any;
  finish_reason?: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | null | undefined;
  }[];
- created: number;
  system_fingerprint?: string | null | undefined;
  };
  valueText: string;