modelfusion 0.1.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. package/README.md +16 -0
  2. package/model-provider/cohere/CohereError.cjs +11 -0
  3. package/model-provider/cohere/CohereError.js +11 -0
  4. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +3 -1
  5. package/model-provider/cohere/CohereTextEmbeddingModel.js +3 -1
  6. package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -1
  7. package/model-provider/cohere/CohereTextGenerationModel.js +3 -1
  8. package/model-provider/cohere/CohereTokenizer.cjs +6 -2
  9. package/model-provider/cohere/CohereTokenizer.js +6 -2
  10. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +3 -1
  11. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +3 -1
  12. package/model-provider/openai/OpenAIError.cjs +21 -9
  13. package/model-provider/openai/OpenAIError.d.ts +3 -3
  14. package/model-provider/openai/OpenAIError.js +21 -9
  15. package/model-provider/openai/OpenAIImageGenerationModel.cjs +3 -1
  16. package/model-provider/openai/OpenAIImageGenerationModel.js +3 -1
  17. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -1
  18. package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -1
  19. package/model-provider/openai/OpenAITextGenerationModel.cjs +5 -2
  20. package/model-provider/openai/OpenAITextGenerationModel.d.ts +1 -0
  21. package/model-provider/openai/OpenAITextGenerationModel.js +5 -2
  22. package/model-provider/openai/OpenAITranscriptionModel.cjs +3 -2
  23. package/model-provider/openai/OpenAITranscriptionModel.js +3 -2
  24. package/model-provider/openai/chat/OpenAIChatModel.cjs +5 -2
  25. package/model-provider/openai/chat/OpenAIChatModel.d.ts +1 -0
  26. package/model-provider/openai/chat/OpenAIChatModel.js +5 -2
  27. package/model-provider/stability/StabilityImageGenerationModel.cjs +3 -1
  28. package/model-provider/stability/StabilityImageGenerationModel.js +3 -1
  29. package/package.json +3 -3
  30. package/prompt/AlpacaPromptMapping.cjs +33 -0
  31. package/prompt/AlpacaPromptMapping.d.ts +11 -0
  32. package/prompt/AlpacaPromptMapping.js +29 -0
  33. package/prompt/InstructionPrompt.d.ts +13 -1
  34. package/prompt/Llama2PromptMapping.cjs +1 -1
  35. package/prompt/Llama2PromptMapping.js +1 -1
  36. package/prompt/OpenAIChatPromptMapping.cjs +6 -0
  37. package/prompt/OpenAIChatPromptMapping.js +6 -0
  38. package/prompt/TextPromptMapping.cjs +11 -3
  39. package/prompt/TextPromptMapping.js +11 -3
  40. package/prompt/VicunaPromptMapping.cjs +55 -0
  41. package/prompt/VicunaPromptMapping.d.ts +16 -0
  42. package/prompt/VicunaPromptMapping.js +51 -0
  43. package/prompt/index.cjs +2 -0
  44. package/prompt/index.d.ts +2 -0
  45. package/prompt/index.js +2 -0
  46. package/util/api/postToApi.cjs +6 -11
  47. package/util/api/postToApi.d.ts +4 -5
  48. package/util/api/postToApi.js +6 -11
  49. package/util/api/retryWithExponentialBackoff.cjs +1 -1
  50. package/util/api/retryWithExponentialBackoff.js +1 -1
package/README.md CHANGED
@@ -377,6 +377,22 @@ const { chunks } = await retrieveTextChunks(
377
377
  - [Memory](https://modelfusion.dev/integration/vector-index/memory)
378
378
  - [Pinecone](https://modelfusion.dev/integration/vector-index/pinecone)
379
379
 
380
+ ### Observability
381
+
382
+ - [Helicone](https://modelfusion.dev/integration/observability/helicone)
383
+
384
+ ### Prompt Formats
385
+
386
+ Use higher level prompts that are mapped into model specific prompt formats.
387
+
388
+ | Prompt Format | Instruction Prompt | Chat Prompt |
389
+ | ------------- | ------------------ | ----------- |
390
+ | OpenAI Chat | ✅ | ✅ |
391
+ | Llama 2 | ✅ | ✅ |
392
+ | Alpaca | ✅ | ❌ |
393
+ | Vicuna | ❌ | ✅ |
394
+ | Generic Text | ✅ | ✅ |
395
+
380
396
  ## Documentation
381
397
 
382
398
  - [Guide](https://modelfusion.dev/guide)
@@ -25,6 +25,17 @@ class CohereError extends ApiCallError_js_1.ApiCallError {
25
25
  exports.CohereError = CohereError;
26
26
  const failedCohereCallResponseHandler = async ({ response, url, requestBodyValues }) => {
27
27
  const responseBody = await response.text();
28
+ // For some errors, the body of Cohere responses is empty:
29
+ if (responseBody.trim() === "") {
30
+ return new CohereError({
31
+ url,
32
+ requestBodyValues,
33
+ statusCode: response.status,
34
+ data: {
35
+ message: response.statusText,
36
+ },
37
+ });
38
+ }
28
39
  const parsedError = exports.cohereErrorDataSchema.parse(secure_json_parse_1.default.parse(responseBody));
29
40
  return new CohereError({
30
41
  url,
@@ -18,6 +18,17 @@ export class CohereError extends ApiCallError {
18
18
  }
19
19
  export const failedCohereCallResponseHandler = async ({ response, url, requestBodyValues }) => {
20
20
  const responseBody = await response.text();
21
+ // For some errors, the body of Cohere responses is empty:
22
+ if (responseBody.trim() === "") {
23
+ return new CohereError({
24
+ url,
25
+ requestBodyValues,
26
+ statusCode: response.status,
27
+ data: {
28
+ message: response.statusText,
29
+ },
30
+ });
31
+ }
21
32
  const parsedError = cohereErrorDataSchema.parse(SecureJSON.parse(responseBody));
22
33
  return new CohereError({
23
34
  url,
@@ -159,7 +159,9 @@ const cohereTextEmbeddingResponseSchema = zod_1.default.object({
159
159
  async function callCohereEmbeddingAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, texts, truncate, }) {
160
160
  return (0, postToApi_js_1.postJsonToApi)({
161
161
  url: `${baseUrl}/embed`,
162
- apiKey,
162
+ headers: {
163
+ Authorization: `Bearer ${apiKey}`,
164
+ },
163
165
  body: {
164
166
  model,
165
167
  texts,
@@ -152,7 +152,9 @@ const cohereTextEmbeddingResponseSchema = z.object({
152
152
  async function callCohereEmbeddingAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, texts, truncate, }) {
153
153
  return postJsonToApi({
154
154
  url: `${baseUrl}/embed`,
155
- apiKey,
155
+ headers: {
156
+ Authorization: `Bearer ${apiKey}`,
157
+ },
156
158
  body: {
157
159
  model,
158
160
  texts,
@@ -179,7 +179,9 @@ const cohereTextGenerationResponseSchema = zod_1.z.object({
179
179
  async function callCohereTextGenerationAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, responseFormat, apiKey, model, prompt, numGenerations, maxTokens, temperature, k, p, frequencyPenalty, presencePenalty, endSequences, stopSequences, returnLikelihoods, logitBias, truncate, }) {
180
180
  return (0, postToApi_js_1.postJsonToApi)({
181
181
  url: `${baseUrl}/generate`,
182
- apiKey,
182
+ headers: {
183
+ Authorization: `Bearer ${apiKey}`,
184
+ },
183
185
  body: {
184
186
  stream: responseFormat.stream,
185
187
  model,
@@ -172,7 +172,9 @@ const cohereTextGenerationResponseSchema = z.object({
172
172
  async function callCohereTextGenerationAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, responseFormat, apiKey, model, prompt, numGenerations, maxTokens, temperature, k, p, frequencyPenalty, presencePenalty, endSequences, stopSequences, returnLikelihoods, logitBias, truncate, }) {
173
173
  return postJsonToApi({
174
174
  url: `${baseUrl}/generate`,
175
- apiKey,
175
+ headers: {
176
+ Authorization: `Bearer ${apiKey}`,
177
+ },
176
178
  body: {
177
179
  stream: responseFormat.stream,
178
180
  model,
@@ -97,7 +97,9 @@ const cohereDetokenizationResponseSchema = zod_1.default.object({
97
97
  async function callCohereDetokenizeAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, tokens, }) {
98
98
  return (0, postToApi_js_1.postJsonToApi)({
99
99
  url: `${baseUrl}/detokenize`,
100
- apiKey,
100
+ headers: {
101
+ Authorization: `Bearer ${apiKey}`,
102
+ },
101
103
  body: {
102
104
  model,
103
105
  tokens,
@@ -124,7 +126,9 @@ const cohereTokenizationResponseSchema = zod_1.default.object({
124
126
  async function callCohereTokenizeAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, text, }) {
125
127
  return (0, postToApi_js_1.postJsonToApi)({
126
128
  url: `${baseUrl}/tokenize`,
127
- apiKey,
129
+ headers: {
130
+ Authorization: `Bearer ${apiKey}`,
131
+ },
128
132
  body: {
129
133
  model,
130
134
  text,
@@ -90,7 +90,9 @@ const cohereDetokenizationResponseSchema = z.object({
90
90
  async function callCohereDetokenizeAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, tokens, }) {
91
91
  return postJsonToApi({
92
92
  url: `${baseUrl}/detokenize`,
93
- apiKey,
93
+ headers: {
94
+ Authorization: `Bearer ${apiKey}`,
95
+ },
94
96
  body: {
95
97
  model,
96
98
  tokens,
@@ -117,7 +119,9 @@ const cohereTokenizationResponseSchema = z.object({
117
119
  async function callCohereTokenizeAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, text, }) {
118
120
  return postJsonToApi({
119
121
  url: `${baseUrl}/tokenize`,
120
- apiKey,
122
+ headers: {
123
+ Authorization: `Bearer ${apiKey}`,
124
+ },
121
125
  body: {
122
126
  model,
123
127
  text,
@@ -149,7 +149,9 @@ const huggingFaceTextGenerationResponseSchema = zod_1.default.array(zod_1.defaul
149
149
  async function callHuggingFaceTextGenerationAPI({ baseUrl = "https://api-inference.huggingface.co/models", abortSignal, apiKey, model, inputs, topK, topP, temperature, repetitionPenalty, maxNewTokens, maxTime, numReturnSequences, doSample, options, }) {
150
150
  return (0, postToApi_js_1.postJsonToApi)({
151
151
  url: `${baseUrl}/${model}`,
152
- apiKey,
152
+ headers: {
153
+ Authorization: `Bearer ${apiKey}`,
154
+ },
153
155
  body: {
154
156
  inputs,
155
157
  top_k: topK,
@@ -142,7 +142,9 @@ const huggingFaceTextGenerationResponseSchema = z.array(z.object({
142
142
  async function callHuggingFaceTextGenerationAPI({ baseUrl = "https://api-inference.huggingface.co/models", abortSignal, apiKey, model, inputs, topK, topP, temperature, repetitionPenalty, maxNewTokens, maxTime, numReturnSequences, doSample, options, }) {
143
143
  return postJsonToApi({
144
144
  url: `${baseUrl}/${model}`,
145
- apiKey,
145
+ headers: {
146
+ Authorization: `Bearer ${apiKey}`,
147
+ },
146
148
  body: {
147
149
  inputs,
148
150
  top_k: topK,
@@ -16,7 +16,7 @@ exports.openAIErrorDataSchema = zod_1.z.object({
16
16
  }),
17
17
  });
18
18
  class OpenAIError extends ApiCallError_js_1.ApiCallError {
19
- constructor({ data, statusCode, url, requestBodyValues, message = data.error.message, }) {
19
+ constructor({ data, statusCode, url, requestBodyValues, message, }) {
20
20
  super({
21
21
  message,
22
22
  statusCode,
@@ -24,7 +24,7 @@ class OpenAIError extends ApiCallError_js_1.ApiCallError {
24
24
  url,
25
25
  isRetryable: (statusCode === 429 &&
26
26
  // insufficient_quota is also reported as a 429, but it's not retryable:
27
- data.error.type !== "insufficient_quota") ||
27
+ data?.error.type !== "insufficient_quota") ||
28
28
  statusCode >= 500,
29
29
  });
30
30
  Object.defineProperty(this, "data", {
@@ -39,12 +39,24 @@ class OpenAIError extends ApiCallError_js_1.ApiCallError {
39
39
  exports.OpenAIError = OpenAIError;
40
40
  const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
41
41
  const responseBody = await response.text();
42
- const parsedError = exports.openAIErrorDataSchema.parse(secure_json_parse_1.default.parse(responseBody));
43
- return new OpenAIError({
44
- url,
45
- requestBodyValues,
46
- statusCode: response.status,
47
- data: parsedError,
48
- });
42
+ // resilient parsing in case the response is not JSON or does not match the schema:
43
+ try {
44
+ const parsedError = exports.openAIErrorDataSchema.parse(secure_json_parse_1.default.parse(responseBody));
45
+ return new OpenAIError({
46
+ url,
47
+ requestBodyValues,
48
+ statusCode: response.status,
49
+ message: parsedError.error.message,
50
+ data: parsedError,
51
+ });
52
+ }
53
+ catch (parseError) {
54
+ return new OpenAIError({
55
+ url,
56
+ requestBodyValues,
57
+ statusCode: response.status,
58
+ message: responseBody.trim() !== "" ? responseBody : response.statusText,
59
+ });
60
+ }
49
61
  };
50
62
  exports.failedOpenAICallResponseHandler = failedOpenAICallResponseHandler;
@@ -35,13 +35,13 @@ export declare const openAIErrorDataSchema: z.ZodObject<{
35
35
  }>;
36
36
  export type OpenAIErrorData = z.infer<typeof openAIErrorDataSchema>;
37
37
  export declare class OpenAIError extends ApiCallError {
38
- readonly data: OpenAIErrorData;
38
+ readonly data?: OpenAIErrorData;
39
39
  constructor({ data, statusCode, url, requestBodyValues, message, }: {
40
- message?: string;
40
+ message: string;
41
41
  statusCode: number;
42
42
  url: string;
43
43
  requestBodyValues: unknown;
44
- data: OpenAIErrorData;
44
+ data?: OpenAIErrorData;
45
45
  });
46
46
  }
47
47
  export declare const failedOpenAICallResponseHandler: ResponseHandler<ApiCallError>;
@@ -10,7 +10,7 @@ export const openAIErrorDataSchema = z.object({
10
10
  }),
11
11
  });
12
12
  export class OpenAIError extends ApiCallError {
13
- constructor({ data, statusCode, url, requestBodyValues, message = data.error.message, }) {
13
+ constructor({ data, statusCode, url, requestBodyValues, message, }) {
14
14
  super({
15
15
  message,
16
16
  statusCode,
@@ -18,7 +18,7 @@ export class OpenAIError extends ApiCallError {
18
18
  url,
19
19
  isRetryable: (statusCode === 429 &&
20
20
  // insufficient_quota is also reported as a 429, but it's not retryable:
21
- data.error.type !== "insufficient_quota") ||
21
+ data?.error.type !== "insufficient_quota") ||
22
22
  statusCode >= 500,
23
23
  });
24
24
  Object.defineProperty(this, "data", {
@@ -32,11 +32,23 @@ export class OpenAIError extends ApiCallError {
32
32
  }
33
33
  export const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
34
34
  const responseBody = await response.text();
35
- const parsedError = openAIErrorDataSchema.parse(SecureJSON.parse(responseBody));
36
- return new OpenAIError({
37
- url,
38
- requestBodyValues,
39
- statusCode: response.status,
40
- data: parsedError,
41
- });
35
+ // resilient parsing in case the response is not JSON or does not match the schema:
36
+ try {
37
+ const parsedError = openAIErrorDataSchema.parse(SecureJSON.parse(responseBody));
38
+ return new OpenAIError({
39
+ url,
40
+ requestBodyValues,
41
+ statusCode: response.status,
42
+ message: parsedError.error.message,
43
+ data: parsedError,
44
+ });
45
+ }
46
+ catch (parseError) {
47
+ return new OpenAIError({
48
+ url,
49
+ requestBodyValues,
50
+ statusCode: response.status,
51
+ message: responseBody.trim() !== "" ? responseBody : response.statusText,
52
+ });
53
+ }
42
54
  };
@@ -109,7 +109,9 @@ exports.OpenAIImageGenerationResponseFormat = {
109
109
  async function callOpenAIImageGenerationAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, apiKey, prompt, n, size, responseFormat, user, }) {
110
110
  return (0, postToApi_js_1.postJsonToApi)({
111
111
  url: `${baseUrl}/images/generations`,
112
- apiKey,
112
+ headers: {
113
+ Authorization: `Bearer ${apiKey}`,
114
+ },
113
115
  body: {
114
116
  prompt,
115
117
  n,
@@ -104,7 +104,9 @@ export const OpenAIImageGenerationResponseFormat = {
104
104
  async function callOpenAIImageGenerationAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, apiKey, prompt, n, size, responseFormat, user, }) {
105
105
  return postJsonToApi({
106
106
  url: `${baseUrl}/images/generations`,
107
- apiKey,
107
+ headers: {
108
+ Authorization: `Bearer ${apiKey}`,
109
+ },
108
110
  body: {
109
111
  prompt,
110
112
  n,
@@ -158,7 +158,9 @@ const openAITextEmbeddingResponseSchema = zod_1.default.object({
158
158
  async function callOpenAITextEmbeddingAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, apiKey, model, input, user, }) {
159
159
  return (0, postToApi_js_1.postJsonToApi)({
160
160
  url: `${baseUrl}/embeddings`,
161
- apiKey,
161
+ headers: {
162
+ Authorization: `Bearer ${apiKey}`,
163
+ },
162
164
  body: {
163
165
  model,
164
166
  input,
@@ -149,7 +149,9 @@ const openAITextEmbeddingResponseSchema = z.object({
149
149
  async function callOpenAITextEmbeddingAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, apiKey, model, input, user, }) {
150
150
  return postJsonToApi({
151
151
  url: `${baseUrl}/embeddings`,
152
- apiKey,
152
+ headers: {
153
+ Authorization: `Bearer ${apiKey}`,
154
+ },
153
155
  body: {
154
156
  model,
155
157
  input,
@@ -209,10 +209,13 @@ const openAITextGenerationResponseSchema = zod_1.default.object({
209
209
  *
210
210
  * console.log(response.choices[0].text);
211
211
  */
212
- async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, user, }) {
212
+ async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, user, }) {
213
213
  return (0, postToApi_js_1.postJsonToApi)({
214
214
  url: `${baseUrl}/completions`,
215
- apiKey,
215
+ headers: {
216
+ ...headers,
217
+ Authorization: `Bearer ${apiKey}`,
218
+ },
216
219
  body: {
217
220
  stream: responseFormat.stream,
218
221
  model,
@@ -65,6 +65,7 @@ export declare const calculateOpenAITextGenerationCostInMillicents: ({ model, re
65
65
  }) => number;
66
66
  export interface OpenAITextGenerationModelSettings extends TextGenerationModelSettings {
67
67
  model: OpenAITextGenerationModelType;
68
+ headers?: Record<string, string>;
68
69
  baseUrl?: string;
69
70
  apiKey?: string;
70
71
  retry?: RetryFunction;
@@ -200,10 +200,13 @@ const openAITextGenerationResponseSchema = z.object({
200
200
  *
201
201
  * console.log(response.choices[0].text);
202
202
  */
203
- async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, user, }) {
203
+ async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, user, }) {
204
204
  return postJsonToApi({
205
205
  url: `${baseUrl}/completions`,
206
- apiKey,
206
+ headers: {
207
+ ...headers,
208
+ Authorization: `Bearer ${apiKey}`,
209
+ },
207
210
  body: {
208
211
  stream: responseFormat.stream,
209
212
  model,
@@ -132,8 +132,9 @@ async function callOpenAITranscriptionAPI({ baseUrl = "https://api.openai.com/v1
132
132
  }
133
133
  return (0, postToApi_js_1.postToApi)({
134
134
  url: `${baseUrl}/audio/transcriptions`,
135
- apiKey,
136
- contentType: null,
135
+ headers: {
136
+ Authorization: `Bearer ${apiKey}`,
137
+ },
137
138
  body: {
138
139
  content: formData,
139
140
  values: {
@@ -124,8 +124,9 @@ async function callOpenAITranscriptionAPI({ baseUrl = "https://api.openai.com/v1
124
124
  }
125
125
  return postToApi({
126
126
  url: `${baseUrl}/audio/transcriptions`,
127
- apiKey,
128
- contentType: null,
127
+ headers: {
128
+ Authorization: `Bearer ${apiKey}`,
129
+ },
129
130
  body: {
130
131
  content: formData,
131
132
  values: {
@@ -246,10 +246,13 @@ const openAIChatResponseSchema = zod_1.default.object({
246
246
  total_tokens: zod_1.default.number(),
247
247
  }),
248
248
  });
249
- async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
249
+ async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
250
250
  return (0, postToApi_js_1.postJsonToApi)({
251
251
  url: `${baseUrl}/chat/completions`,
252
- apiKey,
252
+ headers: {
253
+ ...headers,
254
+ Authorization: `Bearer ${apiKey}`,
255
+ },
253
256
  body: {
254
257
  stream: responseFormat.stream,
255
258
  model,
@@ -77,6 +77,7 @@ export declare const calculateOpenAIChatCostInMillicents: ({ model, response, }:
77
77
  }) => number;
78
78
  export interface OpenAIChatCallSettings {
79
79
  model: OpenAIChatModelType;
80
+ headers?: Record<string, string>;
80
81
  functions?: Array<{
81
82
  name: string;
82
83
  description?: string;
@@ -237,10 +237,13 @@ const openAIChatResponseSchema = z.object({
237
237
  total_tokens: z.number(),
238
238
  }),
239
239
  });
240
- async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
240
+ async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
241
241
  return postJsonToApi({
242
242
  url: `${baseUrl}/chat/completions`,
243
- apiKey,
243
+ headers: {
244
+ ...headers,
245
+ Authorization: `Bearer ${apiKey}`,
246
+ },
244
247
  body: {
245
248
  stream: responseFormat.stream,
246
249
  model,
@@ -113,7 +113,9 @@ const stabilityImageGenerationResponseSchema = zod_1.z.object({
113
113
  async function callStabilityImageGenerationAPI({ baseUrl = "https://api.stability.ai/v1", abortSignal, apiKey, engineId, height, width, textPrompts, cfgScale, clipGuidancePreset, sampler, samples, seed, steps, stylePreset, }) {
114
114
  return (0, postToApi_js_1.postJsonToApi)({
115
115
  url: `${baseUrl}/generation/${engineId}/text-to-image`,
116
- apiKey,
116
+ headers: {
117
+ Authorization: `Bearer ${apiKey}`,
118
+ },
117
119
  body: {
118
120
  height,
119
121
  width,
@@ -109,7 +109,9 @@ const stabilityImageGenerationResponseSchema = z.object({
109
109
  async function callStabilityImageGenerationAPI({ baseUrl = "https://api.stability.ai/v1", abortSignal, apiKey, engineId, height, width, textPrompts, cfgScale, clipGuidancePreset, sampler, samples, seed, steps, stylePreset, }) {
110
110
  return postJsonToApi({
111
111
  url: `${baseUrl}/generation/${engineId}/text-to-image`,
112
- apiKey,
112
+ headers: {
113
+ Authorization: `Bearer ${apiKey}`,
114
+ },
113
115
  body: {
114
116
  height,
115
117
  width,
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "modelfusion",
3
3
  "description": "Build AI applications, chatbots, and agents with JavaScript and TypeScript.",
4
- "version": "0.1.1",
4
+ "version": "0.3.0",
5
5
  "author": "Lars Grammel",
6
6
  "license": "MIT",
7
7
  "keywords": [
@@ -65,10 +65,10 @@
65
65
  "@typescript-eslint/parser": "^6.1.0",
66
66
  "copyfiles": "2.4.1",
67
67
  "eslint": "^8.45.0",
68
- "eslint-config-prettier": "8.9.0",
68
+ "eslint-config-prettier": "8.10.0",
69
69
  "husky": "^8.0.3",
70
70
  "lint-staged": "13.2.3",
71
- "prettier": "3.0.0",
71
+ "prettier": "3.0.1",
72
72
  "rimraf": "5.0.1",
73
73
  "typescript": "5.1.6",
74
74
  "zod": "3.21.4",
@@ -0,0 +1,33 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.InstructionToAlpacaPromptMapping = void 0;
4
+ const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
5
+ const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
6
+ /**
7
+ * Maps an instruction prompt to the Alpaca prompt format.
8
+ *
9
+ * If the instruction has a system prompt, it overrides the default system prompt
10
+ * (which can impact the results, because the model may be trained on the default system prompt).
11
+ *
12
+ * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
13
+ */
14
+ const InstructionToAlpacaPromptMapping = () => ({
15
+ stopTokens: [],
16
+ map: (instruction) => {
17
+ let text = instruction.system ??
18
+ (instruction.input != null
19
+ ? DEFAULT_SYSTEM_PROMPT_INPUT
20
+ : DEFAULT_SYSTEM_PROMPT_NO_INPUT);
21
+ text += "\n\n### Instruction:\n";
22
+ if (instruction.system != null) {
23
+ text += `${instruction.system}\n`;
24
+ }
25
+ text += instruction.instruction;
26
+ if (instruction.input != null) {
27
+ text += `\n\n### Input:\n${instruction.input}`;
28
+ }
29
+ text += "\n\n### Response:\n";
30
+ return text;
31
+ },
32
+ });
33
+ exports.InstructionToAlpacaPromptMapping = InstructionToAlpacaPromptMapping;
@@ -0,0 +1,11 @@
1
+ import { InstructionPrompt } from "./InstructionPrompt.js";
2
+ import { PromptMapping } from "./PromptMapping.js";
3
+ /**
4
+ * Maps an instruction prompt to the Alpaca prompt format.
5
+ *
6
+ * If the instruction has a system prompt, it overrides the default system prompt
7
+ * (which can impact the results, because the model may be trained on the default system prompt).
8
+ *
9
+ * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
10
+ */
11
+ export declare const InstructionToAlpacaPromptMapping: () => PromptMapping<InstructionPrompt, string>;
@@ -0,0 +1,29 @@
1
+ const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
2
+ const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
3
+ /**
4
+ * Maps an instruction prompt to the Alpaca prompt format.
5
+ *
6
+ * If the instruction has a system prompt, it overrides the default system prompt
7
+ * (which can impact the results, because the model may be trained on the default system prompt).
8
+ *
9
+ * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
10
+ */
11
+ export const InstructionToAlpacaPromptMapping = () => ({
12
+ stopTokens: [],
13
+ map: (instruction) => {
14
+ let text = instruction.system ??
15
+ (instruction.input != null
16
+ ? DEFAULT_SYSTEM_PROMPT_INPUT
17
+ : DEFAULT_SYSTEM_PROMPT_NO_INPUT);
18
+ text += "\n\n### Instruction:\n";
19
+ if (instruction.system != null) {
20
+ text += `${instruction.system}\n`;
21
+ }
22
+ text += instruction.instruction;
23
+ if (instruction.input != null) {
24
+ text += `\n\n### Input:\n${instruction.input}`;
25
+ }
26
+ text += "\n\n### Response:\n";
27
+ return text;
28
+ },
29
+ });
@@ -1,7 +1,19 @@
1
1
  /**
2
- * A single instruction prompt. It can contain an optional system message to provide context for the language model.
2
+ * A single instruction prompt. It can contain an optional system message to define the role and behavior of the language model
3
+ * and an optional input to provide context for the language model.
3
4
  */
4
5
  export type InstructionPrompt = {
6
+ /**
7
+ * Optional system message to provide context for the language model. Note that for some models,
8
+ * changing the system message can impact the results, because the model may be trained on the default system message.
9
+ */
5
10
  system?: string;
11
+ /**
12
+ * The instruction for the model.
13
+ */
6
14
  instruction: string;
15
+ /**
16
+ * Optional additional input or context, e.g. the content from which information should be extracted.
17
+ */
18
+ input?: string;
7
19
  };
@@ -18,7 +18,7 @@ const InstructionToLlama2PromptMapping = () => ({
18
18
  stopTokens: [END_SEGMENT],
19
19
  map: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
20
20
  ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
21
- : ""} ${instruction.instruction} ${END_INSTRUCTION}\n`,
21
+ : ""} ${instruction.instruction}${instruction.input != null ? `\n\n${instruction.input}` : ""} ${END_INSTRUCTION}\n`,
22
22
  });
23
23
  exports.InstructionToLlama2PromptMapping = InstructionToLlama2PromptMapping;
24
24
  const ChatToLlama2PromptMapping = () => ({
@@ -15,7 +15,7 @@ export const InstructionToLlama2PromptMapping = () => ({
15
15
  stopTokens: [END_SEGMENT],
16
16
  map: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
17
17
  ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
18
- : ""} ${instruction.instruction} ${END_INSTRUCTION}\n`,
18
+ : ""} ${instruction.instruction}${instruction.input != null ? `\n\n${instruction.input}` : ""} ${END_INSTRUCTION}\n`,
19
19
  });
20
20
  export const ChatToLlama2PromptMapping = () => ({
21
21
  map: (chatPrompt) => {
@@ -15,6 +15,12 @@ const InstructionToOpenAIChatPromptMapping = () => ({
15
15
  role: "user",
16
16
  content: instruction.instruction,
17
17
  });
18
+ if (instruction.input != null) {
19
+ messages.push({
20
+ role: "user",
21
+ content: instruction.input,
22
+ });
23
+ }
18
24
  return messages;
19
25
  },
20
26
  stopTokens: [],
@@ -12,6 +12,12 @@ export const InstructionToOpenAIChatPromptMapping = () => ({
12
12
  role: "user",
13
13
  content: instruction.instruction,
14
14
  });
15
+ if (instruction.input != null) {
16
+ messages.push({
17
+ role: "user",
18
+ content: instruction.input,
19
+ });
20
+ }
15
21
  return messages;
16
22
  },
17
23
  stopTokens: [],
@@ -4,9 +4,17 @@ exports.ChatToTextPromptMapping = exports.InstructionToTextPromptMapping = void
4
4
  const validateChatPrompt_js_1 = require("./chat/validateChatPrompt.cjs");
5
5
  const InstructionToTextPromptMapping = () => ({
6
6
  stopTokens: [],
7
- map: (instruction) => instruction.system != null
8
- ? `${instruction.system}\n\n${instruction.instruction}`
9
- : instruction.instruction,
7
+ map: (instruction) => {
8
+ let text = "";
9
+ if (instruction.system != null) {
10
+ text += `${instruction.system}\n\n`;
11
+ }
12
+ text += instruction.instruction;
13
+ if (instruction.input != null) {
14
+ text += `\n\n${instruction.input}`;
15
+ }
16
+ return text;
17
+ },
10
18
  });
11
19
  exports.InstructionToTextPromptMapping = InstructionToTextPromptMapping;
12
20
  /**
@@ -1,9 +1,17 @@
1
1
  import { validateChatPrompt } from "./chat/validateChatPrompt.js";
2
2
  export const InstructionToTextPromptMapping = () => ({
3
3
  stopTokens: [],
4
- map: (instruction) => instruction.system != null
5
- ? `${instruction.system}\n\n${instruction.instruction}`
6
- : instruction.instruction,
4
+ map: (instruction) => {
5
+ let text = "";
6
+ if (instruction.system != null) {
7
+ text += `${instruction.system}\n\n`;
8
+ }
9
+ text += instruction.instruction;
10
+ if (instruction.input != null) {
11
+ text += `\n\n${instruction.input}`;
12
+ }
13
+ return text;
14
+ },
7
15
  });
8
16
  /**
9
17
  * A mapping from a chat prompt to a text prompt.
@@ -0,0 +1,55 @@
1
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.ChatToVicunaPromptMapping = void 0;
const validateChatPrompt_js_1 = require("./chat/validateChatPrompt.cjs");
const DEFAULT_SYSTEM_PROMPT = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.";
/**
 * A mapping from a chat prompt to a Vicuna prompt.
 *
 * Overriding the system message in the first chat message can affect model responses.
 *
 * Vicuna prompt template:
 * ```
 * A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
 *
 * USER: {prompt}
 * ASSISTANT:
 * ```
 */
const ChatToVicunaPromptMapping = () => ({
    map: (chatPrompt) => {
        (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
        const segments = [];
        let position = 0;
        for (const message of chatPrompt) {
            const isFirst = position === 0;
            position += 1;
            // An explicit system message may only appear as the first entry:
            if (isFirst && "system" in message && typeof message.system === "string") {
                segments.push(`${message.system}\n\n`);
                continue;
            }
            // No explicit system message — fall back to the default prompt:
            if (isFirst) {
                segments.push(`${DEFAULT_SYSTEM_PROMPT}\n\n`);
            }
            if ("user" in message) {
                segments.push(`USER: ${message.user}\n`);
            }
            else if ("ai" in message) {
                segments.push(`ASSISTANT:\n${message.ai}\n`);
            }
            else {
                // Neither user nor ai content — reject the message:
                throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
            }
        }
        // Trailing prefix so the model continues as the assistant:
        segments.push(`ASSISTANT: `);
        return segments.join("");
    },
    stopTokens: [`\nUSER:`],
});
exports.ChatToVicunaPromptMapping = ChatToVicunaPromptMapping;
@@ -0,0 +1,16 @@
1
+ import { PromptMapping } from "./PromptMapping.js";
2
+ import { ChatPrompt } from "./chat/ChatPrompt.js";
3
+ /**
4
+ * A mapping from a chat prompt to a Vicuna prompt.
5
+ *
6
+ * Overridding the system message in the first chat message can affect model respones.
7
+ *
8
+ * Vicuna prompt template:
9
+ * ```
10
+ * A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
11
+ *
12
+ * USER: {prompt}
13
+ * ASSISTANT:
14
+ * ```
15
+ */
16
+ export declare const ChatToVicunaPromptMapping: () => PromptMapping<ChatPrompt, string>;
@@ -0,0 +1,51 @@
1
+ import { validateChatPrompt } from "./chat/validateChatPrompt.js";
2
const DEFAULT_SYSTEM_PROMPT = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.";
/**
 * A mapping from a chat prompt to a Vicuna prompt.
 *
 * Overriding the system message in the first chat message can affect model responses.
 *
 * Vicuna prompt template:
 * ```
 * A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
 *
 * USER: {prompt}
 * ASSISTANT:
 * ```
 */
export const ChatToVicunaPromptMapping = () => ({
    map: (chatPrompt) => {
        validateChatPrompt(chatPrompt);
        const segments = [];
        let position = 0;
        for (const message of chatPrompt) {
            const isFirst = position === 0;
            position += 1;
            // An explicit system message may only appear as the first entry:
            if (isFirst && "system" in message && typeof message.system === "string") {
                segments.push(`${message.system}\n\n`);
                continue;
            }
            // No explicit system message — fall back to the default prompt:
            if (isFirst) {
                segments.push(`${DEFAULT_SYSTEM_PROMPT}\n\n`);
            }
            if ("user" in message) {
                segments.push(`USER: ${message.user}\n`);
            }
            else if ("ai" in message) {
                segments.push(`ASSISTANT:\n${message.ai}\n`);
            }
            else {
                // Neither user nor ai content — reject the message:
                throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
            }
        }
        // Trailing prefix so the model continues as the assistant:
        segments.push(`ASSISTANT: `);
        return segments.join("");
    },
    stopTokens: [`\nUSER:`],
});
package/prompt/index.cjs CHANGED
@@ -14,12 +14,14 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
14
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
15
  };
16
16
  Object.defineProperty(exports, "__esModule", { value: true });
17
+ __exportStar(require("./AlpacaPromptMapping.cjs"), exports);
17
18
  __exportStar(require("./InstructionPrompt.cjs"), exports);
18
19
  __exportStar(require("./Llama2PromptMapping.cjs"), exports);
19
20
  __exportStar(require("./OpenAIChatPromptMapping.cjs"), exports);
20
21
  __exportStar(require("./PromptMapping.cjs"), exports);
21
22
  __exportStar(require("./PromptMappingTextGenerationModel.cjs"), exports);
22
23
  __exportStar(require("./TextPromptMapping.cjs"), exports);
24
+ __exportStar(require("./VicunaPromptMapping.cjs"), exports);
23
25
  __exportStar(require("./chat/ChatPrompt.cjs"), exports);
24
26
  __exportStar(require("./chat/trimChatPrompt.cjs"), exports);
25
27
  __exportStar(require("./chat/validateChatPrompt.cjs"), exports);
package/prompt/index.d.ts CHANGED
@@ -1,9 +1,11 @@
1
+ export * from "./AlpacaPromptMapping.js";
1
2
  export * from "./InstructionPrompt.js";
2
3
  export * from "./Llama2PromptMapping.js";
3
4
  export * from "./OpenAIChatPromptMapping.js";
4
5
  export * from "./PromptMapping.js";
5
6
  export * from "./PromptMappingTextGenerationModel.js";
6
7
  export * from "./TextPromptMapping.js";
8
+ export * from "./VicunaPromptMapping.js";
7
9
  export * from "./chat/ChatPrompt.js";
8
10
  export * from "./chat/trimChatPrompt.js";
9
11
  export * from "./chat/validateChatPrompt.js";
package/prompt/index.js CHANGED
@@ -1,9 +1,11 @@
1
+ export * from "./AlpacaPromptMapping.js";
1
2
  export * from "./InstructionPrompt.js";
2
3
  export * from "./Llama2PromptMapping.js";
3
4
  export * from "./OpenAIChatPromptMapping.js";
4
5
  export * from "./PromptMapping.js";
5
6
  export * from "./PromptMappingTextGenerationModel.js";
6
7
  export * from "./TextPromptMapping.js";
8
+ export * from "./VicunaPromptMapping.js";
7
9
  export * from "./chat/ChatPrompt.js";
8
10
  export * from "./chat/trimChatPrompt.js";
9
11
  export * from "./chat/validateChatPrompt.js";
@@ -18,10 +18,12 @@ const createJsonResponseHandler = (responseSchema) => async ({ response, url, re
18
18
  exports.createJsonResponseHandler = createJsonResponseHandler;
19
19
  const createTextResponseHandler = () => async ({ response }) => response.text();
20
20
  exports.createTextResponseHandler = createTextResponseHandler;
21
- const postJsonToApi = async ({ url, apiKey, body, failedResponseHandler, successfulResponseHandler, abortSignal, }) => (0, exports.postToApi)({
21
+ const postJsonToApi = async ({ url, headers, body, failedResponseHandler, successfulResponseHandler, abortSignal, }) => (0, exports.postToApi)({
22
22
  url,
23
- apiKey,
24
- contentType: "application/json",
23
+ headers: {
24
+ ...headers,
25
+ "Content-Type": "application/json",
26
+ },
25
27
  body: {
26
28
  content: JSON.stringify(body),
27
29
  values: body,
@@ -31,15 +33,8 @@ const postJsonToApi = async ({ url, apiKey, body, failedResponseHandler, success
31
33
  abortSignal,
32
34
  });
33
35
  exports.postJsonToApi = postJsonToApi;
34
- const postToApi = async ({ url, apiKey, contentType, body, successfulResponseHandler, failedResponseHandler, abortSignal, }) => {
36
+ const postToApi = async ({ url, headers = {}, body, successfulResponseHandler, failedResponseHandler, abortSignal, }) => {
35
37
  try {
36
- const headers = {};
37
- if (apiKey !== undefined) {
38
- headers["Authorization"] = `Bearer ${apiKey}`;
39
- }
40
- if (contentType !== null) {
41
- headers["Content-Type"] = contentType;
42
- }
43
38
  const response = await fetch(url, {
44
39
  method: "POST",
45
40
  headers,
@@ -7,18 +7,17 @@ export type ResponseHandler<T> = (options: {
7
7
  }) => PromiseLike<T>;
8
8
  export declare const createJsonResponseHandler: <T>(responseSchema: z.ZodType<T, z.ZodTypeDef, T>) => ResponseHandler<T>;
9
9
  export declare const createTextResponseHandler: () => ResponseHandler<string>;
10
- export declare const postJsonToApi: <T>({ url, apiKey, body, failedResponseHandler, successfulResponseHandler, abortSignal, }: {
10
+ export declare const postJsonToApi: <T>({ url, headers, body, failedResponseHandler, successfulResponseHandler, abortSignal, }: {
11
11
  url: string;
12
- apiKey?: string | undefined;
12
+ headers?: Record<string, string> | undefined;
13
13
  body: unknown;
14
14
  failedResponseHandler: ResponseHandler<ApiCallError>;
15
15
  successfulResponseHandler: ResponseHandler<T>;
16
16
  abortSignal?: AbortSignal | undefined;
17
17
  }) => Promise<T>;
18
- export declare const postToApi: <T>({ url, apiKey, contentType, body, successfulResponseHandler, failedResponseHandler, abortSignal, }: {
18
+ export declare const postToApi: <T>({ url, headers, body, successfulResponseHandler, failedResponseHandler, abortSignal, }: {
19
19
  url: string;
20
- apiKey?: string | undefined;
21
- contentType: string | null;
20
+ headers?: Record<string, string> | undefined;
22
21
  body: {
23
22
  content: string | FormData;
24
23
  values: unknown;
@@ -13,10 +13,12 @@ export const createJsonResponseHandler = (responseSchema) => async ({ response,
13
13
  return parsedResult.data;
14
14
  };
15
15
  export const createTextResponseHandler = () => async ({ response }) => response.text();
16
- export const postJsonToApi = async ({ url, apiKey, body, failedResponseHandler, successfulResponseHandler, abortSignal, }) => postToApi({
16
+ export const postJsonToApi = async ({ url, headers, body, failedResponseHandler, successfulResponseHandler, abortSignal, }) => postToApi({
17
17
  url,
18
- apiKey,
19
- contentType: "application/json",
18
+ headers: {
19
+ ...headers,
20
+ "Content-Type": "application/json",
21
+ },
20
22
  body: {
21
23
  content: JSON.stringify(body),
22
24
  values: body,
@@ -25,15 +27,8 @@ export const postJsonToApi = async ({ url, apiKey, body, failedResponseHandler,
25
27
  successfulResponseHandler,
26
28
  abortSignal,
27
29
  });
28
- export const postToApi = async ({ url, apiKey, contentType, body, successfulResponseHandler, failedResponseHandler, abortSignal, }) => {
30
+ export const postToApi = async ({ url, headers = {}, body, successfulResponseHandler, failedResponseHandler, abortSignal, }) => {
29
31
  try {
30
- const headers = {};
31
- if (apiKey !== undefined) {
32
- headers["Authorization"] = `Bearer ${apiKey}`;
33
- }
34
- if (contentType !== null) {
35
- headers["Content-Type"] = contentType;
36
- }
37
32
  const response = await fetch(url, {
38
33
  method: "POST",
39
34
  headers,
@@ -20,7 +20,7 @@ async function _retryWithExponentialBackoff(f, { maxTries, delay, backoffFactor,
20
20
  catch (error) {
21
21
  const newErrors = [...errors, error];
22
22
  const tryNumber = newErrors.length;
23
- if (tryNumber > maxTries) {
23
+ if (tryNumber >= maxTries) {
24
24
  throw new RetryError_js_1.RetryError({
25
25
  message: `Failed after ${tryNumber} tries.`,
26
26
  reason: "maxTriesExceeded",
@@ -16,7 +16,7 @@ async function _retryWithExponentialBackoff(f, { maxTries, delay, backoffFactor,
16
16
  catch (error) {
17
17
  const newErrors = [...errors, error];
18
18
  const tryNumber = newErrors.length;
19
- if (tryNumber > maxTries) {
19
+ if (tryNumber >= maxTries) {
20
20
  throw new RetryError({
21
21
  message: `Failed after ${tryNumber} tries.`,
22
22
  reason: "maxTriesExceeded",