modelfusion 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/README.md +13 -9
  2. package/model-provider/cohere/CohereError.cjs +11 -0
  3. package/model-provider/cohere/CohereError.js +11 -0
  4. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +3 -1
  5. package/model-provider/cohere/CohereTextEmbeddingModel.js +3 -1
  6. package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -1
  7. package/model-provider/cohere/CohereTextGenerationModel.js +3 -1
  8. package/model-provider/cohere/CohereTokenizer.cjs +6 -2
  9. package/model-provider/cohere/CohereTokenizer.js +6 -2
  10. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +3 -1
  11. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +3 -1
  12. package/model-provider/openai/OpenAIError.cjs +21 -9
  13. package/model-provider/openai/OpenAIError.d.ts +3 -3
  14. package/model-provider/openai/OpenAIError.js +21 -9
  15. package/model-provider/openai/OpenAIImageGenerationModel.cjs +3 -1
  16. package/model-provider/openai/OpenAIImageGenerationModel.js +3 -1
  17. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -1
  18. package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -1
  19. package/model-provider/openai/OpenAITextGenerationModel.cjs +5 -2
  20. package/model-provider/openai/OpenAITextGenerationModel.d.ts +1 -0
  21. package/model-provider/openai/OpenAITextGenerationModel.js +5 -2
  22. package/model-provider/openai/OpenAITranscriptionModel.cjs +3 -2
  23. package/model-provider/openai/OpenAITranscriptionModel.js +3 -2
  24. package/model-provider/openai/chat/OpenAIChatModel.cjs +5 -2
  25. package/model-provider/openai/chat/OpenAIChatModel.d.ts +1 -0
  26. package/model-provider/openai/chat/OpenAIChatModel.js +5 -2
  27. package/model-provider/stability/StabilityImageGenerationModel.cjs +3 -1
  28. package/model-provider/stability/StabilityImageGenerationModel.js +3 -1
  29. package/package.json +1 -1
  30. package/util/api/postToApi.cjs +6 -11
  31. package/util/api/postToApi.d.ts +4 -5
  32. package/util/api/postToApi.js +6 -11
  33. package/util/api/retryWithExponentialBackoff.cjs +1 -1
  34. package/util/api/retryWithExponentialBackoff.js +1 -1
package/README.md CHANGED
@@ -377,17 +377,21 @@ const { chunks } = await retrieveTextChunks(
377
377
  - [Memory](https://modelfusion.dev/integration/vector-index/memory)
378
378
  - [Pinecone](https://modelfusion.dev/integration/vector-index/pinecone)
379
379
 
380
- ### Prompt Mappings
380
+ ### Observability
381
381
 
382
- Use higher level prompts that are mapped into model specific prompts.
382
+ - [Helicone](https://modelfusion.dev/integration/observability/helicone)
383
383
 
384
- | Model | Instruction Prompt | Chat Prompt |
385
- | ------------ | ------------------ | ----------- |
386
- | OpenAI | ✅ | ✅ |
387
- | Llama 2 | ✅ | ✅ |
388
- | Alpaca | | |
389
- | Vicuna | | |
390
- | Generic Text | ✅ | ✅ |
384
+ ### Prompt Formats
385
+
386
+ Use higher level prompts that are mapped into model specific prompt formats.
387
+
388
+ | Prompt Format | Instruction Prompt | Chat Prompt |
389
+ | ------------- | ------------------ | ----------- |
390
+ | OpenAI Chat | ✅ | ✅ |
391
+ | Llama 2 | ✅ | ✅ |
392
+ | Alpaca | ✅ | ❌ |
393
+ | Vicuna | ❌ | ✅ |
394
+ | Generic Text | ✅ | ✅ |
391
395
 
392
396
  ## Documentation
393
397
 
@@ -25,6 +25,17 @@ class CohereError extends ApiCallError_js_1.ApiCallError {
25
25
  exports.CohereError = CohereError;
26
26
  const failedCohereCallResponseHandler = async ({ response, url, requestBodyValues }) => {
27
27
  const responseBody = await response.text();
28
+ // For some errors, the body of Cohere responses is empty:
29
+ if (responseBody.trim() === "") {
30
+ return new CohereError({
31
+ url,
32
+ requestBodyValues,
33
+ statusCode: response.status,
34
+ data: {
35
+ message: response.statusText,
36
+ },
37
+ });
38
+ }
28
39
  const parsedError = exports.cohereErrorDataSchema.parse(secure_json_parse_1.default.parse(responseBody));
29
40
  return new CohereError({
30
41
  url,
@@ -18,6 +18,17 @@ export class CohereError extends ApiCallError {
18
18
  }
19
19
  export const failedCohereCallResponseHandler = async ({ response, url, requestBodyValues }) => {
20
20
  const responseBody = await response.text();
21
+ // For some errors, the body of Cohere responses is empty:
22
+ if (responseBody.trim() === "") {
23
+ return new CohereError({
24
+ url,
25
+ requestBodyValues,
26
+ statusCode: response.status,
27
+ data: {
28
+ message: response.statusText,
29
+ },
30
+ });
31
+ }
21
32
  const parsedError = cohereErrorDataSchema.parse(SecureJSON.parse(responseBody));
22
33
  return new CohereError({
23
34
  url,
@@ -159,7 +159,9 @@ const cohereTextEmbeddingResponseSchema = zod_1.default.object({
159
159
  async function callCohereEmbeddingAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, texts, truncate, }) {
160
160
  return (0, postToApi_js_1.postJsonToApi)({
161
161
  url: `${baseUrl}/embed`,
162
- apiKey,
162
+ headers: {
163
+ Authorization: `Bearer ${apiKey}`,
164
+ },
163
165
  body: {
164
166
  model,
165
167
  texts,
@@ -152,7 +152,9 @@ const cohereTextEmbeddingResponseSchema = z.object({
152
152
  async function callCohereEmbeddingAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, texts, truncate, }) {
153
153
  return postJsonToApi({
154
154
  url: `${baseUrl}/embed`,
155
- apiKey,
155
+ headers: {
156
+ Authorization: `Bearer ${apiKey}`,
157
+ },
156
158
  body: {
157
159
  model,
158
160
  texts,
@@ -179,7 +179,9 @@ const cohereTextGenerationResponseSchema = zod_1.z.object({
179
179
  async function callCohereTextGenerationAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, responseFormat, apiKey, model, prompt, numGenerations, maxTokens, temperature, k, p, frequencyPenalty, presencePenalty, endSequences, stopSequences, returnLikelihoods, logitBias, truncate, }) {
180
180
  return (0, postToApi_js_1.postJsonToApi)({
181
181
  url: `${baseUrl}/generate`,
182
- apiKey,
182
+ headers: {
183
+ Authorization: `Bearer ${apiKey}`,
184
+ },
183
185
  body: {
184
186
  stream: responseFormat.stream,
185
187
  model,
@@ -172,7 +172,9 @@ const cohereTextGenerationResponseSchema = z.object({
172
172
  async function callCohereTextGenerationAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, responseFormat, apiKey, model, prompt, numGenerations, maxTokens, temperature, k, p, frequencyPenalty, presencePenalty, endSequences, stopSequences, returnLikelihoods, logitBias, truncate, }) {
173
173
  return postJsonToApi({
174
174
  url: `${baseUrl}/generate`,
175
- apiKey,
175
+ headers: {
176
+ Authorization: `Bearer ${apiKey}`,
177
+ },
176
178
  body: {
177
179
  stream: responseFormat.stream,
178
180
  model,
@@ -97,7 +97,9 @@ const cohereDetokenizationResponseSchema = zod_1.default.object({
97
97
  async function callCohereDetokenizeAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, tokens, }) {
98
98
  return (0, postToApi_js_1.postJsonToApi)({
99
99
  url: `${baseUrl}/detokenize`,
100
- apiKey,
100
+ headers: {
101
+ Authorization: `Bearer ${apiKey}`,
102
+ },
101
103
  body: {
102
104
  model,
103
105
  tokens,
@@ -124,7 +126,9 @@ const cohereTokenizationResponseSchema = zod_1.default.object({
124
126
  async function callCohereTokenizeAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, text, }) {
125
127
  return (0, postToApi_js_1.postJsonToApi)({
126
128
  url: `${baseUrl}/tokenize`,
127
- apiKey,
129
+ headers: {
130
+ Authorization: `Bearer ${apiKey}`,
131
+ },
128
132
  body: {
129
133
  model,
130
134
  text,
@@ -90,7 +90,9 @@ const cohereDetokenizationResponseSchema = z.object({
90
90
  async function callCohereDetokenizeAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, tokens, }) {
91
91
  return postJsonToApi({
92
92
  url: `${baseUrl}/detokenize`,
93
- apiKey,
93
+ headers: {
94
+ Authorization: `Bearer ${apiKey}`,
95
+ },
94
96
  body: {
95
97
  model,
96
98
  tokens,
@@ -117,7 +119,9 @@ const cohereTokenizationResponseSchema = z.object({
117
119
  async function callCohereTokenizeAPI({ baseUrl = "https://api.cohere.ai/v1", abortSignal, apiKey, model, text, }) {
118
120
  return postJsonToApi({
119
121
  url: `${baseUrl}/tokenize`,
120
- apiKey,
122
+ headers: {
123
+ Authorization: `Bearer ${apiKey}`,
124
+ },
121
125
  body: {
122
126
  model,
123
127
  text,
@@ -149,7 +149,9 @@ const huggingFaceTextGenerationResponseSchema = zod_1.default.array(zod_1.defaul
149
149
  async function callHuggingFaceTextGenerationAPI({ baseUrl = "https://api-inference.huggingface.co/models", abortSignal, apiKey, model, inputs, topK, topP, temperature, repetitionPenalty, maxNewTokens, maxTime, numReturnSequences, doSample, options, }) {
150
150
  return (0, postToApi_js_1.postJsonToApi)({
151
151
  url: `${baseUrl}/${model}`,
152
- apiKey,
152
+ headers: {
153
+ Authorization: `Bearer ${apiKey}`,
154
+ },
153
155
  body: {
154
156
  inputs,
155
157
  top_k: topK,
@@ -142,7 +142,9 @@ const huggingFaceTextGenerationResponseSchema = z.array(z.object({
142
142
  async function callHuggingFaceTextGenerationAPI({ baseUrl = "https://api-inference.huggingface.co/models", abortSignal, apiKey, model, inputs, topK, topP, temperature, repetitionPenalty, maxNewTokens, maxTime, numReturnSequences, doSample, options, }) {
143
143
  return postJsonToApi({
144
144
  url: `${baseUrl}/${model}`,
145
- apiKey,
145
+ headers: {
146
+ Authorization: `Bearer ${apiKey}`,
147
+ },
146
148
  body: {
147
149
  inputs,
148
150
  top_k: topK,
@@ -16,7 +16,7 @@ exports.openAIErrorDataSchema = zod_1.z.object({
16
16
  }),
17
17
  });
18
18
  class OpenAIError extends ApiCallError_js_1.ApiCallError {
19
- constructor({ data, statusCode, url, requestBodyValues, message = data.error.message, }) {
19
+ constructor({ data, statusCode, url, requestBodyValues, message, }) {
20
20
  super({
21
21
  message,
22
22
  statusCode,
@@ -24,7 +24,7 @@ class OpenAIError extends ApiCallError_js_1.ApiCallError {
24
24
  url,
25
25
  isRetryable: (statusCode === 429 &&
26
26
  // insufficient_quota is also reported as a 429, but it's not retryable:
27
- data.error.type !== "insufficient_quota") ||
27
+ data?.error.type !== "insufficient_quota") ||
28
28
  statusCode >= 500,
29
29
  });
30
30
  Object.defineProperty(this, "data", {
@@ -39,12 +39,24 @@ class OpenAIError extends ApiCallError_js_1.ApiCallError {
39
39
  exports.OpenAIError = OpenAIError;
40
40
  const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
41
41
  const responseBody = await response.text();
42
- const parsedError = exports.openAIErrorDataSchema.parse(secure_json_parse_1.default.parse(responseBody));
43
- return new OpenAIError({
44
- url,
45
- requestBodyValues,
46
- statusCode: response.status,
47
- data: parsedError,
48
- });
42
+ // resilient parsing in case the response is not JSON or does not match the schema:
43
+ try {
44
+ const parsedError = exports.openAIErrorDataSchema.parse(secure_json_parse_1.default.parse(responseBody));
45
+ return new OpenAIError({
46
+ url,
47
+ requestBodyValues,
48
+ statusCode: response.status,
49
+ message: parsedError.error.message,
50
+ data: parsedError,
51
+ });
52
+ }
53
+ catch (parseError) {
54
+ return new OpenAIError({
55
+ url,
56
+ requestBodyValues,
57
+ statusCode: response.status,
58
+ message: responseBody.trim() !== "" ? responseBody : response.statusText,
59
+ });
60
+ }
49
61
  };
50
62
  exports.failedOpenAICallResponseHandler = failedOpenAICallResponseHandler;
@@ -35,13 +35,13 @@ export declare const openAIErrorDataSchema: z.ZodObject<{
35
35
  }>;
36
36
  export type OpenAIErrorData = z.infer<typeof openAIErrorDataSchema>;
37
37
  export declare class OpenAIError extends ApiCallError {
38
- readonly data: OpenAIErrorData;
38
+ readonly data?: OpenAIErrorData;
39
39
  constructor({ data, statusCode, url, requestBodyValues, message, }: {
40
- message?: string;
40
+ message: string;
41
41
  statusCode: number;
42
42
  url: string;
43
43
  requestBodyValues: unknown;
44
- data: OpenAIErrorData;
44
+ data?: OpenAIErrorData;
45
45
  });
46
46
  }
47
47
  export declare const failedOpenAICallResponseHandler: ResponseHandler<ApiCallError>;
@@ -10,7 +10,7 @@ export const openAIErrorDataSchema = z.object({
10
10
  }),
11
11
  });
12
12
  export class OpenAIError extends ApiCallError {
13
- constructor({ data, statusCode, url, requestBodyValues, message = data.error.message, }) {
13
+ constructor({ data, statusCode, url, requestBodyValues, message, }) {
14
14
  super({
15
15
  message,
16
16
  statusCode,
@@ -18,7 +18,7 @@ export class OpenAIError extends ApiCallError {
18
18
  url,
19
19
  isRetryable: (statusCode === 429 &&
20
20
  // insufficient_quota is also reported as a 429, but it's not retryable:
21
- data.error.type !== "insufficient_quota") ||
21
+ data?.error.type !== "insufficient_quota") ||
22
22
  statusCode >= 500,
23
23
  });
24
24
  Object.defineProperty(this, "data", {
@@ -32,11 +32,23 @@ export class OpenAIError extends ApiCallError {
32
32
  }
33
33
  export const failedOpenAICallResponseHandler = async ({ response, url, requestBodyValues }) => {
34
34
  const responseBody = await response.text();
35
- const parsedError = openAIErrorDataSchema.parse(SecureJSON.parse(responseBody));
36
- return new OpenAIError({
37
- url,
38
- requestBodyValues,
39
- statusCode: response.status,
40
- data: parsedError,
41
- });
35
+ // resilient parsing in case the response is not JSON or does not match the schema:
36
+ try {
37
+ const parsedError = openAIErrorDataSchema.parse(SecureJSON.parse(responseBody));
38
+ return new OpenAIError({
39
+ url,
40
+ requestBodyValues,
41
+ statusCode: response.status,
42
+ message: parsedError.error.message,
43
+ data: parsedError,
44
+ });
45
+ }
46
+ catch (parseError) {
47
+ return new OpenAIError({
48
+ url,
49
+ requestBodyValues,
50
+ statusCode: response.status,
51
+ message: responseBody.trim() !== "" ? responseBody : response.statusText,
52
+ });
53
+ }
42
54
  };
@@ -109,7 +109,9 @@ exports.OpenAIImageGenerationResponseFormat = {
109
109
  async function callOpenAIImageGenerationAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, apiKey, prompt, n, size, responseFormat, user, }) {
110
110
  return (0, postToApi_js_1.postJsonToApi)({
111
111
  url: `${baseUrl}/images/generations`,
112
- apiKey,
112
+ headers: {
113
+ Authorization: `Bearer ${apiKey}`,
114
+ },
113
115
  body: {
114
116
  prompt,
115
117
  n,
@@ -104,7 +104,9 @@ export const OpenAIImageGenerationResponseFormat = {
104
104
  async function callOpenAIImageGenerationAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, apiKey, prompt, n, size, responseFormat, user, }) {
105
105
  return postJsonToApi({
106
106
  url: `${baseUrl}/images/generations`,
107
- apiKey,
107
+ headers: {
108
+ Authorization: `Bearer ${apiKey}`,
109
+ },
108
110
  body: {
109
111
  prompt,
110
112
  n,
@@ -158,7 +158,9 @@ const openAITextEmbeddingResponseSchema = zod_1.default.object({
158
158
  async function callOpenAITextEmbeddingAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, apiKey, model, input, user, }) {
159
159
  return (0, postToApi_js_1.postJsonToApi)({
160
160
  url: `${baseUrl}/embeddings`,
161
- apiKey,
161
+ headers: {
162
+ Authorization: `Bearer ${apiKey}`,
163
+ },
162
164
  body: {
163
165
  model,
164
166
  input,
@@ -149,7 +149,9 @@ const openAITextEmbeddingResponseSchema = z.object({
149
149
  async function callOpenAITextEmbeddingAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, apiKey, model, input, user, }) {
150
150
  return postJsonToApi({
151
151
  url: `${baseUrl}/embeddings`,
152
- apiKey,
152
+ headers: {
153
+ Authorization: `Bearer ${apiKey}`,
154
+ },
153
155
  body: {
154
156
  model,
155
157
  input,
@@ -209,10 +209,13 @@ const openAITextGenerationResponseSchema = zod_1.default.object({
209
209
  *
210
210
  * console.log(response.choices[0].text);
211
211
  */
212
- async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, user, }) {
212
+ async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, user, }) {
213
213
  return (0, postToApi_js_1.postJsonToApi)({
214
214
  url: `${baseUrl}/completions`,
215
- apiKey,
215
+ headers: {
216
+ ...headers,
217
+ Authorization: `Bearer ${apiKey}`,
218
+ },
216
219
  body: {
217
220
  stream: responseFormat.stream,
218
221
  model,
@@ -65,6 +65,7 @@ export declare const calculateOpenAITextGenerationCostInMillicents: ({ model, re
65
65
  }) => number;
66
66
  export interface OpenAITextGenerationModelSettings extends TextGenerationModelSettings {
67
67
  model: OpenAITextGenerationModelType;
68
+ headers?: Record<string, string>;
68
69
  baseUrl?: string;
69
70
  apiKey?: string;
70
71
  retry?: RetryFunction;
@@ -200,10 +200,13 @@ const openAITextGenerationResponseSchema = z.object({
200
200
  *
201
201
  * console.log(response.choices[0].text);
202
202
  */
203
- async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, user, }) {
203
+ async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, user, }) {
204
204
  return postJsonToApi({
205
205
  url: `${baseUrl}/completions`,
206
- apiKey,
206
+ headers: {
207
+ ...headers,
208
+ Authorization: `Bearer ${apiKey}`,
209
+ },
207
210
  body: {
208
211
  stream: responseFormat.stream,
209
212
  model,
@@ -132,8 +132,9 @@ async function callOpenAITranscriptionAPI({ baseUrl = "https://api.openai.com/v1
132
132
  }
133
133
  return (0, postToApi_js_1.postToApi)({
134
134
  url: `${baseUrl}/audio/transcriptions`,
135
- apiKey,
136
- contentType: null,
135
+ headers: {
136
+ Authorization: `Bearer ${apiKey}`,
137
+ },
137
138
  body: {
138
139
  content: formData,
139
140
  values: {
@@ -124,8 +124,9 @@ async function callOpenAITranscriptionAPI({ baseUrl = "https://api.openai.com/v1
124
124
  }
125
125
  return postToApi({
126
126
  url: `${baseUrl}/audio/transcriptions`,
127
- apiKey,
128
- contentType: null,
127
+ headers: {
128
+ Authorization: `Bearer ${apiKey}`,
129
+ },
129
130
  body: {
130
131
  content: formData,
131
132
  values: {
@@ -246,10 +246,13 @@ const openAIChatResponseSchema = zod_1.default.object({
246
246
  total_tokens: zod_1.default.number(),
247
247
  }),
248
248
  });
249
- async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
249
+ async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
250
250
  return (0, postToApi_js_1.postJsonToApi)({
251
251
  url: `${baseUrl}/chat/completions`,
252
- apiKey,
252
+ headers: {
253
+ ...headers,
254
+ Authorization: `Bearer ${apiKey}`,
255
+ },
253
256
  body: {
254
257
  stream: responseFormat.stream,
255
258
  model,
@@ -77,6 +77,7 @@ export declare const calculateOpenAIChatCostInMillicents: ({ model, response, }:
77
77
  }) => number;
78
78
  export interface OpenAIChatCallSettings {
79
79
  model: OpenAIChatModelType;
80
+ headers?: Record<string, string>;
80
81
  functions?: Array<{
81
82
  name: string;
82
83
  description?: string;
@@ -237,10 +237,13 @@ const openAIChatResponseSchema = z.object({
237
237
  total_tokens: z.number(),
238
238
  }),
239
239
  });
240
- async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
240
+ async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
241
241
  return postJsonToApi({
242
242
  url: `${baseUrl}/chat/completions`,
243
- apiKey,
243
+ headers: {
244
+ ...headers,
245
+ Authorization: `Bearer ${apiKey}`,
246
+ },
244
247
  body: {
245
248
  stream: responseFormat.stream,
246
249
  model,
@@ -113,7 +113,9 @@ const stabilityImageGenerationResponseSchema = zod_1.z.object({
113
113
  async function callStabilityImageGenerationAPI({ baseUrl = "https://api.stability.ai/v1", abortSignal, apiKey, engineId, height, width, textPrompts, cfgScale, clipGuidancePreset, sampler, samples, seed, steps, stylePreset, }) {
114
114
  return (0, postToApi_js_1.postJsonToApi)({
115
115
  url: `${baseUrl}/generation/${engineId}/text-to-image`,
116
- apiKey,
116
+ headers: {
117
+ Authorization: `Bearer ${apiKey}`,
118
+ },
117
119
  body: {
118
120
  height,
119
121
  width,
@@ -109,7 +109,9 @@ const stabilityImageGenerationResponseSchema = z.object({
109
109
  async function callStabilityImageGenerationAPI({ baseUrl = "https://api.stability.ai/v1", abortSignal, apiKey, engineId, height, width, textPrompts, cfgScale, clipGuidancePreset, sampler, samples, seed, steps, stylePreset, }) {
110
110
  return postJsonToApi({
111
111
  url: `${baseUrl}/generation/${engineId}/text-to-image`,
112
- apiKey,
112
+ headers: {
113
+ Authorization: `Bearer ${apiKey}`,
114
+ },
113
115
  body: {
114
116
  height,
115
117
  width,
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "modelfusion",
3
3
  "description": "Build AI applications, chatbots, and agents with JavaScript and TypeScript.",
4
- "version": "0.2.0",
4
+ "version": "0.3.0",
5
5
  "author": "Lars Grammel",
6
6
  "license": "MIT",
7
7
  "keywords": [
@@ -18,10 +18,12 @@ const createJsonResponseHandler = (responseSchema) => async ({ response, url, re
18
18
  exports.createJsonResponseHandler = createJsonResponseHandler;
19
19
  const createTextResponseHandler = () => async ({ response }) => response.text();
20
20
  exports.createTextResponseHandler = createTextResponseHandler;
21
- const postJsonToApi = async ({ url, apiKey, body, failedResponseHandler, successfulResponseHandler, abortSignal, }) => (0, exports.postToApi)({
21
+ const postJsonToApi = async ({ url, headers, body, failedResponseHandler, successfulResponseHandler, abortSignal, }) => (0, exports.postToApi)({
22
22
  url,
23
- apiKey,
24
- contentType: "application/json",
23
+ headers: {
24
+ ...headers,
25
+ "Content-Type": "application/json",
26
+ },
25
27
  body: {
26
28
  content: JSON.stringify(body),
27
29
  values: body,
@@ -31,15 +33,8 @@ const postJsonToApi = async ({ url, apiKey, body, failedResponseHandler, success
31
33
  abortSignal,
32
34
  });
33
35
  exports.postJsonToApi = postJsonToApi;
34
- const postToApi = async ({ url, apiKey, contentType, body, successfulResponseHandler, failedResponseHandler, abortSignal, }) => {
36
+ const postToApi = async ({ url, headers = {}, body, successfulResponseHandler, failedResponseHandler, abortSignal, }) => {
35
37
  try {
36
- const headers = {};
37
- if (apiKey !== undefined) {
38
- headers["Authorization"] = `Bearer ${apiKey}`;
39
- }
40
- if (contentType !== null) {
41
- headers["Content-Type"] = contentType;
42
- }
43
38
  const response = await fetch(url, {
44
39
  method: "POST",
45
40
  headers,
@@ -7,18 +7,17 @@ export type ResponseHandler<T> = (options: {
7
7
  }) => PromiseLike<T>;
8
8
  export declare const createJsonResponseHandler: <T>(responseSchema: z.ZodType<T, z.ZodTypeDef, T>) => ResponseHandler<T>;
9
9
  export declare const createTextResponseHandler: () => ResponseHandler<string>;
10
- export declare const postJsonToApi: <T>({ url, apiKey, body, failedResponseHandler, successfulResponseHandler, abortSignal, }: {
10
+ export declare const postJsonToApi: <T>({ url, headers, body, failedResponseHandler, successfulResponseHandler, abortSignal, }: {
11
11
  url: string;
12
- apiKey?: string | undefined;
12
+ headers?: Record<string, string> | undefined;
13
13
  body: unknown;
14
14
  failedResponseHandler: ResponseHandler<ApiCallError>;
15
15
  successfulResponseHandler: ResponseHandler<T>;
16
16
  abortSignal?: AbortSignal | undefined;
17
17
  }) => Promise<T>;
18
- export declare const postToApi: <T>({ url, apiKey, contentType, body, successfulResponseHandler, failedResponseHandler, abortSignal, }: {
18
+ export declare const postToApi: <T>({ url, headers, body, successfulResponseHandler, failedResponseHandler, abortSignal, }: {
19
19
  url: string;
20
- apiKey?: string | undefined;
21
- contentType: string | null;
20
+ headers?: Record<string, string> | undefined;
22
21
  body: {
23
22
  content: string | FormData;
24
23
  values: unknown;
@@ -13,10 +13,12 @@ export const createJsonResponseHandler = (responseSchema) => async ({ response,
13
13
  return parsedResult.data;
14
14
  };
15
15
  export const createTextResponseHandler = () => async ({ response }) => response.text();
16
- export const postJsonToApi = async ({ url, apiKey, body, failedResponseHandler, successfulResponseHandler, abortSignal, }) => postToApi({
16
+ export const postJsonToApi = async ({ url, headers, body, failedResponseHandler, successfulResponseHandler, abortSignal, }) => postToApi({
17
17
  url,
18
- apiKey,
19
- contentType: "application/json",
18
+ headers: {
19
+ ...headers,
20
+ "Content-Type": "application/json",
21
+ },
20
22
  body: {
21
23
  content: JSON.stringify(body),
22
24
  values: body,
@@ -25,15 +27,8 @@ export const postJsonToApi = async ({ url, apiKey, body, failedResponseHandler,
25
27
  successfulResponseHandler,
26
28
  abortSignal,
27
29
  });
28
- export const postToApi = async ({ url, apiKey, contentType, body, successfulResponseHandler, failedResponseHandler, abortSignal, }) => {
30
+ export const postToApi = async ({ url, headers = {}, body, successfulResponseHandler, failedResponseHandler, abortSignal, }) => {
29
31
  try {
30
- const headers = {};
31
- if (apiKey !== undefined) {
32
- headers["Authorization"] = `Bearer ${apiKey}`;
33
- }
34
- if (contentType !== null) {
35
- headers["Content-Type"] = contentType;
36
- }
37
32
  const response = await fetch(url, {
38
33
  method: "POST",
39
34
  headers,
@@ -20,7 +20,7 @@ async function _retryWithExponentialBackoff(f, { maxTries, delay, backoffFactor,
20
20
  catch (error) {
21
21
  const newErrors = [...errors, error];
22
22
  const tryNumber = newErrors.length;
23
- if (tryNumber > maxTries) {
23
+ if (tryNumber >= maxTries) {
24
24
  throw new RetryError_js_1.RetryError({
25
25
  message: `Failed after ${tryNumber} tries.`,
26
26
  reason: "maxTriesExceeded",
@@ -16,7 +16,7 @@ async function _retryWithExponentialBackoff(f, { maxTries, delay, backoffFactor,
16
16
  catch (error) {
17
17
  const newErrors = [...errors, error];
18
18
  const tryNumber = newErrors.length;
19
- if (tryNumber > maxTries) {
19
+ if (tryNumber >= maxTries) {
20
20
  throw new RetryError({
21
21
  message: `Failed after ${tryNumber} tries.`,
22
22
  reason: "maxTriesExceeded",