call-ai 0.0.0-dev-prompts
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE.md +232 -0
- package/README.md +264 -0
- package/api-core.d.ts +13 -0
- package/api-core.js +238 -0
- package/api-core.js.map +1 -0
- package/api.d.ts +4 -0
- package/api.js +365 -0
- package/api.js.map +1 -0
- package/api.ts.off +595 -0
- package/env.d.ts +22 -0
- package/env.js +65 -0
- package/env.js.map +1 -0
- package/error-handling.d.ts +14 -0
- package/error-handling.js +144 -0
- package/error-handling.js.map +1 -0
- package/image.d.ts +2 -0
- package/image.js +72 -0
- package/image.js.map +1 -0
- package/index.d.ts +7 -0
- package/index.js +8 -0
- package/index.js.map +1 -0
- package/index.ts.bak +16 -0
- package/key-management.d.ts +29 -0
- package/key-management.js +190 -0
- package/key-management.js.map +1 -0
- package/non-streaming.d.ts +7 -0
- package/non-streaming.js +206 -0
- package/non-streaming.js.map +1 -0
- package/package.json +43 -0
- package/response-metadata.d.ts +6 -0
- package/response-metadata.js +22 -0
- package/response-metadata.js.map +1 -0
- package/strategies/index.d.ts +2 -0
- package/strategies/index.js +3 -0
- package/strategies/index.js.map +1 -0
- package/strategies/model-strategies.d.ts +6 -0
- package/strategies/model-strategies.js +138 -0
- package/strategies/model-strategies.js.map +1 -0
- package/strategies/strategy-selector.d.ts +2 -0
- package/strategies/strategy-selector.js +66 -0
- package/strategies/strategy-selector.js.map +1 -0
- package/streaming.d.ts +4 -0
- package/streaming.js +365 -0
- package/streaming.js.map +1 -0
- package/streaming.ts.off +571 -0
- package/tsconfig.json +18 -0
- package/types.d.ts +228 -0
- package/types.js +33 -0
- package/types.js.map +1 -0
- package/utils.d.ts +8 -0
- package/utils.js +42 -0
- package/utils.js.map +1 -0
- package/version.d.ts +1 -0
- package/version.js +2 -0
- package/version.js.map +1 -0
package/api.ts.off
ADDED
|
@@ -0,0 +1,595 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Core API implementation for call-ai
|
|
3
|
+
*/
|
|
4
|
+
import { CallAIError, CallAIErrorParams, CallAIOptions, Message, ResponseMeta, SchemaStrategy, StreamResponse } from "./types.js";
|
|
5
|
+
import { chooseSchemaStrategy } from "./strategies/index.js";
|
|
6
|
+
import { responseMetadata, boxString } from "./response-metadata.js";
|
|
7
|
+
import { keyStore, globalDebug } from "./key-management.js";
|
|
8
|
+
import { handleApiError, checkForInvalidModelError } from "./error-handling.js";
|
|
9
|
+
import { createBackwardCompatStreamingProxy } from "./api-core.js";
|
|
10
|
+
import { extractContent, extractClaudeResponse } from "./non-streaming.js";
|
|
11
|
+
import { createStreamingGenerator } from "./streaming.js";
|
|
12
|
+
import { PACKAGE_VERSION } from "./version.js";
|
|
13
|
+
import { callAiEnv, callAiFetch } from "./utils.js";
|
|
14
|
+
|
|
15
|
+
// Key management is now imported from ./key-management
|
|
16
|
+
|
|
17
|
+
// initKeyStore is imported from key-management.ts
|
|
18
|
+
// No need to call initKeyStore() here as it's called on module load in key-management.ts
|
|
19
|
+
|
|
20
|
+
// isNewKeyError is imported from key-management.ts
|
|
21
|
+
|
|
22
|
+
// refreshApiKey is imported from key-management.ts
|
|
23
|
+
|
|
24
|
+
// getHashFromKey is imported from key-management.ts
|
|
25
|
+
|
|
26
|
+
// storeKeyMetadata is imported from key-management.ts
|
|
27
|
+
|
|
28
|
+
// Response metadata is now imported from ./response-metadata
|
|
29
|
+
|
|
30
|
+
// boxString and getMeta functions are now imported from ./response-metadata
|
|
31
|
+
// Re-export getMeta to maintain backward compatibility
|
|
32
|
+
// export { getMeta };
|
|
33
|
+
|
|
34
|
+
// Import package version for debugging
|
|
35
|
+
|
|
36
|
+
// Default fallback model when the primary model fails or is unavailable.
// Used by the invalid-model retry paths in callAi and callAINonStreaming.
const FALLBACK_MODEL = "openrouter/auto";
|
|
38
|
+
|
|
39
|
+
/**
|
|
40
|
+
* Make an AI API call with the given options
|
|
41
|
+
* @param prompt User prompt as string or an array of message objects
|
|
42
|
+
* @param options Configuration options including optional schema for structured output
|
|
43
|
+
* @returns A Promise that resolves to the complete response string when streaming is disabled,
|
|
44
|
+
* or a Promise that resolves to an AsyncGenerator when streaming is enabled.
|
|
45
|
+
* The AsyncGenerator yields partial responses as they arrive.
|
|
46
|
+
*/
|
|
47
|
+
export function callAi(prompt: string | Message[], options: CallAIOptions = {}): Promise<string | StreamResponse> {
|
|
48
|
+
// Check if we need to force streaming based on model strategy
|
|
49
|
+
const schemaStrategy = chooseSchemaStrategy(options.model, options.schema || null);
|
|
50
|
+
|
|
51
|
+
// We no longer set a default maxTokens
|
|
52
|
+
// Will only include max_tokens in the request if explicitly set by the user
|
|
53
|
+
|
|
54
|
+
// Handle special case: Claude with tools requires streaming
|
|
55
|
+
if (!options.stream && schemaStrategy.shouldForceStream) {
|
|
56
|
+
// Buffer streaming results into a single response
|
|
57
|
+
return bufferStreamingResults(prompt, options);
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
// Handle normal non-streaming mode
|
|
61
|
+
if (options.stream !== true) {
|
|
62
|
+
return callAINonStreaming(prompt, options);
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
// Handle streaming mode - return a Promise that resolves to an AsyncGenerator
|
|
66
|
+
// but also supports legacy non-awaited usage for backward compatibility
|
|
67
|
+
const streamPromise = (async () => {
|
|
68
|
+
// Do setup and validation before returning the generator
|
|
69
|
+
const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, { ...options, stream: true });
|
|
70
|
+
|
|
71
|
+
// Use either explicit debug option or global debug flag
|
|
72
|
+
const debug = options.debug || globalDebug;
|
|
73
|
+
if (debug) {
|
|
74
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Making fetch request to: ${endpoint}`);
|
|
75
|
+
console.log(`[callAi:${PACKAGE_VERSION}] With model: ${model}`);
|
|
76
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Request headers:`, JSON.stringify(requestOptions.headers));
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
let response;
|
|
80
|
+
try {
|
|
81
|
+
response = await callAiFetch(options)(endpoint, requestOptions);
|
|
82
|
+
if (options.debug) {
|
|
83
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Fetch completed with status:`, response.status, response.statusText);
|
|
84
|
+
|
|
85
|
+
// Log all headers
|
|
86
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Response headers:`);
|
|
87
|
+
response.headers.forEach((value, name) => {
|
|
88
|
+
console.log(`[callAi:${PACKAGE_VERSION}] ${name}: ${value}`);
|
|
89
|
+
});
|
|
90
|
+
|
|
91
|
+
// Clone response for diagnostic purposes only
|
|
92
|
+
const diagnosticResponse = response.clone();
|
|
93
|
+
try {
|
|
94
|
+
// Try to get the response as text for debugging
|
|
95
|
+
const responseText = await diagnosticResponse.text();
|
|
96
|
+
console.log(
|
|
97
|
+
`[callAi:${PACKAGE_VERSION}] First 500 chars of response body:`,
|
|
98
|
+
responseText.substring(0, 500) + (responseText.length > 500 ? "..." : ""),
|
|
99
|
+
);
|
|
100
|
+
} catch (e) {
|
|
101
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Could not read response body for diagnostics:`, e);
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
} catch (fetchError) {
|
|
105
|
+
if (options.debug) {
|
|
106
|
+
console.error(`[callAi:${PACKAGE_VERSION}] Network error during fetch:`, fetchError);
|
|
107
|
+
}
|
|
108
|
+
throw fetchError; // Re-throw network errors
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
// Explicitly check for HTTP error status and log extensively if debug is enabled
|
|
112
|
+
// Safe access to headers in case of mock environments
|
|
113
|
+
const contentType = response?.headers?.get?.("content-type") || "";
|
|
114
|
+
|
|
115
|
+
if (options.debug) {
|
|
116
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Response.ok =`, response.ok);
|
|
117
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Response.status =`, response.status);
|
|
118
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Response.statusText =`, response.statusText);
|
|
119
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Response.type =`, response.type);
|
|
120
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Content-Type =`, contentType);
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
// Browser-compatible error handling - must check BOTH status code AND content-type
|
|
124
|
+
// Some browsers will report status 200 for SSE streams even when server returns 400
|
|
125
|
+
const hasHttpError = !response.ok || response.status >= 400;
|
|
126
|
+
const hasJsonError = contentType.includes("application/json");
|
|
127
|
+
|
|
128
|
+
if (hasHttpError || hasJsonError) {
|
|
129
|
+
if (options.debug) {
|
|
130
|
+
console.log(
|
|
131
|
+
`[callAi:${PACKAGE_VERSION}] ⚠️ Error detected - HTTP Status: ${response.status}, Content-Type: ${contentType}`,
|
|
132
|
+
);
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
// Handle the error with fallback model if appropriate
|
|
136
|
+
if (!options.skipRetry) {
|
|
137
|
+
const clonedResponse = response.clone();
|
|
138
|
+
let isInvalidModel = false;
|
|
139
|
+
|
|
140
|
+
try {
|
|
141
|
+
// Check if this is an invalid model error
|
|
142
|
+
const modelCheckResult = await checkForInvalidModelError(clonedResponse, model, options.debug);
|
|
143
|
+
isInvalidModel = modelCheckResult.isInvalidModel;
|
|
144
|
+
|
|
145
|
+
if (isInvalidModel) {
|
|
146
|
+
if (options.debug) {
|
|
147
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Retrying with fallback model: ${FALLBACK_MODEL}`);
|
|
148
|
+
}
|
|
149
|
+
// Retry with fallback model
|
|
150
|
+
return (await callAi(prompt, {
|
|
151
|
+
...options,
|
|
152
|
+
model: FALLBACK_MODEL,
|
|
153
|
+
})) as StreamResponse;
|
|
154
|
+
}
|
|
155
|
+
} catch (modelCheckError) {
|
|
156
|
+
console.error(`[callAi:${PACKAGE_VERSION}] Error during model check:`, modelCheckError);
|
|
157
|
+
// Continue with normal error handling
|
|
158
|
+
}
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
// Extract error details from response
|
|
162
|
+
try {
|
|
163
|
+
// Try to get error details from the response body
|
|
164
|
+
const errorBody = await response.text();
|
|
165
|
+
if (options.debug) {
|
|
166
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Error body:`, errorBody);
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
try {
|
|
170
|
+
// Try to parse JSON error
|
|
171
|
+
const errorJson = JSON.parse(errorBody);
|
|
172
|
+
if (options.debug) {
|
|
173
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Parsed error:`, errorJson);
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
// Extract message from OpenRouter error format
|
|
177
|
+
let errorMessage = "";
|
|
178
|
+
|
|
179
|
+
// Handle common error formats
|
|
180
|
+
if (errorJson.error && typeof errorJson.error === "object" && errorJson.error.message) {
|
|
181
|
+
// OpenRouter/OpenAI format: { error: { message: "..." } }
|
|
182
|
+
errorMessage = errorJson.error.message;
|
|
183
|
+
} else if (errorJson.error && typeof errorJson.error === "string") {
|
|
184
|
+
// Simple error format: { error: "..." }
|
|
185
|
+
errorMessage = errorJson.error;
|
|
186
|
+
} else if (errorJson.message) {
|
|
187
|
+
// Generic format: { message: "..." }
|
|
188
|
+
errorMessage = errorJson.message;
|
|
189
|
+
} else {
|
|
190
|
+
// Fallback with status details
|
|
191
|
+
errorMessage = `API returned ${response.status}: ${response.statusText}`;
|
|
192
|
+
}
|
|
193
|
+
|
|
194
|
+
// Add status details to error message if not already included
|
|
195
|
+
if (!errorMessage.includes(response.status.toString())) {
|
|
196
|
+
errorMessage = `${errorMessage} (Status: ${response.status})`;
|
|
197
|
+
}
|
|
198
|
+
|
|
199
|
+
if (options.debug) {
|
|
200
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Extracted error message:`, errorMessage);
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
// Create error with standard format
|
|
204
|
+
const error = new CallAIError({
|
|
205
|
+
message: errorMessage,
|
|
206
|
+
status: response.status,
|
|
207
|
+
statusText: response.statusText,
|
|
208
|
+
details: errorJson,
|
|
209
|
+
contentType,
|
|
210
|
+
});
|
|
211
|
+
throw error;
|
|
212
|
+
} catch (jsonError) {
|
|
213
|
+
// If JSON parsing fails, extract a useful message from the raw error body
|
|
214
|
+
if (options.debug) {
|
|
215
|
+
console.log(`[callAi:${PACKAGE_VERSION}] JSON parse error:`, jsonError);
|
|
216
|
+
}
|
|
217
|
+
|
|
218
|
+
// Try to extract a useful message even from non-JSON text
|
|
219
|
+
let errorMessage = "";
|
|
220
|
+
|
|
221
|
+
// Check if it's a plain text error message
|
|
222
|
+
if (errorBody && errorBody.trim().length > 0) {
|
|
223
|
+
// Limit length for readability
|
|
224
|
+
errorMessage = errorBody.length > 100 ? errorBody.substring(0, 100) + "..." : errorBody;
|
|
225
|
+
} else {
|
|
226
|
+
errorMessage = `API error: ${response.status} ${response.statusText}`;
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
// Add status details if not already included
|
|
230
|
+
if (!errorMessage.includes(response.status.toString())) {
|
|
231
|
+
errorMessage = `${errorMessage} (Status: ${response.status})`;
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
if (options.debug) {
|
|
235
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Extracted text error message:`, errorMessage);
|
|
236
|
+
}
|
|
237
|
+
|
|
238
|
+
const error = new CallAIError({
|
|
239
|
+
message: errorMessage,
|
|
240
|
+
status: response.status,
|
|
241
|
+
statusText: response.statusText,
|
|
242
|
+
details: errorBody,
|
|
243
|
+
contentType,
|
|
244
|
+
});
|
|
245
|
+
throw error;
|
|
246
|
+
}
|
|
247
|
+
} catch (responseError) {
|
|
248
|
+
if (responseError instanceof Error) {
|
|
249
|
+
// Re-throw if it's already properly formatted
|
|
250
|
+
throw responseError;
|
|
251
|
+
}
|
|
252
|
+
|
|
253
|
+
// Fallback error
|
|
254
|
+
const error = new CallAIError({
|
|
255
|
+
message: `API returned ${response.status}: ${response.statusText}`,
|
|
256
|
+
status: response.status,
|
|
257
|
+
statusText: response.statusText,
|
|
258
|
+
details: undefined,
|
|
259
|
+
contentType,
|
|
260
|
+
});
|
|
261
|
+
throw error;
|
|
262
|
+
}
|
|
263
|
+
}
|
|
264
|
+
// Only if response is OK, create and return the streaming generator
|
|
265
|
+
if (options.debug) {
|
|
266
|
+
console.log(`[callAi:${PACKAGE_VERSION}] Response OK, creating streaming generator`);
|
|
267
|
+
}
|
|
268
|
+
return createStreamingGenerator(response, options, schemaStrategy, model);
|
|
269
|
+
})();
|
|
270
|
+
|
|
271
|
+
// For backward compatibility with v0.6.x where users didn't await the result
|
|
272
|
+
if (callAiEnv.NODE_ENV !== "production") {
|
|
273
|
+
if (options.debug) {
|
|
274
|
+
console.warn(
|
|
275
|
+
`[callAi:${PACKAGE_VERSION}] No await found - using legacy streaming pattern. This will be removed in a future version and may cause issues with certain models.`,
|
|
276
|
+
);
|
|
277
|
+
}
|
|
278
|
+
}
|
|
279
|
+
|
|
280
|
+
// Create a proxy object that acts both as a Promise and an AsyncGenerator for backward compatibility
|
|
281
|
+
//... @ts-ignore - We're deliberately implementing a proxy with dual behavior
|
|
282
|
+
return createBackwardCompatStreamingProxy(streamPromise);
|
|
283
|
+
}
|
|
284
|
+
|
|
285
|
+
/**
|
|
286
|
+
* Buffer streaming results into a single response for cases where
|
|
287
|
+
* we need to use streaming internally but the caller requested non-streaming
|
|
288
|
+
*/
|
|
289
|
+
async function bufferStreamingResults(prompt: string | Message[], options: CallAIOptions): Promise<string> {
|
|
290
|
+
// Create a copy of options with streaming enabled
|
|
291
|
+
const streamingOptions = {
|
|
292
|
+
...options,
|
|
293
|
+
stream: true,
|
|
294
|
+
};
|
|
295
|
+
|
|
296
|
+
try {
|
|
297
|
+
// Get streaming generator
|
|
298
|
+
const generator = (await callAi(prompt, streamingOptions)) as AsyncGenerator<string, string, unknown>;
|
|
299
|
+
|
|
300
|
+
// For Claude JSON responses, take only the last chunk (the final processed result)
|
|
301
|
+
// For all other cases, concatenate chunks as before
|
|
302
|
+
const isClaudeJson = /claude/.test(options.model || "") && options.schema;
|
|
303
|
+
|
|
304
|
+
if (isClaudeJson) {
|
|
305
|
+
// For Claude with JSON schema, we only want the last yielded value
|
|
306
|
+
// which will be the complete, properly processed JSON
|
|
307
|
+
let lastChunk = "";
|
|
308
|
+
for await (const chunk of generator) {
|
|
309
|
+
// Replace the last chunk entirely instead of concatenating
|
|
310
|
+
lastChunk = chunk;
|
|
311
|
+
}
|
|
312
|
+
return lastChunk;
|
|
313
|
+
} else {
|
|
314
|
+
// For all other cases, concatenate chunks
|
|
315
|
+
let result = "";
|
|
316
|
+
for await (const chunk of generator) {
|
|
317
|
+
result += chunk;
|
|
318
|
+
}
|
|
319
|
+
return result;
|
|
320
|
+
}
|
|
321
|
+
} catch (error) {
|
|
322
|
+
// Handle errors with standard API error handling
|
|
323
|
+
await handleApiError(error as CallAIErrorParams, "Buffered streaming", options.debug, {
|
|
324
|
+
apiKey: options.apiKey,
|
|
325
|
+
endpoint: options.endpoint,
|
|
326
|
+
skipRefresh: options.skipRefresh,
|
|
327
|
+
refreshToken: options.refreshToken,
|
|
328
|
+
updateRefreshToken: options.updateRefreshToken,
|
|
329
|
+
});
|
|
330
|
+
// If we get here, key was refreshed successfully, retry the operation with the new key
|
|
331
|
+
// Retry with the refreshed key
|
|
332
|
+
return bufferStreamingResults(prompt, {
|
|
333
|
+
...options,
|
|
334
|
+
apiKey: keyStore.current || undefined, // Use the refreshed key from keyStore
|
|
335
|
+
});
|
|
336
|
+
}
|
|
337
|
+
|
|
338
|
+
// This line should never be reached, but it satisfies the linter by ensuring
|
|
339
|
+
// all code paths return a value
|
|
340
|
+
throw new Error("Unexpected code path in bufferStreamingResults");
|
|
341
|
+
}
|
|
342
|
+
|
|
343
|
+
/**
|
|
344
|
+
* Standardized API error handler
|
|
345
|
+
*/
|
|
346
|
+
// createBackwardCompatStreamingProxy is imported from api-core.ts
|
|
347
|
+
|
|
348
|
+
// handleApiError is imported from error-handling.ts
|
|
349
|
+
|
|
350
|
+
// checkForInvalidModelError is imported from error-handling.ts
|
|
351
|
+
|
|
352
|
+
/**
|
|
353
|
+
* Prepare request parameters common to both streaming and non-streaming calls
|
|
354
|
+
*/
|
|
355
|
+
function prepareRequestParams(
|
|
356
|
+
prompt: string | Message[],
|
|
357
|
+
options: CallAIOptions,
|
|
358
|
+
): {
|
|
359
|
+
apiKey: string;
|
|
360
|
+
model: string;
|
|
361
|
+
endpoint: string;
|
|
362
|
+
requestOptions: RequestInit;
|
|
363
|
+
schemaStrategy: SchemaStrategy;
|
|
364
|
+
} {
|
|
365
|
+
// First try to get the API key from options or window globals
|
|
366
|
+
const apiKey = options.apiKey || keyStore.current || callAiEnv.CALLAI_API_KEY; // Try keyStore first in case it was refreshed in a previous call
|
|
367
|
+
const schema = options.schema || null;
|
|
368
|
+
|
|
369
|
+
// If no API key exists, we won't throw immediately. We'll continue and let handleApiError
|
|
370
|
+
// attempt to fetch a key if needed. This will be handled later in the call chain.
|
|
371
|
+
|
|
372
|
+
// Select the appropriate strategy based on model and schema
|
|
373
|
+
const schemaStrategy = chooseSchemaStrategy(options.model, schema);
|
|
374
|
+
const model = schemaStrategy.model;
|
|
375
|
+
|
|
376
|
+
// Get custom chat API origin if set
|
|
377
|
+
const customChatOrigin = options.chatUrl || callAiEnv.CALLAI_CHAT_URL;
|
|
378
|
+
|
|
379
|
+
// Use custom origin or default OpenRouter URL
|
|
380
|
+
const endpoint =
|
|
381
|
+
options.endpoint ||
|
|
382
|
+
(customChatOrigin ? `${customChatOrigin}/api/v1/chat/completions` : "https://openrouter.ai/api/v1/chat/completions");
|
|
383
|
+
|
|
384
|
+
// Handle both string prompts and message arrays for backward compatibility
|
|
385
|
+
const messages: Message[] = Array.isArray(prompt) ? prompt : [{ role: "user", content: prompt }];
|
|
386
|
+
|
|
387
|
+
// Common parameters for both streaming and non-streaming
|
|
388
|
+
const requestParams: CallAIOptions = {
|
|
389
|
+
model,
|
|
390
|
+
messages,
|
|
391
|
+
stream: options.stream !== undefined ? options.stream : false,
|
|
392
|
+
};
|
|
393
|
+
|
|
394
|
+
// Only include temperature if explicitly set
|
|
395
|
+
if (options.temperature) {
|
|
396
|
+
requestParams.temperature = options.temperature;
|
|
397
|
+
}
|
|
398
|
+
|
|
399
|
+
// Only include top_p if explicitly set
|
|
400
|
+
if (options.topP !== undefined) {
|
|
401
|
+
requestParams.top_p = options.topP;
|
|
402
|
+
}
|
|
403
|
+
|
|
404
|
+
// Only include max_tokens if explicitly set
|
|
405
|
+
if (options.maxTokens !== undefined) {
|
|
406
|
+
requestParams.max_tokens = options.maxTokens;
|
|
407
|
+
}
|
|
408
|
+
|
|
409
|
+
// Add optional parameters if specified
|
|
410
|
+
if (options.stop) {
|
|
411
|
+
// Handle both single string and array of stop sequences
|
|
412
|
+
requestParams.stop = Array.isArray(options.stop) ? options.stop : [options.stop];
|
|
413
|
+
}
|
|
414
|
+
|
|
415
|
+
// Add response_format parameter for models that support JSON output
|
|
416
|
+
if (options.responseFormat === "json") {
|
|
417
|
+
requestParams.response_format = { type: "json_object" };
|
|
418
|
+
}
|
|
419
|
+
|
|
420
|
+
// Add schema structure if provided (for function calling/JSON mode)
|
|
421
|
+
if (schema) {
|
|
422
|
+
// Apply schema-specific parameters using the selected strategy
|
|
423
|
+
Object.assign(requestParams, schemaStrategy.prepareRequest(schema, messages));
|
|
424
|
+
}
|
|
425
|
+
|
|
426
|
+
// HTTP headers for the request
|
|
427
|
+
const headers: Record<string, string> = {
|
|
428
|
+
Authorization: `Bearer ${apiKey}`,
|
|
429
|
+
"Content-Type": "application/json",
|
|
430
|
+
"HTTP-Referer": options.referer || "https://vibes.diy",
|
|
431
|
+
"X-Title": options.title || "Vibes",
|
|
432
|
+
};
|
|
433
|
+
|
|
434
|
+
// Add any additional headers
|
|
435
|
+
if (options.headers) {
|
|
436
|
+
Object.assign(headers, options.headers);
|
|
437
|
+
}
|
|
438
|
+
|
|
439
|
+
// Build the requestOptions object for fetch
|
|
440
|
+
const requestOptions: RequestInit = {
|
|
441
|
+
method: "POST",
|
|
442
|
+
headers: {
|
|
443
|
+
...headers,
|
|
444
|
+
"Content-Type": "application/json",
|
|
445
|
+
},
|
|
446
|
+
body: JSON.stringify(requestParams),
|
|
447
|
+
};
|
|
448
|
+
|
|
449
|
+
// If we don't have an API key, throw a clear error that can be caught and handled
|
|
450
|
+
// by the error handling system to trigger key fetching
|
|
451
|
+
if (!apiKey) {
|
|
452
|
+
throw new Error("API key is required. Provide it via options.apiKey or set window.CALLAI_API_KEY");
|
|
453
|
+
}
|
|
454
|
+
|
|
455
|
+
// Debug logging for request payload
|
|
456
|
+
if (options.debug) {
|
|
457
|
+
console.log(`[callAi-prepareRequest:raw] Endpoint: ${endpoint}`);
|
|
458
|
+
console.log(`[callAi-prepareRequest:raw] Model: ${model}`);
|
|
459
|
+
console.log(`[callAi-prepareRequest:raw] Payload:`, JSON.stringify(requestParams));
|
|
460
|
+
}
|
|
461
|
+
|
|
462
|
+
return { apiKey, model, endpoint, requestOptions, schemaStrategy };
|
|
463
|
+
}
|
|
464
|
+
|
|
465
|
+
/**
 * Internal implementation for non-streaming API calls.
 *
 * Builds the request via prepareRequestParams, performs the fetch, handles
 * invalid-model fallback and key-refresh retries, records response metadata
 * in the responseMetadata WeakMap, and returns the processed content string.
 *
 * @param prompt User prompt as string or an array of message objects
 * @param options Caller options
 * @param isRetry Internal flag: true when this call is already a fallback/retry,
 *                preventing infinite retry loops
 */
async function callAINonStreaming(prompt: string | Message[], options: CallAIOptions = {}, isRetry = false): Promise<string> {
  try {
    // Start timing for metadata
    const startTime = Date.now();

    // Create metadata object
    const meta: ResponseMeta = {
      model: options.model || "unknown",
      timing: {
        startTime: startTime,
      },
    };
    const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, options);

    const response = await callAiFetch(options)(endpoint, requestOptions);

    // We don't store the raw Response object in metadata anymore

    // Handle HTTP errors, with potential fallback for invalid model
    if (!response.ok || response.status >= 400) {
      const { isInvalidModel } = await checkForInvalidModelError(response, model, options.debug);

      if (isInvalidModel) {
        // Retry with fallback model (isRetry=true blocks further fallbacks)
        return callAINonStreaming(prompt, { ...options, model: FALLBACK_MODEL }, true);
      }

      // Create a proper error object with the status code preserved
      const error = new CallAIError({
        message: `HTTP error! Status: ${response.status}`,
        status: response.status,
        statusText: response.statusText,
        details: undefined,
        contentType: "text/plain",
      });
      throw error;
    }

    let result;

    // For Claude, use text() instead of json() to avoid potential hanging
    if (/claude/i.test(model)) {
      try {
        result = await extractClaudeResponse(response);
      } catch (error) {
        // NOTE(review): handleApiError is neither awaited nor followed by a
        // rethrow, so on failure `result` stays undefined and the
        // `result.error` access below raises a TypeError that falls into the
        // outer catch (triggering the key-refresh retry path). Confirm whether
        // this indirection is intentional.
        handleApiError(error as CallAIErrorParams, "Claude API response processing failed", options.debug);
      }
    } else {
      result = await response.json();
    }

    // Debug logging for raw API response
    if (options.debug) {
      console.log(`[callAi-nonStreaming:raw] Response:`, JSON.stringify(result));
    }

    // Handle error responses delivered in a 2xx body
    if (result.error) {
      if (options.debug) {
        console.error("API returned an error:", result.error);
      }
      // If it's a model error and not already a retry, try with fallback
      if (
        !isRetry &&
        !options.skipRetry &&
        result.error.message &&
        result.error.message.toLowerCase().includes("not a valid model")
      ) {
        if (options.debug) {
          console.warn(`Model ${model} error, retrying with ${FALLBACK_MODEL}`);
        }
        return callAINonStreaming(prompt, { ...options, model: FALLBACK_MODEL }, true);
      }
      // Non-retryable body-level error: surface it to the caller as JSON text
      return JSON.stringify({
        error: result.error,
        message: result.error.message || "API returned an error",
      });
    }

    // Extract content from the response
    const content = extractContent(result, schemaStrategy);

    // Store the raw response data for user access
    if (result) {
      // Store the parsed JSON result from the API call
      meta.rawResponse = result;
    }

    // Update model info (prepareRequestParams may have substituted the model)
    meta.model = model;

    // Update timing info
    if (meta.timing) {
      meta.timing.endTime = Date.now();
      meta.timing.duration = meta.timing.endTime - meta.timing.startTime;
    }

    // Process the content based on model type
    const processedContent = schemaStrategy.processResponse(content);

    // Box the string so it can be used as a WeakMap key for metadata lookup
    const boxed = boxString(processedContent);
    responseMetadata.set(boxed, meta);

    return processedContent;
  } catch (error) {
    // handleApiError resolves only if the API key was refreshed; otherwise it throws
    await handleApiError(error, "Non-streaming API call", options.debug, {
      apiKey: options.apiKey,
      endpoint: options.endpoint,
      skipRefresh: options.skipRefresh,
      refreshToken: options.refreshToken,
      updateRefreshToken: options.updateRefreshToken,
    });
    // If we get here, key was refreshed successfully, retry the operation with the new key
    // Retry with the refreshed key
    return callAINonStreaming(
      prompt,
      {
        ...options,
        apiKey: keyStore.current || undefined, // Use the refreshed key from keyStore
      },
      true,
    ); // Set isRetry to true
  }

  // This line will never be reached, but it satisfies the linter
  throw new Error("Unexpected code path in callAINonStreaming");
}
|
package/env.d.ts
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { Env } from "@adviser/cement";
// Typed accessor facade over environment variables (and window globals) for call-ai.
declare class CallAIEnv {
  // Lazily-initialized backing Env (see env.js: envFactory({ symbol: "callAi" })).
  env: () => Env;
  // Adopts oEnv as the new backing env; see env.js for the merge semantics.
  merge(oEnv: Env): Env;
  // Variants of selected settings with hard-coded fallback URLs (see env.js).
  readonly def: {
    readonly CALLAI_REFRESH_ENDPOINT: string;
    readonly CALLAI_CHAT_URL: string;
  };
  get CALLAI_IMG_URL(): string | undefined;
  get CALLAI_CHAT_URL(): string | undefined;
  // Reads window.callAi.API_KEY (browser global); returns undefined when absent.
  getWindowCALLAI_API_KEY(): string | undefined;
  // Resolves the API key from several sources in priority order (see env.js).
  get CALLAI_API_KEY(): string | undefined;
  get CALLAI_REFRESH_ENDPOINT(): string | undefined;
  get CALL_AI_REFRESH_TOKEN(): string | undefined;
  get CALLAI_REKEY_ENDPOINT(): string | undefined;
  get CALL_AI_KEY_TOKEN(): string | undefined;
  get CALLAI_REFRESH_TOKEN(): string | undefined;
  // True when CALLAI_DEBUG is set to any non-empty value.
  get CALLAI_DEBUG(): boolean;
  get NODE_ENV(): string | undefined;
}
// Shared singleton used throughout the package.
export declare const callAiEnv: CallAIEnv;
export {};
|
package/env.js
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
import { envFactory, Lazy } from "@adviser/cement";
// Typed accessor facade over environment variables (and window globals) for call-ai.
class CallAIEnv {
  // Lazily create the backing Env on first access.
  env = Lazy(() => {
    return envFactory({ symbol: "callAi" });
  });
  // Adopt oEnv as the new backing env and return it.
  merge(oEnv) {
    const myEnv = this.env();
    myEnv.keys().forEach((k) => {
      const v = myEnv.get(k);
      // NOTE(review): this copies only keys whose CURRENT value is falsy,
      // writing undefined/empty values into oEnv. The condition looks inverted -
      // presumably the intent was `if (!oEnv.get(k))` (fill gaps in oEnv from
      // myEnv) or `if (v)` (copy existing values). Confirm before relying on it.
      if (!v) {
        oEnv.set(k, myEnv.get(k));
      }
    });
    // From now on all getters read from oEnv.
    this.env = () => oEnv;
    return oEnv;
  }
  // Selected settings with hard-coded fallback URLs.
  def = {
    get CALLAI_REFRESH_ENDPOINT() {
      return callAiEnv.CALLAI_REFRESH_ENDPOINT ?? "https://vibecode.garden";
    },
    get CALLAI_CHAT_URL() {
      return callAiEnv.CALLAI_CHAT_URL ?? "https://vibes-diy-api.com";
    },
  };
  get CALLAI_IMG_URL() {
    return this.env().get("CALLAI_IMG_URL");
  }
  get CALLAI_CHAT_URL() {
    return this.env().get("CALLAI_CHAT_URL");
  }
  // Read the browser global window.callAi.API_KEY.
  // NOTE(review): `w.callAi` dereferences `w` without a guard; in a non-browser
  // environment where globalThis.window is undefined this throws - confirm
  // callers only reach this in a browser, or that a guard exists upstream.
  getWindowCALLAI_API_KEY() {
    const w = globalThis.window;
    return w.callAi?.API_KEY;
  }
  // Resolve the API key from several sources, in priority order.
  get CALLAI_API_KEY() {
    const x = this.env().get("CALLAI_API_KEY") ??
      this.env().get("OPENROUTER_API_KEY") ??
      this.getWindowCALLAI_API_KEY() ??
      this.env().get("LOW_BALANCE_OPENROUTER_API_KEY");
    return x;
  }
  get CALLAI_REFRESH_ENDPOINT() {
    return this.env().get("CALLAI_REFRESH_ENDPOINT");
  }
  get CALL_AI_REFRESH_TOKEN() {
    return this.env().get("CALL_AI_REFRESH_TOKEN");
  }
  get CALLAI_REKEY_ENDPOINT() {
    return this.env().get("CALLAI_REKEY_ENDPOINT");
  }
  get CALL_AI_KEY_TOKEN() {
    return this.env().get("CALL_AI_KEY_TOKEN");
  }
  get CALLAI_REFRESH_TOKEN() {
    return this.env().get("CALLAI_REFRESH_TOKEN");
  }
  // True when CALLAI_DEBUG is set to any non-empty value.
  get CALLAI_DEBUG() {
    return !!this.env().get("CALLAI_DEBUG");
  }
  get NODE_ENV() {
    return this.env().get("NODE_ENV");
  }
}
// Shared singleton used throughout the package.
export const callAiEnv = new CallAIEnv();
//# sourceMappingURL=env.js.map
|
package/env.js.map
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"env.js","sourceRoot":"","sources":["../jsr/env.ts"],"names":[],"mappings":"AAAA,OAAO,EAAO,UAAU,EAAE,IAAI,EAAE,MAAM,iBAAiB,CAAC;AAExD,MAAM,SAAS;IACb,GAAG,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;QACf,OAAO,UAAU,CAAC,EAAE,MAAM,EAAE,QAAQ,EAAE,CAAC,CAAC;IAAA,CACzC,CAAC,CAAC;IAEH,KAAK,CAAC,IAAS,EAAE;QACf,MAAM,KAAK,GAAG,IAAI,CAAC,GAAG,EAAE,CAAC;QACzB,KAAK,CAAC,IAAI,EAAE,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC;YAC1B,MAAM,CAAC,GAAG,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC;YACvB,IAAI,CAAC,CAAC,EAAE,CAAC;gBACP,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC,CAAC,CAAC;YAC5B,CAAC;QAAA,CACF,CAAC,CAAC;QACH,IAAI,CAAC,GAAG,GAAG,GAAG,EAAE,CAAC,IAAI,CAAC;QACtB,OAAO,IAAI,CAAC;IAAA,CACb;IAEQ,GAAG,GAAG;QACb,IAAI,uBAAuB,GAAG;YAE5B,OAAO,SAAS,CAAC,uBAAuB,IAAI,yBAAyB,CAAC;QAAA,CACvE;QACD,IAAI,eAAe,GAAG;YACpB,OAAO,SAAS,CAAC,eAAe,IAAI,2BAA2B,CAAC;QAAA,CACjE;KACF,CAAC;IAEF,IAAI,cAAc,GAAG;QACnB,OAAO,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,gBAAgB,CAAC,CAAC;IAAA,CACzC;IAED,IAAI,eAAe,GAAG;QACpB,OAAO,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,iBAAiB,CAAC,CAAC;IAAA,CAC1C;IAED,uBAAuB,GAAG;QACxB,MAAM,CAAC,GAAG,UAAU,CAAC,MAA2C,CAAC;QACjE,OAAO,CAAC,CAAC,MAAM,EAAE,OAAO,CAAC;IAAA,CAC1B;IAED,IAAI,cAAc,GAAG;QACnB,MAAM,CAAC,GACL,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,gBAAgB,CAAC;YAChC,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,oBAAoB,CAAC;YACpC,IAAI,CAAC,uBAAuB,EAAE;YAC9B,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,gCAAgC,CAAC,CAAC;QAInD,OAAO,CAAC,CAAC;IAAA,CACV;IACD,IAAI,uBAAuB,GAAG;QAC5B,OAAO,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,yBAAyB,CAAC,CAAC;IAAA,CAClD;IACD,IAAI,qBAAqB,GAAG;QAC1B,OAAO,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,uBAAuB,CAAC,CAAC;IAAA,CAChD;IAED,IAAI,qBAAqB,GAAG;QAC1B,OAAO,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,uBAAuB,CAAC,CAAC;IAAA,CAChD;IACD,IAAI,iBAAiB,GAAG;QACtB,OAAO,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,mBAAmB,CAAC,CAAC;IAAA,CAC5C;IACD,IAAI,oBAAoB,GAAG;QACzB,OAAO,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,sBAAsB,CAAC,CAAC;IAAA,CAC/C;IACD,IAAI,YAAY,GAAG;QACjB,OAAO,CAAC,CAAC,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,cAAc,CAAC,CAAC;IAAA
,CACzC;IAED,IAAI,QAAQ,GAAG;QACb,OAAO,IAAI,CAAC,GAAG,EAAE,CAAC,GAAG,CAAC,UAAU,CAAC,CAAC;IAAA,CACnC;CACF;AAED,MAAM,CAAC,MAAM,SAAS,GAAG,IAAI,SAAS,EAAE,CAAC"}
|