call-ai 0.8.3 → 0.8.5-dev-preview

This diff compares the publicly released contents of these package versions as they appear in their public registries, and is provided for informational purposes only.
@@ -0,0 +1,265 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.FALLBACK_MODEL = exports.PACKAGE_VERSION = void 0;
+ exports.callAINonStreaming = callAINonStreaming;
+ exports.extractContent = extractContent;
+ exports.extractClaudeResponse = extractClaudeResponse;
+ const key_management_1 = require("./key-management");
+ const error_handling_1 = require("./error-handling");
+ const response_metadata_1 = require("./response-metadata");
+ // Import package version for debugging
+ // eslint-disable-next-line @typescript-eslint/no-var-requires
+ const PACKAGE_VERSION = require("../package.json").version;
+ exports.PACKAGE_VERSION = PACKAGE_VERSION;
+ const FALLBACK_MODEL = "openrouter/auto";
+ exports.FALLBACK_MODEL = FALLBACK_MODEL;
+ // Internal implementation for non-streaming API calls
+ async function callAINonStreaming(prompt, options = {}, isRetry = false) {
+     // Ensure keyStore is initialized first
+     const { initKeyStore } = require("./key-management");
+     initKeyStore();
+     // Convert simple string prompts to message array format
+     const messages = Array.isArray(prompt)
+         ? prompt
+         : [{ role: "user", content: prompt }];
+     // API key should be provided by options (validation happens in callAI)
+     const apiKey = options.apiKey;
+     const model = options.model || "openai/gpt-3.5-turbo";
+     // Default endpoint compatible with OpenAI API
+     const endpoint = options.endpoint || "https://openrouter.ai/api/v1";
+     // Build the endpoint URL
+     const url = `${endpoint}/chat/completions`;
+     // Choose a schema strategy based on model
+     const schemaStrategy = options.schemaStrategy;
+     // Default to JSON response for certain models
+     const responseFormat = options.responseFormat || /gpt-4/.test(model) || /gpt-3.5/.test(model)
+         ? "json"
+         : undefined;
+     const debug = options.debug === undefined ? key_management_1.globalDebug : options.debug;
+     if (debug) {
+         console.log(`[callAI:${PACKAGE_VERSION}] Making non-streaming request to: ${url}`);
+         console.log(`[callAI:${PACKAGE_VERSION}] With model: ${model}`);
+     }
+     // Build request body
+     const requestBody = {
+         model,
+         messages,
+         max_tokens: options.maxTokens || 2048,
+         temperature: options.temperature !== undefined ? options.temperature : 0.7,
+         top_p: options.topP !== undefined ? options.topP : 1,
+         stream: false,
+     };
+     // Add response_format if specified or for JSON handling
+     if (responseFormat === "json") {
+         requestBody.response_format = { type: "json_object" };
+     }
+     // Add schema-specific parameters (if schema is provided)
+     if (options.schema) {
+         Object.assign(requestBody, schemaStrategy.prepareRequest(options.schema, messages));
+     }
+     // Add HTTP referer and other options to help with abuse prevention
+     const headers = {
+         Authorization: `Bearer ${apiKey}`,
+         "HTTP-Referer": options.referer || "https://vibes.diy",
+         "X-Title": options.title || "Vibes",
+         "Content-Type": "application/json",
+     };
+     // Add any additional headers
+     if (options.headers) {
+         Object.assign(headers, options.headers);
+     }
+     // Copy any other options not explicitly handled above
+     Object.keys(options).forEach((key) => {
+         if (![
+             "apiKey",
+             "model",
+             "endpoint",
+             "stream",
+             "schema",
+             "maxTokens",
+             "temperature",
+             "topP",
+             "responseFormat",
+             "referer",
+             "title",
+             "headers",
+             "skipRefresh",
+             "debug",
+         ].includes(key)) {
+             requestBody[key] = options[key];
+         }
+     });
+     if (debug) {
+         console.log(`[callAI:${PACKAGE_VERSION}] Request headers:`, headers);
+         console.log(`[callAI:${PACKAGE_VERSION}] Request body:`, requestBody);
+     }
+     // Create metadata object for this response
+     const meta = {
+         model,
+         endpoint,
+         timing: {
+             startTime: Date.now(),
+             endTime: 0,
+             duration: 0,
+         },
+     };
+     try {
+         // Make the API request - matching original implementation structure
+         const response = await fetch(url, {
+             method: "POST",
+             headers,
+             body: JSON.stringify(requestBody),
+         });
+         // Handle HTTP errors
+         if (!response.ok) {
+             // Check if this is an invalid model error that we can handle with a fallback
+             const { isInvalidModel, errorData } = await (0, error_handling_1.checkForInvalidModelError)(response, model, debug);
+             if (isInvalidModel && !isRetry && !options.skipRetry) {
+                 if (debug) {
+                     console.log(`[callAI:${PACKAGE_VERSION}] Invalid model "${model}", falling back to "${FALLBACK_MODEL}"`);
+                 }
+                 // Retry with the fallback model
+                 return callAINonStreaming(prompt, {
+                     ...options,
+                     model: FALLBACK_MODEL,
+                 }, true);
+             }
+             // For other errors, throw with details
+             const errorText = errorData
+                 ? JSON.stringify(errorData)
+                 : `HTTP error! Status: ${response.status}`;
+             throw new Error(errorText);
+         }
+         // Parse response
+         let result;
+         try {
+             // For special cases like Claude, use a different extraction method
+             if (/claude/.test(model)) {
+                 result = await extractClaudeResponse(response);
+             }
+             else {
+                 const json = await response.json();
+                 result = extractContent(json, schemaStrategy);
+             }
+         }
+         catch (parseError) {
+             throw new Error(`Failed to parse API response: ${parseError instanceof Error ? parseError.message : String(parseError)}`);
+         }
+         // Update metadata with completion timing
+         const endTime = Date.now();
+         meta.timing.endTime = endTime;
+         meta.timing.duration = endTime - meta.timing.startTime;
+         // Store metadata for this response
+         const resultString = typeof result === "string" ? result : JSON.stringify(result);
+         // Box the string for WeakMap storage
+         const boxed = (0, response_metadata_1.boxString)(resultString);
+         response_metadata_1.responseMetadata.set(boxed, meta);
+         return resultString;
+     }
+     catch (error) {
+         // Check if this is a network/fetch error
+         const isNetworkError = error instanceof Error &&
+             (error.message.includes("Network") || error.name === "TypeError");
+         if (isNetworkError) {
+             // Direct re-throw for network errors (original implementation pattern)
+             if (debug) {
+                 console.error(`[callAI:${PACKAGE_VERSION}] Network error during fetch:`, error);
+             }
+             throw error;
+         }
+         // For other errors, use API error handling
+         await (0, error_handling_1.handleApiError)(error, "Non-streaming API call", options.debug, {
+             apiKey: apiKey || undefined,
+             endpoint: options.endpoint || undefined,
+             skipRefresh: options.skipRefresh,
+         });
+         // If handleApiError refreshed the key, we want to retry with the new key
+         if (key_management_1.keyStore.current && key_management_1.keyStore.current !== apiKey) {
+             if (debug) {
+                 console.log(`[callAI:${PACKAGE_VERSION}] Retrying with refreshed API key`);
+             }
+             // Retry the request with the new key
+             return callAINonStreaming(prompt, {
+                 ...options,
+                 apiKey: key_management_1.keyStore.current,
+             }, isRetry);
+         }
+         // If we get here, handleApiError failed to recover, so we should never reach this
+         // But just in case, rethrow the error
+         throw error;
+     }
+ }
+ // Extract content from API response accounting for different formats
+ function extractContent(result, schemaStrategy) {
+     // Debug output has been removed for brevity
+     if (!result) {
+         return "";
+     }
+     // Handle different response formats
+     if (result.choices && result.choices.length > 0) {
+         const choice = result.choices[0];
+         // Handle OpenAI format - content directly in message
+         if (choice.message && choice.message.content) {
+             return schemaStrategy.processResponse(choice.message.content);
+         }
+         // Handle function call response - pass through the schemaStrategy
+         if (choice.message && choice.message.function_call) {
+             return schemaStrategy.processResponse(choice.message.function_call);
+         }
+         // Handle function/tools response (newer format)
+         if (choice.message && choice.message.tool_calls) {
+             return schemaStrategy.processResponse(choice.message.tool_calls);
+         }
+         // Handle anthropic/claude format with content blocks
+         if (choice.message && Array.isArray(choice.message.content)) {
+             let textContent = "";
+             let toolUse = null;
+             // Find text or tool_use blocks
+             for (const block of choice.message.content) {
+                 if (block.type === "text") {
+                     textContent += block.text || "";
+                 }
+                 else if (block.type === "tool_use") {
+                     toolUse = block;
+                     break; // We found what we need
+                 }
+             }
+             // If we have a tool_use block, that takes precedence
+             if (toolUse) {
+                 return schemaStrategy.processResponse(toolUse);
+             }
+             // Otherwise use the accumulated text content
+             return schemaStrategy.processResponse(textContent);
+         }
+         // Fallback for simple text response
+         if (choice.text) {
+             return schemaStrategy.processResponse(choice.text);
+         }
+     }
+     // Return raw result if we couldn't extract content
+     return result;
+ }
+ // Extract response from Claude API with timeout handling
+ async function extractClaudeResponse(response) {
+     try {
+         const timeoutPromise = new Promise((_, reject) => {
+             setTimeout(() => {
+                 reject(new Error("Timeout extracting Claude response"));
+             }, 5000); // 5 second timeout
+         });
+         const responsePromise = response.json();
+         // Race between timeout and response
+         const json = await Promise.race([responsePromise, timeoutPromise]);
+         if (json.choices &&
+             json.choices.length > 0 &&
+             json.choices[0].message &&
+             json.choices[0].message.content) {
+             return json.choices[0].message.content;
+         }
+         // If content not found in expected structure, return the whole JSON
+         return json;
+     }
+     catch (error) {
+         throw new Error(`Failed to extract Claude response: ${error instanceof Error ? error.message : String(error)}`);
+     }
+ }
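
Note: callAINonStreaming above retries exactly once when the model is rejected, swapping in FALLBACK_MODEL ("openrouter/auto") and passing isRetry = true so the recursion cannot loop. A minimal self-contained TypeScript sketch of that pattern; requestOnce is a hypothetical stand-in for the real fetch plus checkForInvalidModelError sequence, not part of the package:

    const FALLBACK_MODEL = "openrouter/auto";

    type Attempt = { ok: boolean; invalidModel: boolean; text: string };

    // Hypothetical stand-in for the fetch + checkForInvalidModelError path;
    // here only the fallback model "succeeds", to exercise the retry branch.
    async function requestOnce(model: string): Promise<Attempt> {
        const valid = model === FALLBACK_MODEL;
        return { ok: valid, invalidModel: !valid, text: `response from ${model}` };
    }

    async function callWithFallback(model: string, isRetry = false): Promise<string> {
        const attempt = await requestOnce(model);
        if (attempt.ok) return attempt.text;
        // Fall back exactly once: isRetry stops recursion if the fallback fails too.
        if (attempt.invalidModel && !isRetry) return callWithFallback(FALLBACK_MODEL, true);
        throw new Error(`Request failed for model "${model}"`);
    }

    callWithFallback("not-a-real/model").then(console.log); // "response from openrouter/auto"

The same one-shot shape covers the key-refresh retry in the catch block, where the recursion is guarded by comparing keyStore.current against the key that just failed.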
@@ -0,0 +1,18 @@
+ /**
+  * Response metadata handling for call-ai
+  */
+ import { ResponseMeta } from "./types";
+ declare const responseMetadata: WeakMap<object, ResponseMeta>;
+ declare const stringResponseMap: Map<string, object>;
+ /**
+  * Helper to box a string so it can be used with WeakMap
+  * @internal
+  */
+ declare function boxString(str: string): object;
+ /**
+  * Retrieve metadata associated with a response from callAI()
+  * @param response A response from callAI, either string or AsyncGenerator
+  * @returns The metadata object if available, undefined otherwise
+  */
+ declare function getMeta(response: string | AsyncGenerator<string, string, unknown>): ResponseMeta | undefined;
+ export { responseMetadata, stringResponseMap, boxString, getMeta };
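
Note: per these declarations, getMeta accepts either the string returned by a non-streaming call or the AsyncGenerator returned by a streaming one. A hedged usage sketch; the "call-ai" import specifier and the callAI entry point are assumptions (this diff shows compiled internals only), and only the ResponseMeta fields visible in the first hunk (model, endpoint, timing) are read:

    // Assumed entry points; getMeta being re-exported from the package root
    // is not confirmed by this diff.
    import { callAI, getMeta } from "call-ai";

    async function main() {
        const text = await callAI("Write a haiku about diffs", { apiKey: "sk-..." });
        const meta = getMeta(text); // undefined if no metadata was recorded
        if (meta) {
            console.log(meta.model);           // e.g. "openai/gpt-3.5-turbo"
            console.log(meta.endpoint);        // e.g. "https://openrouter.ai/api/v1"
            console.log(meta.timing.duration); // milliseconds, per the timing object above
        }
    }
    main();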
@@ -0,0 +1,44 @@
+ "use strict";
+ /**
+  * Response metadata handling for call-ai
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.stringResponseMap = exports.responseMetadata = void 0;
+ exports.boxString = boxString;
+ exports.getMeta = getMeta;
+ // WeakMap to store metadata for responses without modifying the response objects
+ const responseMetadata = new WeakMap();
+ exports.responseMetadata = responseMetadata;
+ // Store for string responses - we need to box strings since WeakMap keys must be objects
+ const stringResponseMap = new Map();
+ exports.stringResponseMap = stringResponseMap;
+ /**
+  * Helper to box a string so it can be used with WeakMap
+  * @internal
+  */
+ function boxString(str) {
+     // Check if already boxed
+     if (stringResponseMap.has(str)) {
+         return stringResponseMap.get(str);
+     }
+     // Create a new box
+     const box = Object.create(null);
+     stringResponseMap.set(str, box);
+     return box;
+ }
+ /**
+  * Retrieve metadata associated with a response from callAI()
+  * @param response A response from callAI, either string or AsyncGenerator
+  * @returns The metadata object if available, undefined otherwise
+  */
+ function getMeta(response) {
+     if (typeof response === "string") {
+         const box = stringResponseMap.get(response);
+         if (box) {
+             return responseMetadata.get(box);
+         }
+         return undefined;
+     }
+     // For AsyncGenerator and other objects, look up directly
+     return responseMetadata.get(response);
+ }
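
Note: stringResponseMap is keyed by string value, so two responses with identical text share a single box, and a later responseMetadata.set() for that text overwrites the earlier entry; the map also retains every distinct response string for the life of the process. A self-contained demonstration of the last-write-wins behavior, re-declaring the same names locally:

    // Local re-implementation of the boxing pattern above.
    const responseMetadata = new WeakMap<object, { model: string }>();
    const stringResponseMap = new Map<string, object>();

    function boxString(str: string): object {
        const existing = stringResponseMap.get(str);
        if (existing) return existing;
        const box = Object.create(null);
        stringResponseMap.set(str, box);
        return box;
    }

    responseMetadata.set(boxString("Hello"), { model: "model-a" });
    responseMetadata.set(boxString("Hello"), { model: "model-b" });
    console.log(responseMetadata.get(boxString("Hello"))); // { model: "model-b" }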
@@ -7,7 +7,7 @@ const utils_1 = require("../utils");
   */
  exports.openAIStrategy = {
      name: "openai",
-     prepareRequest: (schema, messages) => {
+     prepareRequest: (schema) => {
          if (!schema)
              return {};
          // Process schema for JSON schema approach
@@ -68,7 +68,7 @@ exports.geminiStrategy = {
  exports.claudeStrategy = {
      name: "anthropic",
      shouldForceStream: true,
-     prepareRequest: (schema, messages) => {
+     prepareRequest: (schema) => {
          if (!schema)
              return {};
          // Process schema for tool use - format for OpenRouter/Claude
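
Note: this hunk and the previous one drop the unused messages parameter from prepareRequest. The real SchemaStrategy type lives in ./types, which this diff does not include, so the sketch below is inferred purely from usage visible in this diff (name, shouldForceStream, prepareRequest, processResponse) and is not the package's actual definition:

    // Inferred shape; field names beyond those seen in this diff are not assumed.
    interface SchemaStrategySketch {
        name: string;                      // "openai", "anthropic", ...
        shouldForceStream?: boolean;       // claudeStrategy sets this to true
        prepareRequest(schema: unknown): Record<string, unknown>;
        processResponse(raw: unknown): string;
    }

Note that callAINonStreaming in the first hunk still passes messages as a second argument to prepareRequest; the narrowed implementations simply ignore it.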
@@ -0,0 +1,7 @@
+ /**
+  * Streaming response handling for call-ai
+  */
+ import { CallAIOptions, SchemaStrategy } from "./types";
+ declare function createStreamingGenerator(response: Response, options: CallAIOptions, schemaStrategy: SchemaStrategy, model: string): AsyncGenerator<string, string, unknown>;
+ declare function callAIStreaming(prompt: string | any[], options?: CallAIOptions, isRetry?: boolean): AsyncGenerator<string, string, unknown>;
+ export { createStreamingGenerator, callAIStreaming };
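
Note: the AsyncGenerator<string, string, unknown> signature means each yield is a string and the generator's final return value is also a string; that the yields are partial chunks and the return value is the complete response is an assumption, since the generator body is not part of this diff. A hedged consumption sketch (Node is assumed for process.stdout):

    // Works for any generator with this signature; how callAIStreaming is
    // obtained (direct import vs. a public wrapper with a stream option) is
    // not shown in this diff.
    async function drain(gen: AsyncGenerator<string, string, unknown>): Promise<string> {
        while (true) {
            const { value, done } = await gen.next();
            if (done) return value;        // the generator's string return value
            process.stdout.write(value);   // a yielded chunk
        }
    }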