call-ai 0.8.3 → 0.8.5-dev-preview

This diff shows the content of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
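The first hunk below adds a new compiled module for streaming-response handling (per its header comment). The generator it defines parses the response body as Server-Sent Events: complete messages are separated by blank lines, each payload line carries a "data: " prefix, and a "data: [DONE]" sentinel ends the stream. As an illustrative sketch of the wire format the parser expects (the payload values are invented, not captured from a real API):

data: {"choices":[{"delta":{"content":"Hel"}}]}

data: {"choices":[{"delta":{"content":"lo"}}]}

data: [DONE]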
@@ -0,0 +1,483 @@
+ "use strict";
+ /**
+  * Streaming response handling for call-ai
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.createStreamingGenerator = createStreamingGenerator;
+ exports.callAIStreaming = callAIStreaming;
+ const key_management_1 = require("./key-management");
+ const response_metadata_1 = require("./response-metadata");
+ const error_handling_1 = require("./error-handling");
+ const non_streaming_1 = require("./non-streaming");
+ // Generator factory function for streaming API calls
+ // This is called after the fetch is made and response is validated
+ //
+ // Note: Even though we checked response.ok before creating this generator,
+ // we need to be prepared for errors that may occur during streaming. Some APIs
+ // return a 200 OK initially but then deliver error information in the stream.
+ async function* createStreamingGenerator(response, options, schemaStrategy, model) {
+     // Create a metadata object for this streaming response
+     const meta = {
+         model,
+         endpoint: options.endpoint || "https://openrouter.ai/api/v1",
+         timing: {
+             startTime: Date.now(),
+             endTime: 0,
+             duration: 0,
+         },
+     };
+     // Tool calls assembly (for Claude/Anthropic)
+     let toolCallsAssembled = "";
+     let completeText = "";
+     let chunkCount = 0;
+     if (options.debug || key_management_1.globalDebug) {
+         console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Starting streaming generator with model: ${model}`);
+     }
+     try {
+         // Handle streaming response
+         const reader = response.body?.getReader();
+         if (!reader) {
+             throw new Error("Response body is undefined - API endpoint may not support streaming");
+         }
+         const textDecoder = new TextDecoder();
+         let buffer = ""; // Buffer to accumulate partial SSE messages
+         while (true) {
+             const { done, value } = await reader.read();
+             if (done) {
+                 if (options.debug || key_management_1.globalDebug) {
+                     console.log(`[callAI-streaming:complete v${non_streaming_1.PACKAGE_VERSION}] Stream finished after ${chunkCount} chunks`);
+                 }
+                 break;
+             }
+             // Convert bytes to text
+             const chunk = textDecoder.decode(value, { stream: true });
+             buffer += chunk;
+             // Split on double newlines to find complete SSE messages
+             let messages = buffer.split(/\n\n/);
+             buffer = messages.pop() || ""; // Keep the last incomplete chunk in the buffer
+             for (const message of messages) {
+                 if (!message.trim() || !message.startsWith("data: ")) {
+                     continue; // Skip empty lines or non-data messages
+                 }
+                 // Extract the JSON payload
+                 let jsonStr = message.slice(6); // Remove 'data: ' prefix
+                 if (jsonStr === "[DONE]") {
+                     if (options.debug || key_management_1.globalDebug) {
+                         console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Received [DONE] signal`);
+                     }
+                     continue;
+                 }
+                 chunkCount++;
+                 // Try to parse the JSON
+                 try {
+                     const json = JSON.parse(jsonStr);
+                     // Check for error responses in the stream
+                     if (json.error ||
+                         json.type === "error" ||
+                         (json.choices &&
+                             json.choices.length > 0 &&
+                             json.choices[0].finish_reason === "error")) {
+                         // Extract error message
+                         const errorMessage = json.error?.message ||
+                             json.error ||
+                             json.choices?.[0]?.message?.content ||
+                             "Unknown streaming error";
+                         if (options.debug || key_management_1.globalDebug) {
+                             console.error(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Detected error in streaming response:`, json);
+                         }
+                         // Create a detailed error to throw
+                         const detailedError = new Error(`API streaming error: ${errorMessage}`);
+                         // Add error metadata
+                         detailedError.status = json.error?.status || 400;
+                         detailedError.statusText =
+                             json.error?.type || "Bad Request";
+                         detailedError.details = JSON.stringify(json.error || json);
+                         console.error(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Throwing stream error:`, detailedError);
+                         throw detailedError;
+                     }
+                     // Handle tool use response - Claude with schema cases
+                     const isClaudeWithSchema = /claude/i.test(model) && schemaStrategy.strategy === "tool_mode";
+                     if (isClaudeWithSchema) {
+                         // Claude streaming tool calls - need to assemble arguments
+                         if (json.choices && json.choices.length > 0) {
+                             const choice = json.choices[0];
+                             // Handle finish reason tool_calls - this is where we know the tool call is complete
+                             if (choice.finish_reason === "tool_calls") {
+                                 if (options.debug) {
+                                     console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Received tool_calls finish reason. Assembled JSON:`, toolCallsAssembled);
+                                 }
+                                 // Full JSON collected, construct a proper object with it
+                                 try {
+                                     // Try to fix any malformed JSON that might have resulted from chunking
+                                     // This happens when property names get split across chunks
+                                     if (toolCallsAssembled) {
+                                         try {
+                                             // First try parsing as-is
+                                             JSON.parse(toolCallsAssembled);
+                                         }
+                                         catch (parseError) {
+                                             if (options.debug) {
+                                                 console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Attempting to fix malformed JSON in tool call:`, toolCallsAssembled);
+                                             }
+                                             // Apply comprehensive fixes for Claude's JSON property splitting
+                                             let fixedJson = toolCallsAssembled;
+                                             // 1. Remove trailing commas
+                                             fixedJson = fixedJson.replace(/,\s*([\}\]])/g, "$1");
+                                             // 2. Ensure proper JSON structure
+                                             // Add closing braces if missing
+                                             const openBraces = (fixedJson.match(/\{/g) || []).length;
+                                             const closeBraces = (fixedJson.match(/\}/g) || []).length;
+                                             if (openBraces > closeBraces) {
+                                                 fixedJson += "}".repeat(openBraces - closeBraces);
+                                             }
+                                             // Add opening brace if missing
+                                             if (!fixedJson.trim().startsWith("{")) {
+                                                 fixedJson = "{" + fixedJson.trim();
+                                             }
+                                             // Ensure it ends with a closing brace
+                                             if (!fixedJson.trim().endsWith("}")) {
+                                                 fixedJson += "}";
+                                             }
+                                             // 3. Fix various property name/value split issues
+                                             // Fix dangling property names without values
+                                             fixedJson = fixedJson.replace(/"(\w+)"\s*:\s*$/g, '"$1":null');
+                                             // Fix missing property values
+                                             fixedJson = fixedJson.replace(/"(\w+)"\s*:\s*,/g, '"$1":null,');
+                                             // Fix incomplete property names (when split across chunks)
+                                             fixedJson = fixedJson.replace(/"(\w+)"\s*:\s*"(\w+)$/g, '"$1$2"');
+                                             // Balance brackets
+                                             const openBrackets = (fixedJson.match(/\[/g) || [])
+                                                 .length;
+                                             const closeBrackets = (fixedJson.match(/\]/g) || [])
+                                                 .length;
+                                             if (openBrackets > closeBrackets) {
+                                                 fixedJson += "]".repeat(openBrackets - closeBrackets);
+                                             }
+                                             if (options.debug) {
+                                                 console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Applied comprehensive JSON fixes:`, `\nBefore: ${toolCallsAssembled}`, `\nAfter: ${fixedJson}`);
+                                             }
+                                             toolCallsAssembled = fixedJson;
+                                         }
+                                     }
+                                     // Return the assembled tool call
+                                     completeText = toolCallsAssembled;
+                                     yield completeText;
+                                     continue;
+                                 }
+                                 catch (e) {
+                                     console.error("[callAIStreaming] Error handling assembled tool call:", e);
+                                 }
+                             }
+                             // Assemble tool_calls arguments from delta
+                             // Simply accumulate the raw strings without trying to parse them
+                             if (choice && choice.delta && choice.delta.tool_calls) {
+                                 const toolCall = choice.delta.tool_calls[0];
+                                 if (toolCall &&
+                                     toolCall.function &&
+                                     toolCall.function.arguments !== undefined) {
+                                     toolCallsAssembled += toolCall.function.arguments;
+                                     if (options.debug) {
+                                         console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Accumulated tool call chunk:`, toolCall.function.arguments);
+                                     }
+                                 }
+                             }
+                         }
+                     }
+                     // Handle tool use response - old format
+                     if (isClaudeWithSchema &&
+                         (json.stop_reason === "tool_use" || json.type === "tool_use")) {
+                         // First try direct tool use object format
+                         if (json.type === "tool_use") {
+                             completeText = schemaStrategy.processResponse(json);
+                             yield completeText;
+                             continue;
+                         }
+                         // Extract the tool use content
+                         if (json.content && Array.isArray(json.content)) {
+                             const toolUseBlock = json.content.find((block) => block.type === "tool_use");
+                             if (toolUseBlock) {
+                                 completeText = schemaStrategy.processResponse(toolUseBlock);
+                                 yield completeText;
+                                 continue;
+                             }
+                         }
+                         // Find tool_use in assistant's content blocks
+                         if (json.choices && Array.isArray(json.choices)) {
+                             const choice = json.choices[0];
+                             if (choice.message && Array.isArray(choice.message.content)) {
+                                 const toolUseBlock = choice.message.content.find((block) => block.type === "tool_use");
+                                 if (toolUseBlock) {
+                                     completeText = schemaStrategy.processResponse(toolUseBlock);
+                                     yield completeText;
+                                     continue;
+                                 }
+                             }
+                             // Handle case where the tool use is in the delta
+                             if (choice.delta && Array.isArray(choice.delta.content)) {
+                                 const toolUseBlock = choice.delta.content.find((block) => block.type === "tool_use");
+                                 if (toolUseBlock) {
+                                     completeText = schemaStrategy.processResponse(toolUseBlock);
+                                     yield completeText;
+                                     continue;
+                                 }
+                             }
+                         }
+                     }
+                     // Extract content from the delta
+                     if (json.choices?.[0]?.delta?.content !== undefined) {
+                         const content = json.choices[0].delta.content || "";
+                         // Treat all models the same - yield as content arrives
+                         completeText += content;
+                         yield schemaStrategy.processResponse(completeText);
+                     }
+                     // Handle message content format (non-streaming deltas)
+                     else if (json.choices?.[0]?.message?.content !== undefined) {
+                         const content = json.choices[0].message.content || "";
+                         completeText += content;
+                         yield schemaStrategy.processResponse(completeText);
+                     }
+                     // Handle content blocks for Claude/Anthropic response format
+                     else if (json.choices?.[0]?.message?.content &&
+                         Array.isArray(json.choices[0].message.content)) {
+                         const contentBlocks = json.choices[0].message.content;
+                         // Find text or tool_use blocks
+                         for (const block of contentBlocks) {
+                             if (block.type === "text") {
+                                 completeText += block.text || "";
+                             }
+                             else if (isClaudeWithSchema && block.type === "tool_use") {
+                                 completeText = schemaStrategy.processResponse(block);
+                                 break; // We found what we need
+                             }
+                         }
+                         yield schemaStrategy.processResponse(completeText);
+                     }
+                     // Find text delta for content blocks (Claude format)
+                     if (json.type === "content_block_delta" &&
+                         json.delta &&
+                         json.delta.type === "text_delta" &&
+                         json.delta.text) {
+                         if (options.debug) {
+                             console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Received text delta:`, json.delta.text);
+                         }
+                         completeText += json.delta.text;
+                         // In some models like Claude, don't yield partial results as they can be malformed JSON
+                         // Only yield what we've seen so far if it's not a Claude model with schema
+                         if (!isClaudeWithSchema) {
+                             yield schemaStrategy.processResponse(completeText);
+                         }
+                     }
+                 }
+                 catch (e) {
+                     if (options.debug) {
+                         console.error(`[callAIStreaming] Error parsing JSON chunk:`, e);
+                     }
+                 }
+             }
+         }
+         // We no longer need special error handling here as errors are thrown immediately
+         // No extra error handling needed here - errors are thrown immediately
+         // If we have assembled tool calls but haven't yielded them yet
+         if (toolCallsAssembled && (!completeText || completeText.length === 0)) {
+             // Try to fix any remaining JSON issues before returning
+             let result = toolCallsAssembled;
+             try {
+                 // Try to parse as-is first
+                 JSON.parse(result);
+             }
+             catch (e) {
+                 if (options.debug) {
+                     console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Final JSON validation failed:`, e, `\nAttempting to fix JSON:`, result);
+                 }
+                 // Apply more robust fixes for Claude's streaming JSON issues
+                 // 1. Remove trailing commas (common in malformed JSON)
+                 result = result.replace(/,\s*([\}\]])/g, "$1");
+                 // 2. Ensure we have proper JSON structure
+                 // Add closing braces if missing
+                 const openBraces = (result.match(/\{/g) || []).length;
+                 const closeBraces = (result.match(/\}/g) || []).length;
+                 if (openBraces > closeBraces) {
+                     result += "}".repeat(openBraces - closeBraces);
+                 }
+                 // Add opening brace if missing
+                 if (!result.trim().startsWith("{")) {
+                     result = "{" + result.trim();
+                 }
+                 // Ensure it ends with a closing brace
+                 if (!result.trim().endsWith("}")) {
+                     result += "}";
+                 }
+                 // Fix dangling property names without values
+                 result = result.replace(/"(\w+)"\s*:\s*$/g, '"$1":null');
+                 // Fix missing property values
+                 result = result.replace(/"(\w+)"\s*:\s*,/g, '"$1":null,');
+                 // Balance brackets
+                 const openBrackets = (result.match(/\[/g) || []).length;
+                 const closeBrackets = (result.match(/\]/g) || []).length;
+                 if (openBrackets > closeBrackets) {
+                     result += "]".repeat(openBrackets - closeBrackets);
+                 }
+                 if (options.debug) {
+                     console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Applied final JSON fixes:`, result);
+                 }
+             }
+             // Return the assembled tool call
+             completeText = result;
+             // Try one more time to validate
+             try {
+                 JSON.parse(completeText);
+             }
+             catch (finalParseError) {
+                 if (options.debug) {
+                     console.error(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Final JSON validation still failed:`, finalParseError);
+                 }
+             }
+             yield completeText;
+         }
+         // Record streaming completion in metadata
+         const endTime = Date.now();
+         meta.timing.endTime = endTime;
+         meta.timing.duration = endTime - meta.timing.startTime;
+         // Add the rawResponse field to match non-streaming behavior
+         // For streaming, we use the final complete text as the raw response
+         meta.rawResponse = completeText;
+         // Store metadata for this response
+         const boxed = (0, response_metadata_1.boxString)(completeText);
+         response_metadata_1.responseMetadata.set(boxed, meta);
+         // Return the complete text as the final value
+         return completeText;
+     }
+     catch (error) {
+         // Streaming generators must properly handle errors
+         if (options.debug || key_management_1.globalDebug) {
+             console.error(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Streaming error:`, error);
+         }
+         // This error will be caught in the caller's try/catch block
+         throw error;
+     }
+ }
+ // Simplified generator for accessing streaming results
+ // Returns an async generator that yields blocks of text
+ // This is a higher-level function that prepares the request
+ // and handles model fallback
+ async function* callAIStreaming(prompt, options = {}, isRetry = false) {
+     // Convert simple string prompts to message array format
+     const messages = Array.isArray(prompt)
+         ? prompt
+         : [{ role: "user", content: prompt }];
+     // API key should be provided by options (validation happens in callAI)
+     const apiKey = options.apiKey;
+     const model = options.model || "openai/gpt-3.5-turbo";
+     // Default endpoint compatible with OpenAI API
+     const endpoint = options.endpoint || "https://openrouter.ai/api/v1";
+     // Build the endpoint URL
+     const url = `${endpoint}/chat/completions`;
+     // Choose a schema strategy based on model
+     const schemaStrategy = options.schemaStrategy;
+     // Default to JSON response for certain models
+     const responseFormat = options.responseFormat || /gpt-4/.test(model) || /gpt-3.5/.test(model)
+         ? "json"
+         : undefined;
+     const debug = options.debug === undefined ? key_management_1.globalDebug : options.debug;
+     if (debug) {
+         console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Making streaming request to: ${url}`);
+         console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] With model: ${model}`);
+     }
+     // Build request body
+     const requestBody = {
+         model,
+         messages,
+         max_tokens: options.maxTokens || 2048,
+         temperature: options.temperature !== undefined ? options.temperature : 0.7,
+         top_p: options.topP !== undefined ? options.topP : 1,
+         stream: true,
+     };
+     // Add response_format if specified or for JSON handling
+     if (responseFormat === "json") {
+         requestBody.response_format = { type: "json_object" };
+     }
+     // Add schema-specific parameters (if schema is provided)
+     if (options.schema) {
+         Object.assign(requestBody, schemaStrategy.prepareRequest(options.schema, messages));
+     }
+     // Add HTTP referer and other options to help with abuse prevention
+     const headers = {
+         Authorization: `Bearer ${apiKey}`,
+         "HTTP-Referer": options.referer || "https://vibes.diy",
+         "X-Title": options.title || "Vibes",
+         "Content-Type": "application/json",
+     };
+     // Add any additional headers
+     if (options.headers) {
+         Object.assign(headers, options.headers);
+     }
+     // Copy any other options not explicitly handled above
+     Object.keys(options).forEach((key) => {
+         if (![
+             "apiKey",
+             "model",
+             "endpoint",
+             "stream",
+             "schema",
+             "maxTokens",
+             "temperature",
+             "topP",
+             "responseFormat",
+             "referer",
+             "title",
+             "headers",
+             "skipRefresh",
+             "debug",
+         ].includes(key)) {
+             requestBody[key] = options[key];
+         }
+     });
+     if (debug) {
+         console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Request headers:`, headers);
+         console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Request body:`, requestBody);
+     }
+     let response;
+     try {
+         // Make the API request
+         response = await fetch(url, {
+             method: "POST",
+             headers,
+             body: JSON.stringify(requestBody),
+         });
+         // Handle HTTP errors
+         if (!response.ok) {
+             // Check if this is an invalid model error that we can handle with a fallback
+             const { isInvalidModel, errorData } = await (0, error_handling_1.checkForInvalidModelError)(response, model, debug);
+             if (isInvalidModel && !isRetry && !options.skipRetry) {
+                 if (debug) {
+                     console.log(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Invalid model "${model}", falling back to "${non_streaming_1.FALLBACK_MODEL}"`);
+                 }
+                 // Retry with the fallback model using yield* to delegate to the other generator
+                 yield* callAIStreaming(prompt, {
+                     ...options,
+                     model: non_streaming_1.FALLBACK_MODEL,
+                 }, true);
+                 // Generator delegation handles returning the final value
+                 return "";
+             }
+             // For other errors, throw with details
+             const errorText = errorData
+                 ? JSON.stringify(errorData)
+                 : `HTTP error! Status: ${response.status}`;
+             throw new Error(errorText);
+         }
+         // Yield streaming results through the generator
+         yield* createStreamingGenerator(response, options, schemaStrategy, model);
+         // The createStreamingGenerator will return the final assembled string
+         return ""; // This is never reached due to yield*
+     }
+     catch (fetchError) {
+         // Network errors must be directly re-thrown without modification
+         // This is exactly how the original implementation handles it
+         if (debug) {
+             console.error(`[callAI:${non_streaming_1.PACKAGE_VERSION}] Network error during fetch:`, fetchError);
+         }
+         // Critical: throw the exact same error object without any wrapping
+         throw fetchError;
+     }
+ }
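Taken together, callAIStreaming builds the request and createStreamingGenerator consumes the SSE stream. The comments above indicate callAIStreaming is normally driven by the package's callAI entry point, which injects options.schemaStrategy; calling it directly therefore needs a stand-in strategy object. A minimal consumption sketch (the import path and the stub strategy are assumptions, not part of this diff):

import { callAIStreaming } from "call-ai"; // import path is an assumption

// Stand-in for the schemaStrategy that callAI normally injects.
// Its shape is inferred from the calls made in the module above.
const passthroughStrategy = {
  strategy: "none",                     // compared against "tool_mode" above
  processResponse: (text: any) => text, // called on every yielded chunk
  prepareRequest: () => ({}),           // only used when options.schema is set
};

async function main() {
  let finalText = "";
  for await (const chunk of callAIStreaming("Write a haiku about rivers", {
    apiKey: process.env.OPENROUTER_API_KEY,
    model: "openai/gpt-3.5-turbo",
    schemaStrategy: passthroughStrategy,
  })) {
    finalText = chunk; // each yield is the accumulated text so far, not a delta
  }
  console.log(finalText);
}

main().catch(console.error);

Note that the yields are cumulative: the last chunk received equals the full response, which is also the generator's return value.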
package/dist/types.d.ts CHANGED
@@ -18,6 +18,29 @@ export type Message = {
      role: "user" | "system" | "assistant";
      content: string | ContentItem[];
  };
+ /**
+  * Metadata associated with a response
+  * Available through the getMeta() helper function
+  */
+ export interface ResponseMeta {
+     /**
+      * The model used for the response
+      */
+     model: string;
+     /**
+      * Timing information about the request
+      */
+     timing?: {
+         startTime: number;
+         endTime?: number;
+         duration?: number;
+     };
+     /**
+      * Raw response data from the fetch call
+      * Contains the parsed JSON result from the API call
+      */
+     rawResponse?: any;
+ }
  export interface Schema {
      /**
       * Optional schema name - will be sent to OpenRouter if provided
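The new ResponseMeta interface pairs with the getMeta() helper named in its doc comment. A lookup sketch (getMeta's exact signature and the import names are assumptions based on that comment; callAI is the entry point referenced in the streaming module):

import { callAI, getMeta } from "call-ai"; // names assumed from the doc comment

async function inspectMeta() {
  const text = (await callAI("Summarize the release notes", {
    apiKey: process.env.OPENROUTER_API_KEY,
  })) as string;
  const meta = getMeta(text); // a ResponseMeta lookup in this sketch
  if (meta?.timing?.duration !== undefined) {
    console.log(`${meta.model} answered in ${meta.timing.duration} ms`);
  }
}

This matches the streaming code above, which stores metadata keyed on a boxed copy of the final response string.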
@@ -87,10 +110,27 @@ export interface CallAIOptions {
       * API endpoint to send the request to
       */
      endpoint?: string;
+     /**
+      * Custom origin for chat API
+      * Can also be set via window.CALLAI_CHAT_URL or process.env.CALLAI_CHAT_URL
+      */
+     chatUrl?: string;
      /**
       * Whether to stream the response
       */
      stream?: boolean;
+     /**
+      * Authentication token for key refresh service
+      * Can also be set via window.CALL_AI_REFRESH_TOKEN, process.env.CALL_AI_REFRESH_TOKEN, or default to "use-vibes"
+      */
+     refreshToken?: string;
+     /**
+      * Callback function to update refresh token when current token fails
+      * Gets called with the current failing token and should return a new token
+      * @param currentToken The current refresh token that failed
+      * @returns A Promise that resolves to a new refresh token
+      */
+     updateRefreshToken?: (currentToken: string) => Promise<string>;
      /**
       * Schema for structured output
       */
@@ -105,6 +145,11 @@ export interface CallAIOptions {
       * Useful in testing and cases where retries should be suppressed
       */
      skipRetry?: boolean;
+     /**
+      * Skip key refresh on 4xx errors
+      * Useful for testing error conditions or when you want to handle refresh manually
+      */
+     skipRefresh?: boolean;
      /**
       * Enable raw response logging without any filtering or processing
       */
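The new options describe a key-refresh flow: refreshToken authenticates against the refresh service (defaulting to "use-vibes" per the doc comment), updateRefreshToken supplies a replacement when the current token fails, and skipRefresh disables the 4xx-triggered refresh. A configuration sketch (fetchNewToken and its endpoint are hypothetical; only the option names come from this diff):

import { callAI } from "call-ai";

// Hypothetical token issuer - replace with your own service.
async function fetchNewToken(failedToken: string): Promise<string> {
  const res = await fetch("https://example.com/refresh", {
    method: "POST",
    body: JSON.stringify({ failedToken }),
  });
  const { token } = await res.json();
  return token;
}

async function demo() {
  const answer = await callAI("Hello", {
    refreshToken: "use-vibes",         // documented default
    updateRefreshToken: fetchNewToken, // invoked when the current token fails
    skipRefresh: false,                // set true to handle refresh manually
  });
  console.log(answer);
}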
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "call-ai",
-     "version": "0.8.3",
+     "version": "0.8.5-dev-preview",
      "description": "Lightweight library for making AI API calls with streaming support",
      "main": "dist/index.js",
      "browser": "dist/index.js",
@@ -16,17 +16,6 @@
      "bugs": {
          "url": "https://github.com/fireproof-storage/call-ai/issues"
      },
-     "scripts": {
-         "build": "tsc",
-         "test": "jest --testPathIgnorePatterns=\".*\\.integration\\.(no-await\\.)?test\\.ts$\"",
-         "test:integration": "jest --testMatch=\"**/*\\.integration\\.test\\.ts\"",
-         "test:all": "jest",
-         "prepublishOnly": "npm run build",
-         "typecheck": "tsc --noEmit",
-         "format": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"",
-         "coverage": "jest --coverage",
-         "check": "npm run typecheck && npm run format && npm run test"
-     },
      "keywords": [
          "ai",
          "llm",
@@ -51,5 +40,15 @@
      },
      "engines": {
          "node": ">=14.0.0"
+     },
+     "scripts": {
+         "build": "tsc",
+         "test": "jest --testMatch=\"**/*unit*.test.ts\"",
+         "test:integration": "jest simple.integration",
+         "test:all": "pnpm test && pnpm test:integration",
+         "typecheck": "tsc --noEmit",
+         "format": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"",
+         "coverage": "jest --coverage",
+         "check": "npm run typecheck && npm run format && npm run test"
+     },
      }
- }
+ }
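Beyond the version bump, the scripts block moves below engines and changes shape: prepublishOnly is dropped, unit tests are now selected by the **/*unit*.test.ts glob, the integration run is narrowed to simple.integration, and test:all chains the two through pnpm. For example:

pnpm test
pnpm test:integration
pnpm run test:all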