call-ai 0.0.0-dev-prompts

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55) hide show
  1. package/LICENSE.md +232 -0
  2. package/README.md +264 -0
  3. package/api-core.d.ts +13 -0
  4. package/api-core.js +238 -0
  5. package/api-core.js.map +1 -0
  6. package/api.d.ts +4 -0
  7. package/api.js +365 -0
  8. package/api.js.map +1 -0
  9. package/api.ts.off +595 -0
  10. package/env.d.ts +22 -0
  11. package/env.js +65 -0
  12. package/env.js.map +1 -0
  13. package/error-handling.d.ts +14 -0
  14. package/error-handling.js +144 -0
  15. package/error-handling.js.map +1 -0
  16. package/image.d.ts +2 -0
  17. package/image.js +72 -0
  18. package/image.js.map +1 -0
  19. package/index.d.ts +7 -0
  20. package/index.js +8 -0
  21. package/index.js.map +1 -0
  22. package/index.ts.bak +16 -0
  23. package/key-management.d.ts +29 -0
  24. package/key-management.js +190 -0
  25. package/key-management.js.map +1 -0
  26. package/non-streaming.d.ts +7 -0
  27. package/non-streaming.js +206 -0
  28. package/non-streaming.js.map +1 -0
  29. package/package.json +43 -0
  30. package/response-metadata.d.ts +6 -0
  31. package/response-metadata.js +22 -0
  32. package/response-metadata.js.map +1 -0
  33. package/strategies/index.d.ts +2 -0
  34. package/strategies/index.js +3 -0
  35. package/strategies/index.js.map +1 -0
  36. package/strategies/model-strategies.d.ts +6 -0
  37. package/strategies/model-strategies.js +138 -0
  38. package/strategies/model-strategies.js.map +1 -0
  39. package/strategies/strategy-selector.d.ts +2 -0
  40. package/strategies/strategy-selector.js +66 -0
  41. package/strategies/strategy-selector.js.map +1 -0
  42. package/streaming.d.ts +4 -0
  43. package/streaming.js +365 -0
  44. package/streaming.js.map +1 -0
  45. package/streaming.ts.off +571 -0
  46. package/tsconfig.json +18 -0
  47. package/types.d.ts +228 -0
  48. package/types.js +33 -0
  49. package/types.js.map +1 -0
  50. package/utils.d.ts +8 -0
  51. package/utils.js +42 -0
  52. package/utils.js.map +1 -0
  53. package/version.d.ts +1 -0
  54. package/version.js +2 -0
  55. package/version.js.map +1 -0
@@ -0,0 +1,571 @@
1
+ /**
2
+ * Streaming response handling for call-ai
3
+ */
4
+
5
+ import { CallAIError, CallAIOptions, Message, ResponseMeta, SchemaAIMessageRequest, SchemaStrategy, ToolUseType } from "./types.js";
6
+ import { globalDebug } from "./key-management.js";
7
+ import { responseMetadata, boxString } from "./response-metadata.js";
8
+ import { checkForInvalidModelError } from "./error-handling.js";
9
+ import { PACKAGE_VERSION, FALLBACK_MODEL } from "./non-streaming.js";
10
+ import { callAiFetch } from "./utils.js";
11
+
12
+ // Generator factory function for streaming API calls
13
+ // This is called after the fetch is made and response is validated
14
+ //
15
+ // Note: Even though we checked response.ok before creating this generator,
16
+ // we need to be prepared for errors that may occur during streaming. Some APIs
17
+ // return a 200 OK initially but then deliver error information in the stream.
18
+ async function* createStreamingGenerator(
19
+ response: Response,
20
+ options: CallAIOptions,
21
+ schemaStrategy: SchemaStrategy,
22
+ model: string,
23
+ ): AsyncGenerator<string, string, unknown> {
24
+ // Create a metadata object for this streaming response
25
+ const meta: ResponseMeta = {
26
+ model,
27
+ endpoint: options.endpoint || "https://openrouter.ai/api/v1",
28
+ timing: {
29
+ startTime: Date.now(),
30
+ endTime: 0,
31
+ duration: 0,
32
+ },
33
+ };
34
+
35
+ // Tool calls assembly (for Claude/Anthropic)
36
+ let toolCallsAssembled = "";
37
+ let completeText = "";
38
+ let chunkCount = 0;
39
+
40
+ if (options.debug || globalDebug) {
41
+ console.log(`[callAi:${PACKAGE_VERSION}] Starting streaming generator with model: ${model}`);
42
+ }
43
+
44
+ try {
45
+ // Handle streaming response
46
+ const reader = response.body?.getReader();
47
+ if (!reader) {
48
+ throw new Error("Response body is undefined - API endpoint may not support streaming");
49
+ }
50
+
51
+ const textDecoder = new TextDecoder();
52
+ let buffer = ""; // Buffer to accumulate partial SSE messages
53
+
54
+ while (true) {
55
+ const { done, value } = await reader.read();
56
+ if (done) {
57
+ if (options.debug || globalDebug) {
58
+ console.log(`[callAi-streaming:complete v${PACKAGE_VERSION}] Stream finished after ${chunkCount} chunks`);
59
+ }
60
+ break;
61
+ }
62
+
63
+ // Convert bytes to text
64
+ const chunk = textDecoder.decode(value, { stream: true });
65
+ buffer += chunk;
66
+
67
+ // Split on double newlines to find complete SSE messages
68
+ const messages = buffer.split(/\n\n/);
69
+ buffer = messages.pop() || ""; // Keep the last incomplete chunk in the buffer
70
+
71
+ for (const message of messages) {
72
+ if (!message.trim() || !message.startsWith("data: ")) {
73
+ continue; // Skip empty lines or non-data messages
74
+ }
75
+
76
+ // Extract the JSON payload
77
+ const jsonStr = message.slice("data: ".length); // Remove 'data: ' prefix
78
+ if (jsonStr === "[DONE]") {
79
+ if (options.debug || globalDebug) {
80
+ console.log(`[callAi:${PACKAGE_VERSION}] Received [DONE] signal`);
81
+ }
82
+ continue;
83
+ }
84
+
85
+ chunkCount++;
86
+
87
+ // Try to parse the JSON
88
+ try {
89
+ console.log(`[callAi:${PACKAGE_VERSION}] Raw chunk #${chunkCount}:`, jsonStr);
90
+ const json = JSON.parse(jsonStr);
91
+
92
+ // Check for error responses in the stream
93
+ if (
94
+ json.error ||
95
+ json.type === "error" ||
96
+ (json.choices && json.choices.length > 0 && json.choices[0].finish_reason === "error")
97
+ ) {
98
+ // Extract error message
99
+ const errorMessage =
100
+ json.error?.message || json.error || json.choices?.[0]?.message?.content || "Unknown streaming error";
101
+
102
+ if (options.debug || globalDebug) {
103
+ console.error(`[callAi:${PACKAGE_VERSION}] Detected error in streaming response:`, json);
104
+ }
105
+
106
+ // Create a detailed error to throw
107
+ const detailedError = new CallAIError({
108
+ message: `API streaming error: ${errorMessage}`,
109
+ status: json.error?.status || 400,
110
+ statusText: json.error?.type || "Bad Request",
111
+ details: JSON.stringify(json.error || json),
112
+ contentType: "application/json",
113
+ });
114
+ console.error(`[callAi:${PACKAGE_VERSION}] Throwing stream error:`, detailedError);
115
+ throw detailedError;
116
+ }
117
+
118
+ // Handle tool use response - Claude with schema cases
119
+ const isClaudeWithSchema = /claude/i.test(model) && schemaStrategy.strategy === "tool_mode";
120
+
121
+ if (isClaudeWithSchema) {
122
+ // Claude streaming tool calls - need to assemble arguments
123
+ if (json.choices && json.choices.length > 0) {
124
+ const choice = json.choices[0];
125
+
126
+ // Handle finish reason tool_calls - this is where we know the tool call is complete
127
+ if (choice.finish_reason === "tool_calls") {
128
+ if (options.debug) {
129
+ console.log(`[callAi:${PACKAGE_VERSION}] Received tool_calls finish reason. Assembled JSON:`, toolCallsAssembled);
130
+ }
131
+
132
+ // Full JSON collected, construct a proper object with it
133
+ try {
134
+ // Try to fix any malformed JSON that might have resulted from chunking
135
+ // This happens when property names get split across chunks
136
+ if (toolCallsAssembled) {
137
+ try {
138
+ // First try parsing as-is
139
+ JSON.parse(toolCallsAssembled);
140
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
141
+ } catch (e) {
142
+ if (options.debug) {
143
+ console.log(
144
+ `[callAi:${PACKAGE_VERSION}] Attempting to fix malformed JSON in tool call:`,
145
+ toolCallsAssembled,
146
+ );
147
+ }
148
+
149
+ // Apply comprehensive fixes for Claude's JSON property splitting
150
+ let fixedJson = toolCallsAssembled;
151
+
152
+ // 1. Remove trailing commas
153
+ fixedJson = fixedJson.replace(/,\s*([}\]])/, "$1");
154
+
155
+ // 2. Ensure proper JSON structure
156
+ // Add closing braces if missing
157
+ const openBraces = (fixedJson.match(/\{/g) || []).length;
158
+ const closeBraces = (fixedJson.match(/\}/g) || []).length;
159
+ if (openBraces > closeBraces) {
160
+ fixedJson += "}".repeat(openBraces - closeBraces);
161
+ }
162
+
163
+ // Add opening brace if missing
164
+ if (!fixedJson.trim().startsWith("{")) {
165
+ fixedJson = "{" + fixedJson.trim();
166
+ }
167
+
168
+ // Ensure it ends with a closing brace
169
+ if (!fixedJson.trim().endsWith("}")) {
170
+ fixedJson += "}";
171
+ }
172
+
173
+ // 3. Fix various property name/value split issues
174
+ // Fix dangling property names without values
175
+ fixedJson = fixedJson.replace(/"(\w+)"\s*:\s*$/g, '"$1":null');
176
+
177
+ // Fix missing property values
178
+ fixedJson = fixedJson.replace(/"(\w+)"\s*:\s*,/g, '"$1":null,');
179
+
180
+ // Fix incomplete property names (when split across chunks)
181
+ fixedJson = fixedJson.replace(/"(\w+)"\s*:\s*"(\w+)$/g, '"$1$2"');
182
+
183
+ // Balance brackets
184
+ const openBrackets = (fixedJson.match(/\[/g) || []).length;
185
+ const closeBrackets = (fixedJson.match(/\]/g) || []).length;
186
+ if (openBrackets > closeBrackets) {
187
+ fixedJson += "]".repeat(openBrackets - closeBrackets);
188
+ }
189
+
190
+ if (options.debug) {
191
+ console.log(
192
+ `[callAi:${PACKAGE_VERSION}] Applied comprehensive JSON fixes:`,
193
+ `\nBefore: ${toolCallsAssembled}`,
194
+ `\nAfter: ${fixedJson}`,
195
+ );
196
+ }
197
+
198
+ toolCallsAssembled = fixedJson;
199
+ }
200
+ }
201
+
202
+ // Return the assembled tool call
203
+ completeText = toolCallsAssembled;
204
+ yield completeText;
205
+ continue;
206
+ } catch (e) {
207
+ console.error("[callAIStreaming] Error handling assembled tool call:", e);
208
+ }
209
+ }
210
+
211
+ // Assemble tool_calls arguments from delta
212
+ // Simply accumulate the raw strings without trying to parse them
213
+ if (choice && choice.delta && choice.delta.tool_calls) {
214
+ const toolCall = choice.delta.tool_calls[0];
215
+ if (toolCall && toolCall.function && toolCall.function.arguments !== undefined) {
216
+ toolCallsAssembled += toolCall.function.arguments;
217
+ if (options.debug) {
218
+ console.log(`[callAi:${PACKAGE_VERSION}] Accumulated tool call chunk:`, toolCall.function.arguments);
219
+ }
220
+ }
221
+ }
222
+ }
223
+ }
224
+
225
+ // Handle tool use response - old format
226
+ if (isClaudeWithSchema && (json.stop_reason === "tool_use" || json.type === "tool_use")) {
227
+ // First try direct tool use object format
228
+ if (json.type === "tool_use") {
229
+ completeText = schemaStrategy.processResponse(json);
230
+ yield completeText;
231
+ continue;
232
+ }
233
+
234
+ // Extract the tool use content
235
+ if (json.content && Array.isArray(json.content)) {
236
+ const toolUseBlock = json.content.find((block: ToolUseType) => block.type === "tool_use");
237
+ if (toolUseBlock) {
238
+ completeText = schemaStrategy.processResponse(toolUseBlock);
239
+ yield completeText;
240
+ continue;
241
+ }
242
+ }
243
+
244
+ // Find tool_use in assistant's content blocks
245
+ if (json.choices && Array.isArray(json.choices)) {
246
+ const choice = json.choices[0];
247
+ if (choice.message && Array.isArray(choice.message.content)) {
248
+ const toolUseBlock = choice.message.content.find((block: ToolUseType) => block.type === "tool_use");
249
+ if (toolUseBlock) {
250
+ completeText = schemaStrategy.processResponse(toolUseBlock);
251
+ yield completeText;
252
+ continue;
253
+ }
254
+ }
255
+
256
+ // Handle case where the tool use is in the delta
257
+ if (choice.delta && Array.isArray(choice.delta.content)) {
258
+ const toolUseBlock = choice.delta.content.find((block: ToolUseType) => block.type === "tool_use");
259
+ if (toolUseBlock) {
260
+ completeText = schemaStrategy.processResponse(toolUseBlock);
261
+ yield completeText;
262
+ continue;
263
+ }
264
+ }
265
+ }
266
+ }
267
+
268
+ // Extract content from the delta
269
+ if (json.choices?.[0]?.delta?.content !== undefined) {
270
+ const content = json.choices[0].delta.content || "";
271
+
272
+ // Treat all models the same - yield as content arrives
273
+ completeText += content;
274
+ yield schemaStrategy.processResponse(completeText);
275
+ }
276
+ // Handle message content format (non-streaming deltas)
277
+ else if (json.choices?.[0]?.message?.content !== undefined) {
278
+ const content = json.choices[0].message.content || "";
279
+ completeText += content;
280
+ yield schemaStrategy.processResponse(completeText);
281
+ }
282
+ // Handle content blocks for Claude/Anthropic response format
283
+ else if (json.choices?.[0]?.message?.content && Array.isArray(json.choices[0].message.content)) {
284
+ const contentBlocks = json.choices[0].message.content;
285
+ // Find text or tool_use blocks
286
+ for (const block of contentBlocks) {
287
+ if (block.type === "text") {
288
+ completeText += block.text || "";
289
+ } else if (isClaudeWithSchema && block.type === "tool_use") {
290
+ completeText = schemaStrategy.processResponse(block);
291
+ break; // We found what we need
292
+ }
293
+ }
294
+
295
+ yield schemaStrategy.processResponse(completeText);
296
+ }
297
+
298
+ // Find text delta for content blocks (Claude format)
299
+ if (json.type === "content_block_delta" && json.delta && json.delta.type === "text_delta" && json.delta.text) {
300
+ if (options.debug) {
301
+ console.log(`[callAi:${PACKAGE_VERSION}] Received text delta:`, json.delta.text);
302
+ }
303
+ completeText += json.delta.text;
304
+ // In some models like Claude, don't yield partial results as they can be malformed JSON
305
+ // Only yield what we've seen so far if it's not a Claude model with schema
306
+ if (!isClaudeWithSchema) {
307
+ yield schemaStrategy.processResponse(completeText);
308
+ }
309
+ }
310
+ } catch (e) {
311
+ if (options.debug) {
312
+ console.error(`[callAIStreaming] Error parsing JSON chunk:`, e);
313
+ }
314
+ }
315
+ }
316
+ }
317
+
318
+ // We no longer need special error handling here as errors are thrown immediately
319
+
320
+ // No extra error handling needed here - errors are thrown immediately
321
+
322
+ // If we have assembled tool calls but haven't yielded them yet
323
+ if (toolCallsAssembled && (!completeText || completeText.length === 0)) {
324
+ // Try to fix any remaining JSON issues before returning
325
+ let result = toolCallsAssembled;
326
+
327
+ try {
328
+ // Try to parse as-is first
329
+ JSON.parse(result);
330
+ } catch (e) {
331
+ if (options.debug) {
332
+ console.log(`[callAi:${PACKAGE_VERSION}] Final JSON validation failed:`, e, `\nAttempting to fix JSON:`, result);
333
+ }
334
+
335
+ // Apply more robust fixes for Claude's streaming JSON issues
336
+
337
+ // 1. Remove trailing commas (common in malformed JSON)
338
+ result = result.replace(/,\s*([}\]])/, "$1");
339
+
340
+ // 2. Ensure we have proper JSON structure
341
+ // Add closing braces if missing
342
+ const openBraces = (result.match(/\{/g) || []).length;
343
+ const closeBraces = (result.match(/\}/g) || []).length;
344
+ if (openBraces > closeBraces) {
345
+ result += "}".repeat(openBraces - closeBraces);
346
+ }
347
+
348
+ // Add opening brace if missing
349
+ if (!result.trim().startsWith("{")) {
350
+ result = "{" + result.trim();
351
+ }
352
+
353
+ // Ensure it ends with a closing brace
354
+ if (!result.trim().endsWith("}")) {
355
+ result += "}";
356
+ }
357
+
358
+ // Fix dangling property names without values
359
+ result = result.replace(/"(\w+)"\s*:\s*$/g, '"$1":null');
360
+
361
+ // Fix missing property values
362
+ result = result.replace(/"(\w+)"\s*:\s*,/g, '"$1":null,');
363
+
364
+ // Balance brackets
365
+ const openBrackets = (result.match(/\[/g) || []).length;
366
+ const closeBrackets = (result.match(/\]/g) || []).length;
367
+ if (openBrackets > closeBrackets) {
368
+ result += "]".repeat(openBrackets - closeBrackets);
369
+ }
370
+
371
+ if (options.debug) {
372
+ console.log(`[callAi:${PACKAGE_VERSION}] Applied final JSON fixes:`, result);
373
+ }
374
+ }
375
+
376
+ // Return the assembled tool call
377
+ completeText = result;
378
+
379
+ // Try one more time to validate
380
+ try {
381
+ JSON.parse(completeText);
382
+ } catch (finalParseError) {
383
+ if (options.debug) {
384
+ console.error(`[callAi:${PACKAGE_VERSION}] Final JSON validation still failed:`, finalParseError);
385
+ }
386
+ }
387
+
388
+ yield completeText;
389
+ }
390
+
391
+ // Record streaming completion in metadata
392
+ const endTime = Date.now();
393
+ meta.timing.endTime = endTime;
394
+ meta.timing.duration = endTime - meta.timing.startTime;
395
+
396
+ // Add the rawResponse field to match non-streaming behavior
397
+ // For streaming, we use the final complete text as the raw response
398
+ meta.rawResponse = completeText;
399
+
400
+ // Store metadata for this response
401
+ const boxed = boxString(completeText);
402
+ responseMetadata.set(boxed, meta);
403
+
404
+ // Return the complete text as the final value
405
+ return completeText;
406
+ } catch (error) {
407
+ // Streaming generators must properly handle errors
408
+ if (options.debug || globalDebug) {
409
+ console.error(`[callAi:${PACKAGE_VERSION}] Streaming error:`, error);
410
+ }
411
+
412
+ // This error will be caught in the caller's try/catch block
413
+ throw error;
414
+ }
415
+ }
416
+
417
+ // Simplified generator for accessing streaming results
418
+ // Returns an async generator that yields blocks of text
419
+ // This is a higher-level function that prepares the request
420
+ // and handles model fallback
421
+ async function* callAIStreaming(
422
+ prompt: string | Message[],
423
+ options: CallAIOptions = {},
424
+ isRetry = false,
425
+ ): AsyncGenerator<string, string, unknown> {
426
+ // Convert simple string prompts to message array format
427
+ const messages = Array.isArray(prompt) ? prompt : [{ role: "user", content: prompt } satisfies Message];
428
+
429
+ // API key should be provided by options (validation happens in callAi)
430
+ const apiKey = options.apiKey;
431
+ const model = options.model || "openai/gpt-3.5-turbo";
432
+
433
+ // Default endpoint compatible with OpenAI API
434
+ const endpoint = options.endpoint || "https://openrouter.ai/api/v1";
435
+
436
+ // Build the endpoint URL
437
+ const url = `${endpoint}/chat/completions`;
438
+
439
+ // Choose a schema strategy based on model
440
+ const schemaStrategy = options.schemaStrategy;
441
+ if (!schemaStrategy) {
442
+ throw new Error("Schema strategy is required for streaming");
443
+ }
444
+
445
+ // Default to JSON response for certain models
446
+ const responseFormat = options.responseFormat || /gpt-4/.test(model) || /gpt-3.5/.test(model) ? "json" : undefined;
447
+
448
+ const debug = options.debug === undefined ? globalDebug : options.debug;
449
+
450
+ if (debug) {
451
+ console.log(`[callAi:${PACKAGE_VERSION}] Making streaming request to: ${url}`);
452
+ console.log(`[callAi:${PACKAGE_VERSION}] With model: ${model}`);
453
+ }
454
+
455
+ // Build request body
456
+ const requestBody: SchemaAIMessageRequest = {
457
+ model,
458
+ messages,
459
+ max_tokens: options.maxTokens || 2048,
460
+ temperature: options.temperature !== undefined ? options.temperature : 0.7,
461
+ top_p: options.topP ? options.topP : 1,
462
+ stream: true,
463
+ };
464
+
465
+ // Add response_format if specified or for JSON handling
466
+ if (responseFormat === "json") {
467
+ requestBody.response_format = { type: "json_object" };
468
+ }
469
+
470
+ // Add schema-specific parameters (if schema is provided)
471
+ if (options.schema) {
472
+ Object.assign(requestBody, schemaStrategy?.prepareRequest(options.schema, messages));
473
+ }
474
+
475
+ // Add HTTP referer and other options to help with abuse prevention
476
+ const headers: Record<string, string> = {
477
+ Authorization: `Bearer ${apiKey}`,
478
+ "HTTP-Referer": options.referer || "https://vibes.diy",
479
+ "X-Title": options.title || "Vibes",
480
+ "Content-Type": "application/json",
481
+ };
482
+
483
+ // Add any additional headers
484
+ if (options.headers) {
485
+ Object.assign(headers, options.headers);
486
+ }
487
+
488
+ // Copy any other options not explicitly handled above
489
+ Object.keys(options).forEach((key) => {
490
+ if (
491
+ ![
492
+ "apiKey",
493
+ "model",
494
+ "endpoint",
495
+ "stream",
496
+ "schema",
497
+ "maxTokens",
498
+ "temperature",
499
+ "topP",
500
+ "responseFormat",
501
+ "referer",
502
+ "title",
503
+ "headers",
504
+ "skipRefresh",
505
+ "debug",
506
+ ].includes(key)
507
+ ) {
508
+ requestBody[key] = options[key];
509
+ }
510
+ });
511
+
512
+ if (debug) {
513
+ console.log(`[callAi:${PACKAGE_VERSION}] Request headers:`, headers);
514
+ console.log(`[callAi:${PACKAGE_VERSION}] Request body:`, requestBody);
515
+ }
516
+
517
+ let response;
518
+ try {
519
+ // Make the API request
520
+ response = await callAiFetch(options)(url, {
521
+ method: "POST",
522
+ headers,
523
+ body: JSON.stringify(requestBody),
524
+ });
525
+
526
+ // Handle HTTP errors
527
+ if (!response.ok) {
528
+ // Check if this is an invalid model error that we can handle with a fallback
529
+ const { isInvalidModel, errorData } = await checkForInvalidModelError(response, model, debug);
530
+
531
+ if (isInvalidModel && !isRetry && !options.skipRetry) {
532
+ if (debug) {
533
+ console.log(`[callAi:${PACKAGE_VERSION}] Invalid model "${model}", falling back to "${FALLBACK_MODEL}"`);
534
+ }
535
+
536
+ // Retry with the fallback model using yield* to delegate to the other generator
537
+ yield* callAIStreaming(
538
+ prompt,
539
+ {
540
+ ...options,
541
+ model: FALLBACK_MODEL,
542
+ },
543
+ true, // Mark as retry to prevent infinite fallback loops
544
+ );
545
+
546
+ // Generator delegation handles returning the final value
547
+ return "";
548
+ }
549
+
550
+ // For other errors, throw with details
551
+ const errorText = errorData ? JSON.stringify(errorData) : `HTTP error! Status: ${response.status}`;
552
+ throw new Error(errorText);
553
+ }
554
+
555
+ // Yield streaming results through the generator
556
+ yield* createStreamingGenerator(response, options, schemaStrategy, model);
557
+
558
+ // The createStreamingGenerator will return the final assembled string
559
+ return ""; // This is never reached due to yield*
560
+ } catch (fetchError) {
561
+ // Network errors must be directly re-thrown without modification
562
+ // This is exactly how the original implementation handles it
563
+ if (debug) {
564
+ console.error(`[callAi:${PACKAGE_VERSION}] Network error during fetch:`, fetchError);
565
+ }
566
+ // Critical: throw the exact same error object without any wrapping
567
+ throw fetchError;
568
+ }
569
+ }
570
// Public entry points: the low-level SSE generator and the high-level wrapper.
export { createStreamingGenerator, callAIStreaming };
package/tsconfig.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "extends": [
3
+ "/home/runner/work/vibes.diy/vibes.diy/tsconfig.dist.json"
4
+ ],
5
+ "compilerOptions": {
6
+ "outDir": "../npm/",
7
+ "noEmit": false
8
+ },
9
+ "include": [
10
+ "**/*"
11
+ ],
12
+ "exclude": [
13
+ "node_modules",
14
+ "dist",
15
+ ".git",
16
+ ".vscode"
17
+ ]
18
+ }