call-ai 0.5.0 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api.d.ts +12 -0
- package/dist/api.js +379 -0
- package/dist/index.d.ts +4 -52
- package/dist/index.js +23 -651
- package/dist/strategies/index.d.ts +5 -0
- package/dist/strategies/index.js +21 -0
- package/dist/strategies/model-strategies.d.ts +24 -0
- package/dist/strategies/model-strategies.js +184 -0
- package/dist/strategies/strategy-selector.d.ts +8 -0
- package/dist/strategies/strategy-selector.js +79 -0
- package/dist/types.d.ts +106 -0
- package/dist/types.js +5 -0
- package/dist/utils.d.ts +8 -0
- package/dist/utils.js +52 -0
- package/package.json +13 -9
package/dist/api.d.ts
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
/**
 * Core API implementation for call-ai
 */
import { CallAIOptions, Message } from "./types";
/**
 * Make an AI API call with the given options
 * @param prompt User prompt as string or an array of message objects
 * @param options Configuration options including optional schema for structured output
 * @returns A Promise that resolves to the complete response string when streaming is disabled,
 * or an AsyncGenerator that yields partial responses when streaming is enabled
 *
 * Note: the return type is a union decided at runtime by `options.stream`;
 * callers must check which form they requested before awaiting/iterating.
 */
export declare function callAI(prompt: string | Message[], options?: CallAIOptions): Promise<string> | AsyncGenerator<string, string, unknown>;
|
package/dist/api.js
ADDED
|
@@ -0,0 +1,379 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.callAI = callAI;
|
|
4
|
+
const strategies_1 = require("./strategies");
|
|
5
|
+
/**
 * Make an AI API call with the given options.
 *
 * Dispatches to one of three execution paths:
 *  1. forced internal streaming (buffered) when the selected schema strategy
 *     requires streaming but the caller asked for a plain promise,
 *  2. plain non-streaming, or
 *  3. caller-requested streaming (async generator).
 *
 * @param prompt User prompt as string or an array of message objects
 * @param options Configuration options including optional schema for structured output
 * @returns A Promise of the full response, or an AsyncGenerator of partial responses
 */
function callAI(prompt, options = {}) {
    // The strategy decides, per model + schema, whether streaming must be used.
    const strategy = (0, strategies_1.chooseSchemaStrategy)(options.model, options.schema || null);
    // Caller did not ask for streaming, but the strategy (e.g. Claude tool
    // mode) only works over a stream: stream internally and buffer the result.
    if (!options.stream && strategy.shouldForceStream) {
        return bufferStreamingResults(prompt, options);
    }
    // Streaming is only honored on an explicit `stream: true`.
    return options.stream === true
        ? callAIStreaming(prompt, options)
        : callAINonStreaming(prompt, options);
}
|
|
27
|
+
/**
 * Buffer streaming results into a single response for cases where
 * we need to use streaming internally but the caller requested non-streaming.
 *
 * @param prompt User prompt as string or an array of message objects
 * @param options Caller options; a copy with `stream: true` is used internally
 * @returns The final accumulated response string, or a JSON error payload
 *          (via handleApiError) if the stream fails
 */
async function bufferStreamingResults(prompt, options) {
    // Create a copy of options with streaming enabled; do not mutate the caller's object.
    const streamingOptions = {
        ...options,
        stream: true,
    };
    try {
        const generator = callAIStreaming(prompt, streamingOptions);
        // Each yielded chunk is the full accumulated text so far, so only the
        // last chunk matters. (Removed an unused `chunkCount` counter that was
        // incremented but never read.)
        let finalResult = "";
        for await (const chunk of generator) {
            finalResult = chunk;
        }
        return finalResult;
    }
    catch (error) {
        return handleApiError(error, "Streaming buffer error");
    }
}
|
|
53
|
+
/**
 * Standardized API error handler: logs the failure with its context tag and
 * returns a JSON string payload describing the error (this library reports
 * errors as string results rather than throwing to the caller).
 *
 * @param error Arbitrary thrown value; coerced to string for the payload
 * @param context Short label identifying where the failure occurred
 * @returns JSON string with `error` and a user-facing `message`
 */
function handleApiError(error, context) {
    console.error(`[callAI:${context}]:`, error);
    const description = String(error);
    const payload = {
        error: description,
        message: `Sorry, I couldn't process that request: ${description}`,
    };
    return JSON.stringify(payload);
}
|
|
63
|
+
/**
 * Prepare request parameters common to both streaming and non-streaming calls.
 *
 * Resolves the API key, selects the schema strategy for the model, normalizes
 * the prompt into a messages array, and builds the fetch options (method,
 * headers, JSON body) for the chat-completions endpoint.
 *
 * @param prompt User prompt as string or an array of message objects
 * @param options Caller options (apiKey, model, endpoint, stream, schema, extras)
 * @returns { apiKey, model, endpoint, requestOptions, schemaStrategy }
 * @throws If no API key is available from options or window.CALLAI_API_KEY
 */
function prepareRequestParams(prompt, options) {
    // API key: explicit option wins; otherwise fall back to a browser global.
    const apiKey = options.apiKey ||
        (typeof window !== "undefined" ? window.CALLAI_API_KEY : null);
    const schema = options.schema || null;
    if (!apiKey) {
        throw new Error("API key is required. Provide it via options.apiKey or set window.CALLAI_API_KEY");
    }
    // Select the appropriate strategy based on model and schema; the strategy
    // may also substitute a concrete model id.
    const schemaStrategy = (0, strategies_1.chooseSchemaStrategy)(options.model, schema);
    const model = schemaStrategy.model;
    const endpoint = options.endpoint || "https://openrouter.ai/api/v1/chat/completions";
    // Handle both string prompts and message arrays for backward compatibility.
    const messages = Array.isArray(prompt)
        ? prompt
        : [{ role: "user", content: prompt }];
    // Base request body; `stream` is only true on an explicit boolean true.
    const requestParams = {
        model: model,
        stream: options.stream === true,
        messages: messages,
    };
    // Support for multimodal content (like images).
    if (options.modalities && options.modalities.length > 0) {
        requestParams.modalities = options.modalities;
    }
    // Apply the strategy's request preparation (e.g. tools / response_format).
    const strategyParams = schemaStrategy.prepareRequest(schema, messages);
    // If the strategy returns custom messages, use those instead.
    if (strategyParams.messages) {
        requestParams.messages = strategyParams.messages;
    }
    // Add all other strategy parameters.
    Object.entries(strategyParams).forEach(([key, value]) => {
        if (key !== "messages") {
            requestParams[key] = value;
        }
    });
    // Pass through any other caller options, excluding internal keys.
    // NOTE(review): keys not in this exclusion list (e.g. `modalities`, or any
    // key the strategy also set) are written again here and can override the
    // strategy's values — confirm this precedence is intended.
    Object.entries(options).forEach(([key, value]) => {
        if (!["apiKey", "model", "endpoint", "stream", "schema"].includes(key)) {
            requestParams[key] = value;
        }
    });
    const requestOptions = {
        method: "POST",
        headers: {
            Authorization: `Bearer ${apiKey}`,
            "Content-Type": "application/json",
        },
        body: JSON.stringify(requestParams),
    };
    return { apiKey, model, endpoint, requestOptions, schemaStrategy };
}
|
|
119
|
+
/**
 * Internal implementation for non-streaming API calls.
 *
 * @param prompt User prompt as string or an array of message objects
 * @param options Caller options (see prepareRequestParams)
 * @returns The processed response content, or a JSON error payload string
 *          (this function reports failures as return values, not throws)
 */
async function callAINonStreaming(prompt, options = {}) {
    try {
        const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, options);
        const response = await fetch(endpoint, requestOptions);
        let result;
        // For Claude, use text() instead of json() to avoid potential hanging.
        // NOTE(review): the model string is matched by name; presumably any
        // model id containing "claude" routes through OpenRouter's Claude
        // backend — confirm against the strategy selector.
        if (/claude/i.test(model)) {
            try {
                result = await extractClaudeResponse(response);
            }
            catch (error) {
                return handleApiError(error, "Claude API response processing failed");
            }
        }
        else {
            // NOTE(review): response.ok is not checked here; a non-2xx JSON
            // body is expected to surface via result.error below.
            result = await response.json();
        }
        // Handle error payloads embedded in an otherwise-parsed response.
        if (result.error) {
            console.error("API returned an error:", result.error);
            return JSON.stringify({
                error: result.error,
                message: result.error.message || "API returned an error",
            });
        }
        // Extract content from the response (tool-use block or plain text).
        const content = extractContent(result, schemaStrategy);
        // Process the content based on model type / strategy.
        return schemaStrategy.processResponse(content);
    }
    catch (error) {
        return handleApiError(error, "Non-streaming API call");
    }
}
|
|
156
|
+
/**
 * Extract content from an API response, accounting for different formats.
 *
 * In tool mode with a `tool_use` stop reason, the tool_use block is searched
 * for first at the top level (`result.content`) and then inside the first
 * choice's message content. Otherwise the first choice's plain message
 * content is returned.
 *
 * @param result Parsed API response body
 * @param schemaStrategy Strategy object (only `strategy` is read here)
 * @returns A tool_use block object, or the message content string
 * @throws If no tool_use block applies and the response has no choices
 */
function extractContent(result, schemaStrategy) {
    // Locate a tool_use block in an array of content blocks, if any.
    const findToolUse = (blocks) =>
        Array.isArray(blocks)
            ? blocks.find((block) => block.type === "tool_use")
            : undefined;
    let content;
    const expectsToolUse =
        schemaStrategy.strategy === "tool_mode" &&
        result.stop_reason === "tool_use";
    if (expectsToolUse) {
        // Anthropic-style: blocks live directly under result.content.
        content = findToolUse(result.content);
        // OpenAI-style: blocks live under the first choice's message.
        if (!content && Array.isArray(result.choices)) {
            const firstChoice = result.choices[0];
            content = findToolUse(firstChoice.message && firstChoice.message.content);
        }
    }
    // Fall back to the standard message content.
    if (!content) {
        if (!result.choices || !result.choices.length) {
            throw new Error("Invalid response format from API");
        }
        content = result.choices[0]?.message?.content || "";
    }
    return content;
}
|
|
191
|
+
/**
 * Extract and parse the JSON body of a Claude API response.
 *
 * Reads the body via text() with a 5 second timeout guard (response.json()
 * has been observed to hang for Claude responses — see callAINonStreaming),
 * then parses it as JSON.
 *
 * @param response Fetch Response object
 * @returns The parsed JSON body
 * @throws If text extraction times out/fails, or the body is not valid JSON
 */
async function extractClaudeResponse(response) {
    let textResponse;
    const textPromise = response.text();
    // Keep the timer handle so it can be cleared: previously the pending
    // setTimeout was never cancelled, which kept the event loop alive for 5s
    // after a successful read and then rejected an unobserved promise
    // (unhandled rejection).
    let timeoutId;
    const timeoutPromise = new Promise((_resolve, reject) => {
        timeoutId = setTimeout(() => {
            reject(new Error("Text extraction timed out after 5 seconds"));
        }, 5000);
    });
    try {
        textResponse = (await Promise.race([
            textPromise,
            timeoutPromise,
        ]));
    }
    catch (textError) {
        console.error(`Text extraction timed out or failed:`, textError);
        throw new Error("Claude response text extraction timed out. This is likely an issue with the Claude API's response format.");
    }
    finally {
        // Cancel the guard timer whether we succeeded or timed out.
        clearTimeout(timeoutId);
    }
    try {
        return JSON.parse(textResponse);
    }
    catch (err) {
        console.error(`Failed to parse Claude response as JSON:`, err);
        throw new Error(`Failed to parse Claude response as JSON: ${err}`);
    }
}
|
|
220
|
+
/**
 * Internal implementation for streaming API calls.
 *
 * Reads the SSE stream line by line, accumulating text deltas (and, for
 * Claude tool mode, assembling tool-call argument fragments) and yielding the
 * full accumulated text after each chunk.
 *
 * @param prompt User prompt as string or an array of message objects
 * @param options Caller options; `stream: true` is forced internally
 * @yields The processed, fully-accumulated text so far
 * @returns The final processed text (or assembled tool-call JSON)
 */
async function* callAIStreaming(prompt, options = {}) {
    try {
        const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, { ...options, stream: true });
        const response = await fetch(endpoint, requestOptions);
        if (!response.ok) {
            const errorText = await response.text();
            console.error(`API Error: ${response.status} ${response.statusText}`, errorText);
            throw new Error(`API returned error ${response.status}: ${response.statusText}`);
        }
        // Handle streaming response.
        if (!response.body) {
            throw new Error("Response body is undefined - API endpoint may not support streaming");
        }
        const reader = response.body.getReader();
        const decoder = new TextDecoder();
        let completeText = "";
        // NOTE(review): chunkCount is incremented below but never read.
        let chunkCount = 0;
        // Accumulates tool-call argument fragments across deltas (Claude tool mode).
        let toolCallsAssembled = "";
        while (true) {
            const { done, value } = await reader.read();
            if (done) {
                break;
            }
            // NOTE(review): decode() without {stream: true} and splitting on
            // "\n" assumes SSE events never straddle a network chunk — confirm.
            const chunk = decoder.decode(value);
            const lines = chunk.split("\n").filter((line) => line.trim() !== "");
            for (const line of lines) {
                if (line.startsWith("data: ")) {
                    // Skip [DONE] marker or OPENROUTER PROCESSING keep-alive lines.
                    if (line.includes("[DONE]") ||
                        line.includes("OPENROUTER PROCESSING")) {
                        continue;
                    }
                    try {
                        const jsonLine = line.replace("data: ", "");
                        if (!jsonLine.trim()) {
                            continue;
                        }
                        chunkCount++;
                        // Parse the JSON chunk.
                        const json = JSON.parse(jsonLine);
                        // Claude with a schema uses tool mode and needs special handling.
                        const isClaudeWithSchema = /claude/i.test(model) && schemaStrategy.strategy === "tool_mode";
                        if (isClaudeWithSchema) {
                            // Claude streaming tool calls - need to assemble arguments.
                            if (json.choices && json.choices.length > 0) {
                                const choice = json.choices[0];
                                // On finish, emit the fully assembled tool-call JSON.
                                if (choice.finish_reason === "tool_calls") {
                                    // NOTE(review): this try/catch guards no parsing —
                                    // only an assignment and a yield — so the catch
                                    // branch is effectively dead.
                                    try {
                                        // Parse the assembled JSON.
                                        completeText = toolCallsAssembled;
                                        yield completeText;
                                        continue;
                                    }
                                    catch (e) {
                                        console.error("[callAIStreaming] Error parsing assembled tool call:", e);
                                    }
                                }
                                // Assemble tool_calls arguments from delta.
                                if (choice.delta && choice.delta.tool_calls) {
                                    const toolCall = choice.delta.tool_calls[0];
                                    if (toolCall &&
                                        toolCall.function &&
                                        toolCall.function.arguments !== undefined) {
                                        toolCallsAssembled += toolCall.function.arguments;
                                        // We don't yield here to avoid partial JSON.
                                    }
                                }
                            }
                        }
                        // Handle tool use response - old (Anthropic-native) format.
                        if (isClaudeWithSchema &&
                            (json.stop_reason === "tool_use" || json.type === "tool_use")) {
                            // First try direct tool use object format.
                            if (json.type === "tool_use") {
                                completeText = schemaStrategy.processResponse(json);
                                yield completeText;
                                continue;
                            }
                            // Extract the tool use content from top-level blocks.
                            if (json.content && Array.isArray(json.content)) {
                                const toolUseBlock = json.content.find((block) => block.type === "tool_use");
                                if (toolUseBlock) {
                                    completeText = schemaStrategy.processResponse(toolUseBlock);
                                    yield completeText;
                                    continue;
                                }
                            }
                            // Find tool_use in the assistant's content blocks.
                            if (json.choices && Array.isArray(json.choices)) {
                                const choice = json.choices[0];
                                if (choice.message && Array.isArray(choice.message.content)) {
                                    const toolUseBlock = choice.message.content.find((block) => block.type === "tool_use");
                                    if (toolUseBlock) {
                                        completeText = schemaStrategy.processResponse(toolUseBlock);
                                        yield completeText;
                                        continue;
                                    }
                                }
                                // Handle case where the tool use is in the delta.
                                if (choice.delta && Array.isArray(choice.delta.content)) {
                                    const toolUseBlock = choice.delta.content.find((block) => block.type === "tool_use");
                                    if (toolUseBlock) {
                                        completeText = schemaStrategy.processResponse(toolUseBlock);
                                        yield completeText;
                                        continue;
                                    }
                                }
                            }
                        }
                        // Extract content from the delta (standard streaming path).
                        if (json.choices?.[0]?.delta?.content !== undefined) {
                            const content = json.choices[0].delta.content || "";
                            // Treat all models the same - yield as content arrives.
                            completeText += content;
                            yield schemaStrategy.processResponse(completeText);
                        }
                        // Handle message content format (non-streaming deltas).
                        else if (json.choices?.[0]?.message?.content !== undefined) {
                            const content = json.choices[0].message.content || "";
                            completeText += content;
                            yield schemaStrategy.processResponse(completeText);
                        }
                        // Handle content blocks for Claude/Anthropic response format.
                        // NOTE(review): this branch is unreachable — any truthy
                        // message.content (array included) already matched the
                        // `!== undefined` branch above, which then appends the
                        // array itself to completeText. Looks like this branch
                        // should be checked first; confirm intended ordering.
                        else if (json.choices?.[0]?.message?.content &&
                            Array.isArray(json.choices[0].message.content)) {
                            const contentBlocks = json.choices[0].message.content;
                            // Find text or tool_use blocks.
                            for (const block of contentBlocks) {
                                if (block.type === "text") {
                                    completeText += block.text || "";
                                }
                                else if (isClaudeWithSchema && block.type === "tool_use") {
                                    completeText = schemaStrategy.processResponse(block);
                                    break; // We found what we need.
                                }
                            }
                            yield schemaStrategy.processResponse(completeText);
                        }
                    }
                    catch (e) {
                        // Malformed SSE lines are logged and skipped, not fatal.
                        console.error(`[callAIStreaming] Error parsing JSON chunk:`, e);
                    }
                }
            }
        }
        // If we have assembled tool calls but haven't yielded them yet,
        // return the raw assembled JSON string.
        if (toolCallsAssembled && (!completeText || completeText.length === 0)) {
            return toolCallsAssembled;
        }
        // Ensure the final return has proper, processed content.
        return schemaStrategy.processResponse(completeText);
    }
    catch (error) {
        return handleApiError(error, "Streaming API call");
    }
}
|
package/dist/index.d.ts
CHANGED
|
@@ -1,55 +1,7 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* call-ai: A lightweight library for making AI API calls
|
|
3
3
|
*/
|
|
4
|
-
export
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
export interface Schema {
|
|
9
|
-
/**
|
|
10
|
-
* Optional schema name - will be sent to OpenRouter if provided
|
|
11
|
-
* If not specified, defaults to "result"
|
|
12
|
-
*/
|
|
13
|
-
name?: string;
|
|
14
|
-
/**
|
|
15
|
-
* Properties defining the structure of your schema
|
|
16
|
-
*/
|
|
17
|
-
properties: Record<string, any>;
|
|
18
|
-
/**
|
|
19
|
-
* Fields that are required in the response (defaults to all properties)
|
|
20
|
-
*/
|
|
21
|
-
required?: string[];
|
|
22
|
-
/**
|
|
23
|
-
* Whether to allow fields not defined in properties (defaults to false)
|
|
24
|
-
*/
|
|
25
|
-
additionalProperties?: boolean;
|
|
26
|
-
/**
|
|
27
|
-
* Any additional schema properties to pass through
|
|
28
|
-
*/
|
|
29
|
-
[key: string]: any;
|
|
30
|
-
}
|
|
31
|
-
export interface CallAIOptions {
|
|
32
|
-
apiKey?: string;
|
|
33
|
-
model?: string;
|
|
34
|
-
endpoint?: string;
|
|
35
|
-
stream?: boolean;
|
|
36
|
-
schema?: Schema | null;
|
|
37
|
-
[key: string]: any;
|
|
38
|
-
}
|
|
39
|
-
export interface AIResponse {
|
|
40
|
-
text: string;
|
|
41
|
-
usage?: {
|
|
42
|
-
promptTokens: number;
|
|
43
|
-
completionTokens: number;
|
|
44
|
-
totalTokens: number;
|
|
45
|
-
};
|
|
46
|
-
model: string;
|
|
47
|
-
}
|
|
48
|
-
/**
|
|
49
|
-
* Make an AI API call with the given options
|
|
50
|
-
* @param prompt User prompt as string or an array of message objects
|
|
51
|
-
* @param options Configuration options including optional schema for structured output
|
|
52
|
-
* @returns A Promise that resolves to the complete response string when streaming is disabled,
|
|
53
|
-
* or an AsyncGenerator that yields partial responses when streaming is enabled
|
|
54
|
-
*/
|
|
55
|
-
export declare function callAI(prompt: string | Message[], options?: CallAIOptions): Promise<string> | AsyncGenerator<string, string, unknown>;
|
|
4
|
+
export * from "./types";
|
|
5
|
+
export { callAI } from "./api";
|
|
6
|
+
export * from "./strategies";
|
|
7
|
+
export * from "./utils";
|