call-ai 0.4.1 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +46 -7
- package/dist/api.d.ts +12 -0
- package/dist/api.js +379 -0
- package/dist/index.d.ts +4 -52
- package/dist/index.js +23 -127
- package/dist/strategies/index.d.ts +5 -0
- package/dist/strategies/index.js +21 -0
- package/dist/strategies/model-strategies.d.ts +24 -0
- package/dist/strategies/model-strategies.js +184 -0
- package/dist/strategies/strategy-selector.d.ts +8 -0
- package/dist/strategies/strategy-selector.js +79 -0
- package/dist/types.d.ts +106 -0
- package/dist/types.js +5 -0
- package/dist/utils.d.ts +8 -0
- package/dist/utils.js +52 -0
- package/package.json +14 -8
package/README.md
CHANGED
|
@@ -109,15 +109,45 @@ for await (const chunk of generator) {
|
|
|
109
109
|
|
|
110
110
|
## Supported LLM Providers
|
|
111
111
|
|
|
112
|
-
|
|
112
|
+
Call-AI supports all models available through OpenRouter, including:
|
|
113
113
|
|
|
114
|
-
-
|
|
115
|
-
-
|
|
116
|
-
-
|
|
117
|
-
-
|
|
118
|
-
-
|
|
114
|
+
- OpenAI models (GPT-4, GPT-3.5, etc.)
|
|
115
|
+
- Anthropic Claude
|
|
116
|
+
- Gemini
|
|
117
|
+
- Llama 3
|
|
118
|
+
- Mistral
|
|
119
|
+
- And many more
|
|
119
120
|
|
|
120
|
-
|
|
121
|
+
## Choosing a model
|
|
122
|
+
|
|
123
|
+
Different LLMs have different strengths when working with structured data. Based on our testing, here's a guide to help you choose the right model for your schema needs:
|
|
124
|
+
|
|
125
|
+
### Schema Complexity Guide
|
|
126
|
+
|
|
127
|
+
| Model Family | Grade | Simple Flat Schema | Complex Flat Schema | Nested Schema | Best For |
|
|
128
|
+
|--------------|-------|-------------------|---------------------|---------------|----------|
|
|
129
|
+
| OpenAI | A | ✅ Excellent | ✅ Excellent | ✅ Excellent | Most reliable for all schema types |
|
|
130
|
+
| Gemini | A | ✅ Excellent | ✅ Excellent | ✅ Good | Good all-around performance, especially with flat schemas |
|
|
131
|
+
| Claude | B | ✅ Excellent | ⚠️ Good (occasional JSON errors) | ✅ Good | Simple schemas, robust handling of complex prompts |
|
|
132
|
+
| Llama 3 | C | ✅ Good | ✅ Good | ❌ Poor | Simpler flat schemas, may struggle with nested structures |
|
|
133
|
+
| Deepseek | C | ✅ Good | ✅ Good | ❌ Poor | Basic flat schemas only |
|
|
134
|
+
|
|
135
|
+
### Schema Structure Recommendations
|
|
136
|
+
|
|
137
|
+
1. **Flat schemas perform better across all models**. If you need maximum compatibility, avoid deeply nested structures.
|
|
138
|
+
|
|
139
|
+
2. **Field names matter**. Some models have preferences for certain property naming patterns:
|
|
140
|
+
- Use simple, common naming patterns like `name`, `type`, `items`, `price`
|
|
141
|
+
- Avoid deeply nested object hierarchies (more than 2 levels deep)
|
|
142
|
+
- Keep array items simple (strings or flat objects)
|
|
143
|
+
|
|
144
|
+
3. **Model-specific considerations**:
|
|
145
|
+
- **OpenAI models**: Best overall schema adherence and handle complex nesting well
|
|
146
|
+
- **Claude models**: Great for simple schemas, occasional JSON formatting issues with complex structures
|
|
147
|
+
- **Gemini models**: Good general performance, handles array properties well
|
|
148
|
+
- **Llama/Mistral/Deepseek**: Strong with flat schemas, but often ignore nesting structure and provide their own organization
|
|
149
|
+
|
|
150
|
+
4. **For mission-critical applications** requiring schema adherence, use OpenAI models or implement fallback mechanisms.
|
|
121
151
|
|
|
122
152
|
## Setting API Keys
|
|
123
153
|
|
|
@@ -201,6 +231,15 @@ MIT or Apache-2.0, at your option
|
|
|
201
231
|
5. Run type checking: `npm run typecheck`
|
|
202
232
|
6. Create a pull request
|
|
203
233
|
|
|
234
|
+
### Integration Tests
|
|
235
|
+
|
|
236
|
+
The project includes integration tests that make real API calls to verify functionality with actual LLM models:
|
|
237
|
+
|
|
238
|
+
1. Copy `.env.example` to `.env` and add your OpenRouter API key
|
|
239
|
+
2. Run integration tests: `npm run test:integration`
|
|
240
|
+
|
|
241
|
+
Note: Integration tests are excluded from the normal test suite to avoid making API calls during CI/CD. They require a valid API key to execute and will be skipped if no key is provided.
|
|
242
|
+
|
|
204
243
|
### Release Process
|
|
205
244
|
|
|
206
245
|
This library uses GitHub Actions to automate the release process:
|
package/dist/api.d.ts
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Core API implementation for call-ai
|
|
3
|
+
*/
|
|
4
|
+
import { CallAIOptions, Message } from "./types";
|
|
5
|
+
/**
|
|
6
|
+
* Make an AI API call with the given options
|
|
7
|
+
* @param prompt User prompt as string or an array of message objects
|
|
8
|
+
* @param options Configuration options including optional schema for structured output
|
|
9
|
+
* @returns A Promise that resolves to the complete response string when streaming is disabled,
|
|
10
|
+
* or an AsyncGenerator that yields partial responses when streaming is enabled
|
|
11
|
+
*/
|
|
12
|
+
export declare function callAI(prompt: string | Message[], options?: CallAIOptions): Promise<string> | AsyncGenerator<string, string, unknown>;
|
package/dist/api.js
ADDED
|
@@ -0,0 +1,379 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.callAI = callAI;
|
|
4
|
+
const strategies_1 = require("./strategies");
|
|
5
|
+
/**
|
|
6
|
+
* Make an AI API call with the given options
|
|
7
|
+
* @param prompt User prompt as string or an array of message objects
|
|
8
|
+
* @param options Configuration options including optional schema for structured output
|
|
9
|
+
* @returns A Promise that resolves to the complete response string when streaming is disabled,
|
|
10
|
+
* or an AsyncGenerator that yields partial responses when streaming is enabled
|
|
11
|
+
*/
|
|
12
|
+
function callAI(prompt, options = {}) {
|
|
13
|
+
// Check if we need to force streaming based on model strategy
|
|
14
|
+
const schemaStrategy = (0, strategies_1.chooseSchemaStrategy)(options.model, options.schema || null);
|
|
15
|
+
// Handle special case: Claude with tools requires streaming
|
|
16
|
+
if (!options.stream && schemaStrategy.shouldForceStream) {
|
|
17
|
+
// Buffer streaming results into a single response
|
|
18
|
+
return bufferStreamingResults(prompt, options);
|
|
19
|
+
}
|
|
20
|
+
// Handle normal non-streaming mode
|
|
21
|
+
if (options.stream !== true) {
|
|
22
|
+
return callAINonStreaming(prompt, options);
|
|
23
|
+
}
|
|
24
|
+
// Handle streaming mode
|
|
25
|
+
return callAIStreaming(prompt, options);
|
|
26
|
+
}
|
|
27
|
+
/**
|
|
28
|
+
* Buffer streaming results into a single response for cases where
|
|
29
|
+
* we need to use streaming internally but the caller requested non-streaming
|
|
30
|
+
*/
|
|
31
|
+
async function bufferStreamingResults(prompt, options) {
|
|
32
|
+
// Create a copy of options with streaming enabled
|
|
33
|
+
const streamingOptions = {
|
|
34
|
+
...options,
|
|
35
|
+
stream: true,
|
|
36
|
+
};
|
|
37
|
+
try {
|
|
38
|
+
// Get streaming generator
|
|
39
|
+
const generator = callAIStreaming(prompt, streamingOptions);
|
|
40
|
+
// Buffer all chunks
|
|
41
|
+
let finalResult = "";
|
|
42
|
+
let chunkCount = 0;
|
|
43
|
+
for await (const chunk of generator) {
|
|
44
|
+
finalResult = chunk; // Each chunk contains the full accumulated text
|
|
45
|
+
chunkCount++;
|
|
46
|
+
}
|
|
47
|
+
return finalResult;
|
|
48
|
+
}
|
|
49
|
+
catch (error) {
|
|
50
|
+
return handleApiError(error, "Streaming buffer error");
|
|
51
|
+
}
|
|
52
|
+
}
|
|
53
|
+
/**
|
|
54
|
+
* Standardized API error handler
|
|
55
|
+
*/
|
|
56
|
+
function handleApiError(error, context) {
|
|
57
|
+
console.error(`[callAI:${context}]:`, error);
|
|
58
|
+
return JSON.stringify({
|
|
59
|
+
error: String(error),
|
|
60
|
+
message: `Sorry, I couldn't process that request: ${String(error)}`,
|
|
61
|
+
});
|
|
62
|
+
}
|
|
63
|
+
/**
|
|
64
|
+
* Prepare request parameters common to both streaming and non-streaming calls
|
|
65
|
+
*/
|
|
66
|
+
function prepareRequestParams(prompt, options) {
|
|
67
|
+
const apiKey = options.apiKey ||
|
|
68
|
+
(typeof window !== "undefined" ? window.CALLAI_API_KEY : null);
|
|
69
|
+
const schema = options.schema || null;
|
|
70
|
+
if (!apiKey) {
|
|
71
|
+
throw new Error("API key is required. Provide it via options.apiKey or set window.CALLAI_API_KEY");
|
|
72
|
+
}
|
|
73
|
+
// Select the appropriate strategy based on model and schema
|
|
74
|
+
const schemaStrategy = (0, strategies_1.chooseSchemaStrategy)(options.model, schema);
|
|
75
|
+
const model = schemaStrategy.model;
|
|
76
|
+
const endpoint = options.endpoint || "https://openrouter.ai/api/v1/chat/completions";
|
|
77
|
+
// Handle both string prompts and message arrays for backward compatibility
|
|
78
|
+
const messages = Array.isArray(prompt)
|
|
79
|
+
? prompt
|
|
80
|
+
: [{ role: "user", content: prompt }];
|
|
81
|
+
// Build request parameters
|
|
82
|
+
const requestParams = {
|
|
83
|
+
model: model,
|
|
84
|
+
stream: options.stream === true,
|
|
85
|
+
messages: messages,
|
|
86
|
+
};
|
|
87
|
+
// Support for multimodal content (like images)
|
|
88
|
+
if (options.modalities && options.modalities.length > 0) {
|
|
89
|
+
requestParams.modalities = options.modalities;
|
|
90
|
+
}
|
|
91
|
+
// Apply the strategy's request preparation
|
|
92
|
+
const strategyParams = schemaStrategy.prepareRequest(schema, messages);
|
|
93
|
+
// If the strategy returns custom messages, use those instead
|
|
94
|
+
if (strategyParams.messages) {
|
|
95
|
+
requestParams.messages = strategyParams.messages;
|
|
96
|
+
}
|
|
97
|
+
// Add all other strategy parameters
|
|
98
|
+
Object.entries(strategyParams).forEach(([key, value]) => {
|
|
99
|
+
if (key !== "messages") {
|
|
100
|
+
requestParams[key] = value;
|
|
101
|
+
}
|
|
102
|
+
});
|
|
103
|
+
// Add any other options provided, but exclude internal keys
|
|
104
|
+
Object.entries(options).forEach(([key, value]) => {
|
|
105
|
+
if (!["apiKey", "model", "endpoint", "stream", "schema"].includes(key)) {
|
|
106
|
+
requestParams[key] = value;
|
|
107
|
+
}
|
|
108
|
+
});
|
|
109
|
+
const requestOptions = {
|
|
110
|
+
method: "POST",
|
|
111
|
+
headers: {
|
|
112
|
+
Authorization: `Bearer ${apiKey}`,
|
|
113
|
+
"Content-Type": "application/json",
|
|
114
|
+
},
|
|
115
|
+
body: JSON.stringify(requestParams),
|
|
116
|
+
};
|
|
117
|
+
return { apiKey, model, endpoint, requestOptions, schemaStrategy };
|
|
118
|
+
}
|
|
119
|
+
/**
|
|
120
|
+
* Internal implementation for non-streaming API calls
|
|
121
|
+
*/
|
|
122
|
+
async function callAINonStreaming(prompt, options = {}) {
|
|
123
|
+
try {
|
|
124
|
+
const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, options);
|
|
125
|
+
const response = await fetch(endpoint, requestOptions);
|
|
126
|
+
let result;
|
|
127
|
+
// For Claude, use text() instead of json() to avoid potential hanging
|
|
128
|
+
if (/claude/i.test(model)) {
|
|
129
|
+
try {
|
|
130
|
+
result = await extractClaudeResponse(response);
|
|
131
|
+
}
|
|
132
|
+
catch (error) {
|
|
133
|
+
return handleApiError(error, "Claude API response processing failed");
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
else {
|
|
137
|
+
result = await response.json();
|
|
138
|
+
}
|
|
139
|
+
// Handle error responses
|
|
140
|
+
if (result.error) {
|
|
141
|
+
console.error("API returned an error:", result.error);
|
|
142
|
+
return JSON.stringify({
|
|
143
|
+
error: result.error,
|
|
144
|
+
message: result.error.message || "API returned an error",
|
|
145
|
+
});
|
|
146
|
+
}
|
|
147
|
+
// Extract content from the response
|
|
148
|
+
const content = extractContent(result, schemaStrategy);
|
|
149
|
+
// Process the content based on model type
|
|
150
|
+
return schemaStrategy.processResponse(content);
|
|
151
|
+
}
|
|
152
|
+
catch (error) {
|
|
153
|
+
return handleApiError(error, "Non-streaming API call");
|
|
154
|
+
}
|
|
155
|
+
}
|
|
156
|
+
/**
|
|
157
|
+
* Extract content from API response accounting for different formats
|
|
158
|
+
*/
|
|
159
|
+
function extractContent(result, schemaStrategy) {
|
|
160
|
+
// Find tool use content or normal content
|
|
161
|
+
let content;
|
|
162
|
+
// Extract tool use content if necessary
|
|
163
|
+
if (schemaStrategy.strategy === "tool_mode" &&
|
|
164
|
+
result.stop_reason === "tool_use") {
|
|
165
|
+
// Try to find tool_use block in different response formats
|
|
166
|
+
if (result.content && Array.isArray(result.content)) {
|
|
167
|
+
const toolUseBlock = result.content.find((block) => block.type === "tool_use");
|
|
168
|
+
if (toolUseBlock) {
|
|
169
|
+
content = toolUseBlock;
|
|
170
|
+
}
|
|
171
|
+
}
|
|
172
|
+
if (!content && result.choices && Array.isArray(result.choices)) {
|
|
173
|
+
const choice = result.choices[0];
|
|
174
|
+
if (choice.message && Array.isArray(choice.message.content)) {
|
|
175
|
+
const toolUseBlock = choice.message.content.find((block) => block.type === "tool_use");
|
|
176
|
+
if (toolUseBlock) {
|
|
177
|
+
content = toolUseBlock;
|
|
178
|
+
}
|
|
179
|
+
}
|
|
180
|
+
}
|
|
181
|
+
}
|
|
182
|
+
// If no tool use content was found, use the standard message content
|
|
183
|
+
if (!content) {
|
|
184
|
+
if (!result.choices || !result.choices.length) {
|
|
185
|
+
throw new Error("Invalid response format from API");
|
|
186
|
+
}
|
|
187
|
+
content = result.choices[0]?.message?.content || "";
|
|
188
|
+
}
|
|
189
|
+
return content;
|
|
190
|
+
}
|
|
191
|
+
/**
|
|
192
|
+
* Extract response from Claude API with timeout handling
|
|
193
|
+
*/
|
|
194
|
+
async function extractClaudeResponse(response) {
|
|
195
|
+
let textResponse;
|
|
196
|
+
const textPromise = response.text();
|
|
197
|
+
const timeoutPromise = new Promise((_resolve, reject) => {
|
|
198
|
+
setTimeout(() => {
|
|
199
|
+
reject(new Error("Text extraction timed out after 5 seconds"));
|
|
200
|
+
}, 5000);
|
|
201
|
+
});
|
|
202
|
+
try {
|
|
203
|
+
textResponse = (await Promise.race([
|
|
204
|
+
textPromise,
|
|
205
|
+
timeoutPromise,
|
|
206
|
+
]));
|
|
207
|
+
}
|
|
208
|
+
catch (textError) {
|
|
209
|
+
console.error(`Text extraction timed out or failed:`, textError);
|
|
210
|
+
throw new Error("Claude response text extraction timed out. This is likely an issue with the Claude API's response format.");
|
|
211
|
+
}
|
|
212
|
+
try {
|
|
213
|
+
return JSON.parse(textResponse);
|
|
214
|
+
}
|
|
215
|
+
catch (err) {
|
|
216
|
+
console.error(`Failed to parse Claude response as JSON:`, err);
|
|
217
|
+
throw new Error(`Failed to parse Claude response as JSON: ${err}`);
|
|
218
|
+
}
|
|
219
|
+
}
|
|
220
|
+
/**
|
|
221
|
+
* Internal implementation for streaming API calls
|
|
222
|
+
*/
|
|
223
|
+
async function* callAIStreaming(prompt, options = {}) {
|
|
224
|
+
try {
|
|
225
|
+
const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, { ...options, stream: true });
|
|
226
|
+
const response = await fetch(endpoint, requestOptions);
|
|
227
|
+
if (!response.ok) {
|
|
228
|
+
const errorText = await response.text();
|
|
229
|
+
console.error(`API Error: ${response.status} ${response.statusText}`, errorText);
|
|
230
|
+
throw new Error(`API returned error ${response.status}: ${response.statusText}`);
|
|
231
|
+
}
|
|
232
|
+
// Handle streaming response
|
|
233
|
+
if (!response.body) {
|
|
234
|
+
throw new Error("Response body is undefined - API endpoint may not support streaming");
|
|
235
|
+
}
|
|
236
|
+
const reader = response.body.getReader();
|
|
237
|
+
const decoder = new TextDecoder();
|
|
238
|
+
let completeText = "";
|
|
239
|
+
let chunkCount = 0;
|
|
240
|
+
let toolCallsAssembled = "";
|
|
241
|
+
while (true) {
|
|
242
|
+
const { done, value } = await reader.read();
|
|
243
|
+
if (done) {
|
|
244
|
+
break;
|
|
245
|
+
}
|
|
246
|
+
const chunk = decoder.decode(value);
|
|
247
|
+
const lines = chunk.split("\n").filter((line) => line.trim() !== "");
|
|
248
|
+
for (const line of lines) {
|
|
249
|
+
if (line.startsWith("data: ")) {
|
|
250
|
+
// Skip [DONE] marker or OPENROUTER PROCESSING lines
|
|
251
|
+
if (line.includes("[DONE]") ||
|
|
252
|
+
line.includes("OPENROUTER PROCESSING")) {
|
|
253
|
+
continue;
|
|
254
|
+
}
|
|
255
|
+
try {
|
|
256
|
+
const jsonLine = line.replace("data: ", "");
|
|
257
|
+
if (!jsonLine.trim()) {
|
|
258
|
+
continue;
|
|
259
|
+
}
|
|
260
|
+
chunkCount++;
|
|
261
|
+
// Parse the JSON chunk
|
|
262
|
+
const json = JSON.parse(jsonLine);
|
|
263
|
+
// Handle tool use response - Claude with schema cases
|
|
264
|
+
const isClaudeWithSchema = /claude/i.test(model) && schemaStrategy.strategy === "tool_mode";
|
|
265
|
+
if (isClaudeWithSchema) {
|
|
266
|
+
// Claude streaming tool calls - need to assemble arguments
|
|
267
|
+
if (json.choices && json.choices.length > 0) {
|
|
268
|
+
const choice = json.choices[0];
|
|
269
|
+
// Handle finish reason tool_calls
|
|
270
|
+
if (choice.finish_reason === "tool_calls") {
|
|
271
|
+
try {
|
|
272
|
+
// Parse the assembled JSON
|
|
273
|
+
completeText = toolCallsAssembled;
|
|
274
|
+
yield completeText;
|
|
275
|
+
continue;
|
|
276
|
+
}
|
|
277
|
+
catch (e) {
|
|
278
|
+
console.error("[callAIStreaming] Error parsing assembled tool call:", e);
|
|
279
|
+
}
|
|
280
|
+
}
|
|
281
|
+
// Assemble tool_calls arguments from delta
|
|
282
|
+
if (choice.delta && choice.delta.tool_calls) {
|
|
283
|
+
const toolCall = choice.delta.tool_calls[0];
|
|
284
|
+
if (toolCall &&
|
|
285
|
+
toolCall.function &&
|
|
286
|
+
toolCall.function.arguments !== undefined) {
|
|
287
|
+
toolCallsAssembled += toolCall.function.arguments;
|
|
288
|
+
// We don't yield here to avoid partial JSON
|
|
289
|
+
}
|
|
290
|
+
}
|
|
291
|
+
}
|
|
292
|
+
}
|
|
293
|
+
// Handle tool use response - old format
|
|
294
|
+
if (isClaudeWithSchema &&
|
|
295
|
+
(json.stop_reason === "tool_use" || json.type === "tool_use")) {
|
|
296
|
+
// First try direct tool use object format
|
|
297
|
+
if (json.type === "tool_use") {
|
|
298
|
+
completeText = schemaStrategy.processResponse(json);
|
|
299
|
+
yield completeText;
|
|
300
|
+
continue;
|
|
301
|
+
}
|
|
302
|
+
// Extract the tool use content
|
|
303
|
+
if (json.content && Array.isArray(json.content)) {
|
|
304
|
+
const toolUseBlock = json.content.find((block) => block.type === "tool_use");
|
|
305
|
+
if (toolUseBlock) {
|
|
306
|
+
completeText = schemaStrategy.processResponse(toolUseBlock);
|
|
307
|
+
yield completeText;
|
|
308
|
+
continue;
|
|
309
|
+
}
|
|
310
|
+
}
|
|
311
|
+
// Find tool_use in assistant's content blocks
|
|
312
|
+
if (json.choices && Array.isArray(json.choices)) {
|
|
313
|
+
const choice = json.choices[0];
|
|
314
|
+
if (choice.message && Array.isArray(choice.message.content)) {
|
|
315
|
+
const toolUseBlock = choice.message.content.find((block) => block.type === "tool_use");
|
|
316
|
+
if (toolUseBlock) {
|
|
317
|
+
completeText = schemaStrategy.processResponse(toolUseBlock);
|
|
318
|
+
yield completeText;
|
|
319
|
+
continue;
|
|
320
|
+
}
|
|
321
|
+
}
|
|
322
|
+
// Handle case where the tool use is in the delta
|
|
323
|
+
if (choice.delta && Array.isArray(choice.delta.content)) {
|
|
324
|
+
const toolUseBlock = choice.delta.content.find((block) => block.type === "tool_use");
|
|
325
|
+
if (toolUseBlock) {
|
|
326
|
+
completeText = schemaStrategy.processResponse(toolUseBlock);
|
|
327
|
+
yield completeText;
|
|
328
|
+
continue;
|
|
329
|
+
}
|
|
330
|
+
}
|
|
331
|
+
}
|
|
332
|
+
}
|
|
333
|
+
// Extract content from the delta
|
|
334
|
+
if (json.choices?.[0]?.delta?.content !== undefined) {
|
|
335
|
+
const content = json.choices[0].delta.content || "";
|
|
336
|
+
// Treat all models the same - yield as content arrives
|
|
337
|
+
completeText += content;
|
|
338
|
+
yield schemaStrategy.processResponse(completeText);
|
|
339
|
+
}
|
|
340
|
+
// Handle message content format (non-streaming deltas)
|
|
341
|
+
else if (json.choices?.[0]?.message?.content !== undefined) {
|
|
342
|
+
const content = json.choices[0].message.content || "";
|
|
343
|
+
completeText += content;
|
|
344
|
+
yield schemaStrategy.processResponse(completeText);
|
|
345
|
+
}
|
|
346
|
+
// Handle content blocks for Claude/Anthropic response format
|
|
347
|
+
else if (json.choices?.[0]?.message?.content &&
|
|
348
|
+
Array.isArray(json.choices[0].message.content)) {
|
|
349
|
+
const contentBlocks = json.choices[0].message.content;
|
|
350
|
+
// Find text or tool_use blocks
|
|
351
|
+
for (const block of contentBlocks) {
|
|
352
|
+
if (block.type === "text") {
|
|
353
|
+
completeText += block.text || "";
|
|
354
|
+
}
|
|
355
|
+
else if (isClaudeWithSchema && block.type === "tool_use") {
|
|
356
|
+
completeText = schemaStrategy.processResponse(block);
|
|
357
|
+
break; // We found what we need
|
|
358
|
+
}
|
|
359
|
+
}
|
|
360
|
+
yield schemaStrategy.processResponse(completeText);
|
|
361
|
+
}
|
|
362
|
+
}
|
|
363
|
+
catch (e) {
|
|
364
|
+
console.error(`[callAIStreaming] Error parsing JSON chunk:`, e);
|
|
365
|
+
}
|
|
366
|
+
}
|
|
367
|
+
}
|
|
368
|
+
}
|
|
369
|
+
// If we have assembled tool calls but haven't yielded them yet
|
|
370
|
+
if (toolCallsAssembled && (!completeText || completeText.length === 0)) {
|
|
371
|
+
return toolCallsAssembled;
|
|
372
|
+
}
|
|
373
|
+
// Ensure the final return has proper, processed content
|
|
374
|
+
return schemaStrategy.processResponse(completeText);
|
|
375
|
+
}
|
|
376
|
+
catch (error) {
|
|
377
|
+
return handleApiError(error, "Streaming API call");
|
|
378
|
+
}
|
|
379
|
+
}
|
package/dist/index.d.ts
CHANGED
|
@@ -1,55 +1,7 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* call-ai: A lightweight library for making AI API calls
|
|
3
3
|
*/
|
|
4
|
-
export
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
export interface Schema {
|
|
9
|
-
/**
|
|
10
|
-
* Optional schema name - will be sent to OpenRouter if provided
|
|
11
|
-
* If not specified, defaults to "result"
|
|
12
|
-
*/
|
|
13
|
-
name?: string;
|
|
14
|
-
/**
|
|
15
|
-
* Properties defining the structure of your schema
|
|
16
|
-
*/
|
|
17
|
-
properties: Record<string, any>;
|
|
18
|
-
/**
|
|
19
|
-
* Fields that are required in the response (defaults to all properties)
|
|
20
|
-
*/
|
|
21
|
-
required?: string[];
|
|
22
|
-
/**
|
|
23
|
-
* Whether to allow fields not defined in properties (defaults to false)
|
|
24
|
-
*/
|
|
25
|
-
additionalProperties?: boolean;
|
|
26
|
-
/**
|
|
27
|
-
* Any additional schema properties to pass through
|
|
28
|
-
*/
|
|
29
|
-
[key: string]: any;
|
|
30
|
-
}
|
|
31
|
-
export interface CallAIOptions {
|
|
32
|
-
apiKey?: string;
|
|
33
|
-
model?: string;
|
|
34
|
-
endpoint?: string;
|
|
35
|
-
stream?: boolean;
|
|
36
|
-
schema?: Schema | null;
|
|
37
|
-
[key: string]: any;
|
|
38
|
-
}
|
|
39
|
-
export interface AIResponse {
|
|
40
|
-
text: string;
|
|
41
|
-
usage?: {
|
|
42
|
-
promptTokens: number;
|
|
43
|
-
completionTokens: number;
|
|
44
|
-
totalTokens: number;
|
|
45
|
-
};
|
|
46
|
-
model: string;
|
|
47
|
-
}
|
|
48
|
-
/**
|
|
49
|
-
* Make an AI API call with the given options
|
|
50
|
-
* @param prompt User prompt as string or an array of message objects
|
|
51
|
-
* @param options Configuration options including optional schema for structured output
|
|
52
|
-
* @returns A Promise that resolves to the complete response string when streaming is disabled,
|
|
53
|
-
* or an AsyncGenerator that yields partial responses when streaming is enabled
|
|
54
|
-
*/
|
|
55
|
-
export declare function callAI(prompt: string | Message[], options?: CallAIOptions): Promise<string> | AsyncGenerator<string, string, unknown>;
|
|
4
|
+
export * from "./types";
|
|
5
|
+
export { callAI } from "./api";
|
|
6
|
+
export * from "./strategies";
|
|
7
|
+
export * from "./utils";
|
package/dist/index.js
CHANGED
|
@@ -2,131 +2,27 @@
|
|
|
2
2
|
/**
|
|
3
3
|
* call-ai: A lightweight library for making AI API calls
|
|
4
4
|
*/
|
|
5
|
-
Object.
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
* @param options Configuration options including optional schema for structured output
|
|
11
|
-
* @returns A Promise that resolves to the complete response string when streaming is disabled,
|
|
12
|
-
* or an AsyncGenerator that yields partial responses when streaming is enabled
|
|
13
|
-
*/
|
|
14
|
-
function callAI(prompt, options = {}) {
|
|
15
|
-
// Handle non-streaming mode (default)
|
|
16
|
-
if (options.stream !== true) {
|
|
17
|
-
return callAINonStreaming(prompt, options);
|
|
18
|
-
}
|
|
19
|
-
// Handle streaming mode
|
|
20
|
-
return callAIStreaming(prompt, options);
|
|
21
|
-
}
|
|
22
|
-
/**
|
|
23
|
-
* Prepare request parameters common to both streaming and non-streaming calls
|
|
24
|
-
*/
|
|
25
|
-
function prepareRequestParams(prompt, options) {
|
|
26
|
-
const apiKey = options.apiKey || (typeof window !== 'undefined' ? window.CALLAI_API_KEY : null);
|
|
27
|
-
const model = options.model || 'openrouter/auto';
|
|
28
|
-
const endpoint = options.endpoint || 'https://openrouter.ai/api/v1/chat/completions';
|
|
29
|
-
const schema = options.schema || null;
|
|
30
|
-
if (!apiKey) {
|
|
31
|
-
throw new Error('API key is required. Provide it via options.apiKey or set window.CALLAI_API_KEY');
|
|
5
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
6
|
+
if (k2 === undefined) k2 = k;
|
|
7
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
8
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
9
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
32
10
|
}
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
response_format: {
|
|
52
|
-
type: 'json_schema',
|
|
53
|
-
json_schema: {
|
|
54
|
-
// Always include name, with default "result" if not provided in schema
|
|
55
|
-
name: schema.name || "result",
|
|
56
|
-
type: 'object',
|
|
57
|
-
properties: schema.properties || {},
|
|
58
|
-
required: schema.required || Object.keys(schema.properties || {}),
|
|
59
|
-
additionalProperties: schema.additionalProperties !== undefined
|
|
60
|
-
? schema.additionalProperties
|
|
61
|
-
: false,
|
|
62
|
-
// Copy any additional schema properties (excluding properties we've already handled)
|
|
63
|
-
...Object.fromEntries(Object.entries(schema).filter(([key]) => !['name', 'properties', 'required', 'additionalProperties'].includes(key)))
|
|
64
|
-
}
|
|
65
|
-
}
|
|
66
|
-
})
|
|
67
|
-
})
|
|
68
|
-
};
|
|
69
|
-
return { apiKey, model, endpoint, requestOptions };
|
|
70
|
-
}
|
|
71
|
-
/**
|
|
72
|
-
* Internal implementation for non-streaming API calls
|
|
73
|
-
*/
|
|
74
|
-
async function callAINonStreaming(prompt, options = {}) {
|
|
75
|
-
try {
|
|
76
|
-
const { endpoint, requestOptions } = prepareRequestParams(prompt, options);
|
|
77
|
-
const response = await fetch(endpoint, requestOptions);
|
|
78
|
-
const result = await response.json();
|
|
79
|
-
const content = result.choices[0]?.message?.content || '';
|
|
80
|
-
return content;
|
|
81
|
-
}
|
|
82
|
-
catch (error) {
|
|
83
|
-
console.error("AI call failed:", error);
|
|
84
|
-
return JSON.stringify({
|
|
85
|
-
error,
|
|
86
|
-
message: "Sorry, I couldn't process that request."
|
|
87
|
-
});
|
|
88
|
-
}
|
|
89
|
-
}
|
|
90
|
-
/**
|
|
91
|
-
* Internal implementation for streaming API calls
|
|
92
|
-
*/
|
|
93
|
-
async function* callAIStreaming(prompt, options = {}) {
|
|
94
|
-
try {
|
|
95
|
-
const { endpoint, requestOptions } = prepareRequestParams(prompt, { ...options, stream: true });
|
|
96
|
-
const response = await fetch(endpoint, requestOptions);
|
|
97
|
-
// Handle streaming response
|
|
98
|
-
const reader = response.body.getReader();
|
|
99
|
-
const decoder = new TextDecoder();
|
|
100
|
-
let text = '';
|
|
101
|
-
while (true) {
|
|
102
|
-
const { done, value } = await reader.read();
|
|
103
|
-
if (done)
|
|
104
|
-
break;
|
|
105
|
-
const chunk = decoder.decode(value);
|
|
106
|
-
const lines = chunk.split('\n').filter(line => line.trim() !== '');
|
|
107
|
-
for (const line of lines) {
|
|
108
|
-
if (line.startsWith('data: ')) {
|
|
109
|
-
if (line.includes('[DONE]'))
|
|
110
|
-
continue;
|
|
111
|
-
try {
|
|
112
|
-
const json = JSON.parse(line.replace('data: ', ''));
|
|
113
|
-
const content = json.choices[0]?.delta?.content || '';
|
|
114
|
-
text += content;
|
|
115
|
-
yield text;
|
|
116
|
-
}
|
|
117
|
-
catch (e) {
|
|
118
|
-
console.error("Error parsing chunk:", e);
|
|
119
|
-
}
|
|
120
|
-
}
|
|
121
|
-
}
|
|
122
|
-
}
|
|
123
|
-
return text;
|
|
124
|
-
}
|
|
125
|
-
catch (error) {
|
|
126
|
-
console.error("AI call failed:", error);
|
|
127
|
-
return JSON.stringify({
|
|
128
|
-
error,
|
|
129
|
-
message: "Sorry, I couldn't process that request."
|
|
130
|
-
});
|
|
131
|
-
}
|
|
132
|
-
}
|
|
11
|
+
Object.defineProperty(o, k2, desc);
|
|
12
|
+
}) : (function(o, m, k, k2) {
|
|
13
|
+
if (k2 === undefined) k2 = k;
|
|
14
|
+
o[k2] = m[k];
|
|
15
|
+
}));
|
|
16
|
+
var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
|
17
|
+
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
|
|
18
|
+
};
|
|
19
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
20
|
+
exports.callAI = void 0;
|
|
21
|
+
// Export public types
|
|
22
|
+
__exportStar(require("./types"), exports);
|
|
23
|
+
// Export API function
|
|
24
|
+
var api_1 = require("./api");
|
|
25
|
+
Object.defineProperty(exports, "callAI", { enumerable: true, get: function () { return api_1.callAI; } });
|
|
26
|
+
// Export strategies and utilities for advanced use cases
|
|
27
|
+
__exportStar(require("./strategies"), exports);
|
|
28
|
+
__exportStar(require("./utils"), exports);
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __exportStar = (this && this.__exportStar) || function(m, exports) {
|
|
14
|
+
for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
|
|
15
|
+
};
|
|
16
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
17
|
+
/**
|
|
18
|
+
* Strategy exports
|
|
19
|
+
*/
|
|
20
|
+
__exportStar(require("./model-strategies"), exports);
|
|
21
|
+
__exportStar(require("./strategy-selector"), exports);
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
/**
 * Model strategies for different AI models.
 * Each strategy knows how to shape a request for schema-constrained output
 * and how to normalize the model's response back into a JSON string.
 */
import { ModelStrategy } from "../types";
/**
 * OpenAI/GPT strategy for handling JSON schema
 * (uses the `response_format: json_schema` request field).
 */
export declare const openAIStrategy: ModelStrategy;
/**
 * Gemini strategy for handling JSON schema (similar to OpenAI;
 * additionally strips markdown code fences from responses).
 */
export declare const geminiStrategy: ModelStrategy;
/**
 * Claude strategy using tool mode for structured output.
 */
export declare const claudeStrategy: ModelStrategy;
/**
 * System message approach for other models (Llama, DeepSeek, etc.):
 * the schema is described in an injected system prompt.
 */
export declare const systemMessageStrategy: ModelStrategy;
/**
 * Default strategy for models without schema (request passed through unchanged).
 */
export declare const defaultStrategy: ModelStrategy;
|
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.defaultStrategy = exports.systemMessageStrategy = exports.claudeStrategy = exports.geminiStrategy = exports.openAIStrategy = void 0;
|
|
4
|
+
const utils_1 = require("../utils");
|
|
5
|
+
/**
|
|
6
|
+
* OpenAI/GPT strategy for handling JSON schema
|
|
7
|
+
*/
|
|
8
|
+
exports.openAIStrategy = {
|
|
9
|
+
name: "openai",
|
|
10
|
+
prepareRequest: (schema, messages) => {
|
|
11
|
+
if (!schema)
|
|
12
|
+
return {};
|
|
13
|
+
// Process schema for JSON schema approach
|
|
14
|
+
const requiredFields = schema.required || Object.keys(schema.properties || {});
|
|
15
|
+
const processedSchema = (0, utils_1.recursivelyAddAdditionalProperties)({
|
|
16
|
+
type: "object",
|
|
17
|
+
properties: schema.properties || {},
|
|
18
|
+
required: requiredFields,
|
|
19
|
+
additionalProperties: schema.additionalProperties !== undefined
|
|
20
|
+
? schema.additionalProperties
|
|
21
|
+
: false,
|
|
22
|
+
// Copy any additional schema properties
|
|
23
|
+
...Object.fromEntries(Object.entries(schema).filter(([key]) => ![
|
|
24
|
+
"name",
|
|
25
|
+
"properties",
|
|
26
|
+
"required",
|
|
27
|
+
"additionalProperties",
|
|
28
|
+
].includes(key))),
|
|
29
|
+
});
|
|
30
|
+
return {
|
|
31
|
+
response_format: {
|
|
32
|
+
type: "json_schema",
|
|
33
|
+
json_schema: {
|
|
34
|
+
name: schema.name || "result",
|
|
35
|
+
strict: true,
|
|
36
|
+
schema: processedSchema,
|
|
37
|
+
},
|
|
38
|
+
},
|
|
39
|
+
};
|
|
40
|
+
},
|
|
41
|
+
processResponse: (content) => {
|
|
42
|
+
if (typeof content !== "string") {
|
|
43
|
+
return JSON.stringify(content);
|
|
44
|
+
}
|
|
45
|
+
return content;
|
|
46
|
+
},
|
|
47
|
+
};
|
|
48
|
+
/**
|
|
49
|
+
* Gemini strategy for handling JSON schema (similar to OpenAI)
|
|
50
|
+
*/
|
|
51
|
+
exports.geminiStrategy = {
|
|
52
|
+
name: "gemini",
|
|
53
|
+
prepareRequest: exports.openAIStrategy.prepareRequest,
|
|
54
|
+
processResponse: (content) => {
|
|
55
|
+
if (typeof content !== "string") {
|
|
56
|
+
return JSON.stringify(content);
|
|
57
|
+
}
|
|
58
|
+
// Try to extract JSON from content if it might be wrapped
|
|
59
|
+
const jsonMatch = content.match(/```json\s*([\s\S]*?)\s*```/) ||
|
|
60
|
+
content.match(/```\s*([\s\S]*?)\s*```/) ||
|
|
61
|
+
content.match(/\{[\s\S]*\}/) || [null, content];
|
|
62
|
+
return jsonMatch[1] || content;
|
|
63
|
+
},
|
|
64
|
+
};
|
|
65
|
+
/**
|
|
66
|
+
* Claude strategy using tool mode for structured output
|
|
67
|
+
*/
|
|
68
|
+
exports.claudeStrategy = {
|
|
69
|
+
name: "anthropic",
|
|
70
|
+
shouldForceStream: true,
|
|
71
|
+
prepareRequest: (schema, messages) => {
|
|
72
|
+
if (!schema)
|
|
73
|
+
return {};
|
|
74
|
+
// Process schema for tool use - format for OpenRouter/Claude
|
|
75
|
+
const processedSchema = {
|
|
76
|
+
type: "object",
|
|
77
|
+
properties: schema.properties || {},
|
|
78
|
+
required: schema.required || Object.keys(schema.properties || {}),
|
|
79
|
+
additionalProperties: schema.additionalProperties !== undefined
|
|
80
|
+
? schema.additionalProperties
|
|
81
|
+
: false,
|
|
82
|
+
};
|
|
83
|
+
return {
|
|
84
|
+
tools: [
|
|
85
|
+
{
|
|
86
|
+
type: "function",
|
|
87
|
+
function: {
|
|
88
|
+
name: schema.name || "generate_structured_data",
|
|
89
|
+
description: "Generate data according to the required schema",
|
|
90
|
+
parameters: processedSchema,
|
|
91
|
+
},
|
|
92
|
+
},
|
|
93
|
+
],
|
|
94
|
+
tool_choice: {
|
|
95
|
+
type: "function",
|
|
96
|
+
function: {
|
|
97
|
+
name: schema.name || "generate_structured_data",
|
|
98
|
+
},
|
|
99
|
+
},
|
|
100
|
+
};
|
|
101
|
+
},
|
|
102
|
+
processResponse: (content) => {
|
|
103
|
+
// Handle tool use response
|
|
104
|
+
if (typeof content === "object") {
|
|
105
|
+
if (content.type === "tool_use") {
|
|
106
|
+
return JSON.stringify(content.input);
|
|
107
|
+
}
|
|
108
|
+
// Handle newer tool_calls format
|
|
109
|
+
if (content.tool_calls &&
|
|
110
|
+
Array.isArray(content.tool_calls) &&
|
|
111
|
+
content.tool_calls.length > 0) {
|
|
112
|
+
const toolCall = content.tool_calls[0];
|
|
113
|
+
if (toolCall.function && toolCall.function.arguments) {
|
|
114
|
+
try {
|
|
115
|
+
// Try to parse as JSON first
|
|
116
|
+
return toolCall.function.arguments;
|
|
117
|
+
}
|
|
118
|
+
catch (e) {
|
|
119
|
+
// Return as is if not valid JSON
|
|
120
|
+
return JSON.stringify(toolCall.function.arguments);
|
|
121
|
+
}
|
|
122
|
+
}
|
|
123
|
+
}
|
|
124
|
+
return JSON.stringify(content);
|
|
125
|
+
}
|
|
126
|
+
if (typeof content !== "string") {
|
|
127
|
+
return JSON.stringify(content);
|
|
128
|
+
}
|
|
129
|
+
// Try to extract JSON from content if it might be wrapped
|
|
130
|
+
const jsonMatch = content.match(/```json\s*([\s\S]*?)\s*```/) ||
|
|
131
|
+
content.match(/```\s*([\s\S]*?)\s*```/) ||
|
|
132
|
+
content.match(/\{[\s\S]*\}/) || [null, content];
|
|
133
|
+
return jsonMatch[1] || content;
|
|
134
|
+
},
|
|
135
|
+
};
|
|
136
|
+
/**
|
|
137
|
+
* System message approach for other models (Llama, DeepSeek, etc.)
|
|
138
|
+
*/
|
|
139
|
+
exports.systemMessageStrategy = {
|
|
140
|
+
name: "system_message",
|
|
141
|
+
prepareRequest: (schema, messages) => {
|
|
142
|
+
if (!schema)
|
|
143
|
+
return { messages };
|
|
144
|
+
// Check if there's already a system message
|
|
145
|
+
const hasSystemMessage = messages.some((m) => m.role === "system");
|
|
146
|
+
if (!hasSystemMessage) {
|
|
147
|
+
// Build a schema description
|
|
148
|
+
const schemaProperties = Object.entries(schema.properties || {})
|
|
149
|
+
.map(([key, value]) => {
|
|
150
|
+
const type = value.type || "string";
|
|
151
|
+
const description = value.description
|
|
152
|
+
? ` // ${value.description}`
|
|
153
|
+
: "";
|
|
154
|
+
return ` "${key}": ${type}${description}`;
|
|
155
|
+
})
|
|
156
|
+
.join(",\n");
|
|
157
|
+
const systemMessage = {
|
|
158
|
+
role: "system",
|
|
159
|
+
content: `Please return your response as JSON following this schema exactly:\n{\n${schemaProperties}\n}\nDo not include any explanation or text outside of the JSON object.`,
|
|
160
|
+
};
|
|
161
|
+
// Return modified messages array with system message prepended
|
|
162
|
+
return { messages: [systemMessage, ...messages] };
|
|
163
|
+
}
|
|
164
|
+
return { messages };
|
|
165
|
+
},
|
|
166
|
+
processResponse: (content) => {
|
|
167
|
+
if (typeof content !== "string") {
|
|
168
|
+
return JSON.stringify(content);
|
|
169
|
+
}
|
|
170
|
+
// Try to extract JSON from content if it might be wrapped
|
|
171
|
+
const jsonMatch = content.match(/```json\s*([\s\S]*?)\s*```/) ||
|
|
172
|
+
content.match(/```\s*([\s\S]*?)\s*```/) ||
|
|
173
|
+
content.match(/\{[\s\S]*\}/) || [null, content];
|
|
174
|
+
return jsonMatch[1] || content;
|
|
175
|
+
},
|
|
176
|
+
};
|
|
177
|
+
/**
|
|
178
|
+
* Default strategy for models without schema
|
|
179
|
+
*/
|
|
180
|
+
exports.defaultStrategy = {
|
|
181
|
+
name: "default",
|
|
182
|
+
prepareRequest: () => ({}),
|
|
183
|
+
processResponse: (content) => typeof content === "string" ? content : JSON.stringify(content),
|
|
184
|
+
};
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
/**
 * Strategy selection logic for different AI models.
 */
import { Schema, SchemaStrategy } from "../types";
/**
 * Choose the appropriate schema strategy based on model and schema.
 *
 * @param model - model identifier (e.g. "anthropic/claude-3"); when omitted,
 *   a default model is picked based on whether a schema was supplied
 * @param schema - structured-output schema, or null for plain text
 * @returns the resolved model plus the request/response handlers to use
 */
export declare function chooseSchemaStrategy(model: string | undefined, schema: Schema | null): SchemaStrategy;
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.chooseSchemaStrategy = chooseSchemaStrategy;
|
|
4
|
+
const model_strategies_1 = require("./model-strategies");
|
|
5
|
+
/**
 * Choose the appropriate schema strategy based on model and schema.
 *
 * Resolution order matters: Claude → Gemini → GPT-4 Turbo (system message)
 * → other OpenAI/GPT (json_schema) → everything else (system message).
 *
 * @param model  Model identifier; when omitted, defaults to "openai/gpt-4o"
 *               if a schema is supplied, else "openrouter/auto".
 * @param schema Structured-output schema, or null for plain text.
 * @returns A SchemaStrategy binding the resolved model to the handlers.
 */
function chooseSchemaStrategy(model, schema) {
    // Default model if not provided.
    const resolvedModel = model || (schema ? "openai/gpt-4o" : "openrouter/auto");
    // Bind a model strategy into the uniform SchemaStrategy result shape
    // (previously this object literal was duplicated in every branch).
    const asResult = (strategyType, impl) => ({
        strategy: strategyType,
        model: resolvedModel,
        prepareRequest: impl.prepareRequest,
        processResponse: impl.processResponse,
        shouldForceStream: !!impl.shouldForceStream,
    });
    // No schema case - use default strategy.
    if (!schema) {
        return asResult("none", model_strategies_1.defaultStrategy);
    }
    // Claude models use tool mode for structured output.
    if (/claude/i.test(resolvedModel)) {
        return asResult("tool_mode", model_strategies_1.claudeStrategy);
    }
    // Gemini models accept OpenAI-style JSON schema.
    if (/gemini/i.test(resolvedModel)) {
        return asResult("json_schema", model_strategies_1.geminiStrategy);
    }
    // GPT-4 Turbo models - use system message approach.
    // NOTE: must precede the broader /openai|gpt/ check below.
    if (/gpt-4-turbo/i.test(resolvedModel)) {
        return asResult("system_message", model_strategies_1.systemMessageStrategy);
    }
    // Other OpenAI/GPT models support strict JSON schema.
    if (/openai|gpt/i.test(resolvedModel)) {
        return asResult("json_schema", model_strategies_1.openAIStrategy);
    }
    // Models known to need the system-message approach (Llama 3, DeepSeek),
    // and any unknown model with a schema, fall back to system_message.
    return asResult("system_message", model_strategies_1.systemMessageStrategy);
}
|
package/dist/types.d.ts
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
/**
 * Type definitions for call-ai
 */
/**
 * Content types for multimodal messages
 */
export type ContentItem = {
    type: "text" | "image_url";
    text?: string;
    image_url?: {
        url: string;
    };
};
/**
 * Message type supporting both simple string content and multimodal content
 */
export type Message = {
    role: "user" | "system" | "assistant";
    content: string | ContentItem[];
};
/**
 * JSON-schema-like description of the structured output requested from a model.
 */
export interface Schema {
    /**
     * Optional schema name - will be sent to OpenRouter if provided
     * If not specified, defaults to "result"
     */
    name?: string;
    /**
     * Properties defining the structure of your schema
     */
    properties: Record<string, any>;
    /**
     * Fields that are required in the response (defaults to all properties)
     */
    required?: string[];
    /**
     * Whether to allow fields not defined in properties (defaults to false)
     */
    additionalProperties?: boolean;
    /**
     * Any additional schema properties to pass through
     */
    [key: string]: any;
}
/**
 * Strategy interface for handling different model types
 */
export interface ModelStrategy {
    /** Strategy identifier, e.g. "openai" or "anthropic" */
    name: string;
    /** Builds the model-specific request fields for the given schema/messages */
    prepareRequest: (schema: Schema | null, messages: Message[]) => any;
    /** Normalizes a model response into a plain string (JSON text when a schema was used) */
    processResponse: (content: string | any) => string;
    /** When true, the request is always sent in streaming mode */
    shouldForceStream?: boolean;
}
/**
 * Schema strategies for different model types
 */
export type SchemaStrategyType = "json_schema" | "tool_mode" | "system_message" | "none";
/**
 * Strategy selection result
 */
export interface SchemaStrategy {
    strategy: SchemaStrategyType;
    /** Resolved model identifier (may differ from the caller's input when defaulted) */
    model: string;
    prepareRequest: ModelStrategy["prepareRequest"];
    processResponse: ModelStrategy["processResponse"];
    shouldForceStream: boolean;
}
export interface CallAIOptions {
    /**
     * API key for authentication
     */
    apiKey?: string;
    /**
     * Model ID to use for the request
     */
    model?: string;
    /**
     * API endpoint to send the request to
     */
    endpoint?: string;
    /**
     * Whether to stream the response
     */
    stream?: boolean;
    /**
     * Schema for structured output
     */
    schema?: Schema | null;
    /**
     * Modalities to enable in the response (e.g., ["image", "text"])
     * Used for multimodal models that can generate images
     */
    modalities?: string[];
    /**
     * Any additional options to pass to the API
     */
    [key: string]: any;
}
export interface AIResponse {
    /** Response text (JSON string when a schema was requested) */
    text: string;
    /** Token accounting, when the provider reports it */
    usage?: {
        promptTokens: number;
        completionTokens: number;
        totalTokens: number;
    };
    /** Model that actually served the request */
    model: string;
}
|
package/dist/types.js
ADDED
package/dist/utils.d.ts
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
/**
 * Utility functions for call-ai
 */
/**
 * Recursively adds additionalProperties: false to all object types in a schema.
 * This is needed for OpenAI's strict schema validation in streaming mode.
 *
 * @param schema - JSON-schema-like object (plain data, not validated here)
 * @returns a processed copy of the schema
 */
export declare function recursivelyAddAdditionalProperties(schema: any): any;
|
package/dist/utils.js
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Utility functions for call-ai
|
|
4
|
+
*/
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.recursivelyAddAdditionalProperties = recursivelyAddAdditionalProperties;
|
|
7
|
+
/**
|
|
8
|
+
* Recursively adds additionalProperties: false to all object types in a schema
|
|
9
|
+
* This is needed for OpenAI's strict schema validation in streaming mode
|
|
10
|
+
*/
|
|
11
|
+
function recursivelyAddAdditionalProperties(schema) {
|
|
12
|
+
// Clone to avoid modifying the original
|
|
13
|
+
const result = { ...schema };
|
|
14
|
+
// If this is an object type, ensure it has additionalProperties: false
|
|
15
|
+
if (result.type === "object") {
|
|
16
|
+
// Set additionalProperties if not already set
|
|
17
|
+
if (result.additionalProperties === undefined) {
|
|
18
|
+
result.additionalProperties = false;
|
|
19
|
+
}
|
|
20
|
+
// Process nested properties if they exist
|
|
21
|
+
if (result.properties) {
|
|
22
|
+
result.properties = { ...result.properties };
|
|
23
|
+
// Set required if not already set - OpenAI requires this for all nested objects
|
|
24
|
+
if (result.required === undefined) {
|
|
25
|
+
result.required = Object.keys(result.properties);
|
|
26
|
+
}
|
|
27
|
+
// Check each property
|
|
28
|
+
Object.keys(result.properties).forEach((key) => {
|
|
29
|
+
const prop = result.properties[key];
|
|
30
|
+
// If property is an object or array type, recursively process it
|
|
31
|
+
if (prop && typeof prop === "object") {
|
|
32
|
+
result.properties[key] = recursivelyAddAdditionalProperties(prop);
|
|
33
|
+
// For nested objects, ensure they also have all properties in their required field
|
|
34
|
+
if (prop.type === "object" && prop.properties) {
|
|
35
|
+
prop.required = Object.keys(prop.properties);
|
|
36
|
+
}
|
|
37
|
+
}
|
|
38
|
+
});
|
|
39
|
+
}
|
|
40
|
+
}
|
|
41
|
+
// Handle nested objects in arrays
|
|
42
|
+
if (result.type === "array" &&
|
|
43
|
+
result.items &&
|
|
44
|
+
typeof result.items === "object") {
|
|
45
|
+
result.items = recursivelyAddAdditionalProperties(result.items);
|
|
46
|
+
// If array items are objects, ensure they have all properties in required
|
|
47
|
+
if (result.items.type === "object" && result.items.properties) {
|
|
48
|
+
result.items.required = Object.keys(result.items.properties);
|
|
49
|
+
}
|
|
50
|
+
}
|
|
51
|
+
return result;
|
|
52
|
+
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "call-ai",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.6.0",
|
|
4
4
|
"description": "Lightweight library for making AI API calls with streaming support",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"browser": "dist/index.js",
|
|
@@ -16,12 +16,6 @@
|
|
|
16
16
|
"bugs": {
|
|
17
17
|
"url": "https://github.com/fireproof-storage/call-ai/issues"
|
|
18
18
|
},
|
|
19
|
-
"scripts": {
|
|
20
|
-
"build": "tsc",
|
|
21
|
-
"test": "jest",
|
|
22
|
-
"prepublishOnly": "npm run build",
|
|
23
|
-
"typecheck": "tsc --noEmit"
|
|
24
|
-
},
|
|
25
19
|
"keywords": [
|
|
26
20
|
"ai",
|
|
27
21
|
"llm",
|
|
@@ -36,11 +30,23 @@
|
|
|
36
30
|
"devDependencies": {
|
|
37
31
|
"@types/jest": "^29.5.3",
|
|
38
32
|
"@types/node": "^20.4.2",
|
|
33
|
+
"@types/node-fetch": "^2.6.12",
|
|
34
|
+
"dotenv": "^16.4.7",
|
|
39
35
|
"jest": "^29.6.1",
|
|
36
|
+
"node-fetch": "^3.3.2",
|
|
37
|
+
"prettier": "^3.5.3",
|
|
40
38
|
"ts-jest": "^29.1.1",
|
|
41
39
|
"typescript": "^5.1.6"
|
|
42
40
|
},
|
|
43
41
|
"engines": {
|
|
44
42
|
"node": ">=14.0.0"
|
|
43
|
+
},
|
|
44
|
+
"scripts": {
|
|
45
|
+
"build": "tsc",
|
|
46
|
+
"test": "jest",
|
|
47
|
+
"test:integration": "jest --testMatch=\"**/test/integration.test.ts\" --testPathIgnorePatterns=''",
|
|
48
|
+
"typecheck": "tsc --noEmit",
|
|
49
|
+
"format": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"",
|
|
50
|
+
"coverage": "jest --coverage"
|
|
45
51
|
}
|
|
46
|
-
}
|
|
52
|
+
}
|