@juspay/neurolink 9.49.0 → 9.50.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/browser/neurolink.min.js +270 -270
- package/dist/lib/providers/litellm.js +2 -2
- package/dist/lib/proxy/proxyTracer.d.ts +14 -0
- package/dist/lib/proxy/proxyTracer.js +43 -0
- package/dist/lib/server/routes/claudeProxyRoutes.js +112 -33
- package/dist/lib/utils/imageCompressor.d.ts +45 -0
- package/dist/lib/utils/imageCompressor.js +137 -0
- package/dist/providers/litellm.js +2 -2
- package/dist/proxy/proxyTracer.d.ts +14 -0
- package/dist/proxy/proxyTracer.js +43 -0
- package/dist/server/routes/claudeProxyRoutes.js +112 -33
- package/dist/utils/imageCompressor.d.ts +45 -0
- package/dist/utils/imageCompressor.js +136 -0
- package/package.json +1 -1
@@ -15,7 +15,7 @@ import { join } from "node:path";
 import { buildStableClaudeCodeBillingHeader, CLAUDE_CLI_USER_AGENT, CLAUDE_CODE_OAUTH_BETAS, getOrCreateClaudeCodeIdentity, parseClaudeCodeUserId, } from "../../auth/anthropicOAuth.js";
 import { parseQuotaHeaders, saveAccountQuota, } from "../../proxy/accountQuota.js";
 import { buildClaudeError, ClaudeStreamSerializer, generateToolUseId, parseClaudeRequest, serializeClaudeResponse, } from "../../proxy/claudeFormat.js";
-import { ProxyTracer } from "../../proxy/proxyTracer.js";
+import { ProxyTracer, recordFallbackAttempt } from "../../proxy/proxyTracer.js";
 import { createRawStreamCapture } from "../../proxy/rawStreamCapture.js";
 import { logBodyCapture, logRequest, logRequestAttempt, logStreamError, } from "../../proxy/requestLogger.js";
 import { createSSEInterceptor } from "../../proxy/sseInterceptor.js";
@@ -1246,43 +1246,64 @@ async function executeClaudeFallbackTranslation(args) {
     if (body.stream) {
         const streamResult = await ctx.neurolink.stream(options);
         const serializer = new ClaudeStreamSerializer(body.model, 0);
+        // Eagerly consume stream so errors fire synchronously and the
+        // fallback loop in tryConfiguredClaudeFallbackChain can catch them.
+        const frames = [];
+        let collectedText = "";
+        for (const frame of serializer.start()) {
+            frames.push(frame);
+        }
+        for await (const chunk of streamResult.stream) {
+            const text = extractText(chunk);
+            if (text) {
+                collectedText += text;
+                for (const frame of serializer.pushDelta(text)) {
+                    frames.push(frame);
                 }
             }
-        }
+        }
+        const toolCalls = streamResult.toolCalls ?? [];
+        if (!hasTranslatedOutput(collectedText, toolCalls)) {
+            throw new Error(`Translated provider ${providerLabel} returned no content or tool calls`);
+        }
+        if (toolCalls.length) {
+            for (const toolCall of toolCalls) {
+                const toolName = toolCall.toolName ??
+                    toolCall.name ??
+                    "unknown";
+                for (const frame of serializer.pushToolUse(generateToolUseId(), toolName, extractToolArgs(toolCall))) {
+                    frames.push(frame);
                 }
             }
-        const reason = streamResult.finishReason ?? "end_turn";
-        const resolvedUsage = extractUsageFromStreamResult(streamResult.usage);
-        for (const frame of serializer.finish(resolvedUsage.output, reason)) {
-            yield frame;
-        }
         }
+        const reason = streamResult.finishReason ?? "end_turn";
+        const resolvedUsage = extractUsageFromStreamResult(streamResult.usage);
+        for (const frame of serializer.finish(resolvedUsage.output, reason)) {
+            frames.push(frame);
+        }
+        // Telemetry AFTER validation — not before like the old lazy path
         tracer?.end(200, Date.now() - requestStartTime);
         recordFinalSuccess();
-        logFinalRequest(200, "", providerLabel
+        logFinalRequest(200, "", providerLabel, undefined, undefined, {
+            inputTokens: resolvedUsage.input,
+            outputTokens: resolvedUsage.output,
+        });
+        const bufferedBody = frames.join("");
+        logProxyBody({
+            phase: "client_response",
+            headers: { "content-type": "text/event-stream" },
+            body: bufferedBody,
+            bodySize: Buffer.byteLength(bufferedBody, "utf8"),
+            contentType: "text/event-stream",
+            responseStatus: 200,
+            durationMs: Date.now() - requestStartTime,
+        });
+        // Return generator that yields pre-buffered frames
+        async function* sseGenerator() {
+            for (const frame of frames) {
+                yield frame;
+            }
+        }
         return sseGenerator();
     }
     const streamResult = await ctx.neurolink.stream(options);
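The hunk above replaces a lazy async generator with an eagerly buffered one: the upstream stream is consumed and validated inside executeClaudeFallbackTranslation itself, so a provider error or empty response throws before any result is handed back, where the try/catch in tryConfiguredClaudeFallbackChain can still move on to the next fallback. A minimal standalone sketch of the same pattern follows; the function and parameter names are hypothetical and not part of the package.

    // Sketch of the eager-buffering pattern (hypothetical names, not package API).
    // Consuming the upstream iterable before returning means any throw happens
    // inside the caller's try/catch, not later when the client drains the SSE.
    async function buildBufferedSse(
      upstream: AsyncIterable<string>,
    ): Promise<AsyncGenerator<string>> {
      const frames: string[] = [];
      for await (const frame of upstream) {
        frames.push(frame);
      }
      if (frames.length === 0) {
        // Surfacing "no output" synchronously lets a fallback chain try another provider.
        throw new Error("upstream produced no frames");
      }
      async function* replay(): AsyncGenerator<string> {
        for (const frame of frames) {
          yield frame; // the client receives identical frames, just pre-buffered
        }
      }
      return replay();
    }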
@@ -1346,6 +1367,11 @@ async function tryConfiguredClaudeFallbackChain(args) {
             : "auto-provider";
         logger.always(`[proxy] skipping fallback ${label}: ${skipped.reason}`);
     }
+    tracer?.setFallbackInfo({
+        triggered: true,
+        attemptCount: fallbackPlan.attempts.slice(1).length,
+        reason: fallbackPolicyReason ?? "all_anthropic_accounts_exhausted",
+    });
     for (const fallback of fallbackPlan.attempts.slice(1)) {
         if (!fallback.provider || !fallback.model) {
             continue;
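For reference, the payload passed to tracer?.setFallbackInfo here and again in the success path further down carries roughly the fields below. This shape is inferred from the call sites only; the real declaration lives in the new proxyTracer.d.ts and may differ.

    // Shape inferred from the setFallbackInfo() call sites in this file (illustrative only).
    interface FallbackInfoSketch {
      triggered: boolean;
      provider?: string;    // filled in once a fallback attempt succeeds
      model?: string;
      attemptCount: number; // fallbackPlan.attempts.slice(1).length
      reason: string;       // e.g. "all_anthropic_accounts_exhausted" or "fallback_success"
    }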
@@ -1354,6 +1380,7 @@ async function tryConfiguredClaudeFallbackChain(args) {
         if (!availability.available) {
             logger.always(`[proxy] fallback ${fallback.provider}/${fallback.model} health-check failed (${availability.reason ?? "provider unavailable"}), attempting anyway`);
         }
+        const fallbackStart = Date.now();
         try {
             logger.always(`[proxy] fallback → ${fallback.provider}/${fallback.model}`);
             const options = buildProxyFallbackOptions(parsedFallbackRequest, {
@@ -1370,13 +1397,57 @@ async function tryConfiguredClaudeFallbackChain(args) {
                 options: options,
                 providerLabel: fallback.provider,
             });
+            recordFallbackAttempt({
+                provider: fallback.provider,
+                model: fallback.model,
+                status: "success",
+                durationMs: Date.now() - fallbackStart,
+            });
+            tracer?.setFallbackInfo({
+                triggered: true,
+                provider: fallback.provider,
+                model: fallback.model,
+                attemptCount: fallbackPlan.attempts.slice(1).length,
+                reason: "fallback_success",
+            });
             return {
                 response,
                 fallbackPolicyReason,
             };
         }
         catch (fallbackErr) {
+            const errMsg = fallbackErr instanceof Error
+                ? fallbackErr.message
+                : String(fallbackErr);
+            let errorClass = "unknown";
+            if (errMsg.includes("Rate limit") ||
+                errMsg.includes("rate_limit") ||
+                errMsg.includes("max_parallel_requests")) {
+                errorClass = "rate_limit";
+            }
+            else if (errMsg.includes("context length") ||
+                errMsg.includes("ContextWindowExceeded")) {
+                errorClass = "context_overflow";
+            }
+            else if (errMsg.includes("no content or tool calls") ||
+                errMsg.includes("NoOutputGenerated")) {
+                errorClass = "empty_response";
+            }
+            else if (errMsg.includes("thinking_level") ||
+                errMsg.includes("Field required")) {
+                errorClass = "schema_mismatch";
+            }
+            else if (errMsg.includes("Resource exhausted")) {
+                errorClass = "provider_quota";
+            }
+            logger.always(`[proxy] fallback ${fallback.provider}/${fallback.model} failed [${errorClass}]: ${errMsg}`);
+            recordFallbackAttempt({
+                provider: fallback.provider,
+                model: fallback.model,
+                status: "failure",
+                errorMessage: `[${errorClass}] ${errMsg}`,
+                durationMs: Date.now() - fallbackStart,
+            });
         }
     }
     return {
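The catch block above buckets fallback failures by substring matching before logging and recording them. Restated as a standalone helper for readability (the shipped code keeps this logic inline; the function name below is illustrative):

    // Illustrative restatement of the inline error classification above.
    type FallbackErrorClass =
      | "rate_limit"
      | "context_overflow"
      | "empty_response"
      | "schema_mismatch"
      | "provider_quota"
      | "unknown";

    function classifyFallbackError(errMsg: string): FallbackErrorClass {
      if (/Rate limit|rate_limit|max_parallel_requests/.test(errMsg)) return "rate_limit";
      if (/context length|ContextWindowExceeded/.test(errMsg)) return "context_overflow";
      if (/no content or tool calls|NoOutputGenerated/.test(errMsg)) return "empty_response";
      if (/thinking_level|Field required/.test(errMsg)) return "schema_mismatch";
      if (/Resource exhausted/.test(errMsg)) return "provider_quota";
      return "unknown";
    }

The resulting class is prefixed onto the errorMessage passed to recordFallbackAttempt, e.g. "[rate_limit] " followed by the provider's original message.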
@@ -3541,7 +3612,15 @@ function shouldOmitImagesForTarget(provider, model) {
     return provider === "litellm" && model === "open-large";
 }
 function shouldOmitThinkingConfigForTarget(provider, model) {
+    if (provider === "litellm") {
+        return true;
+    }
+    if (provider !== "vertex") {
+        return false;
+    }
+    // Only Gemini 2.5+ and 3.x support thinking_level on Vertex.
+    const m = model?.toLowerCase() ?? "";
+    return !/gemini-(2\.5|3)/.test(m);
 }
 function extractToolArgs(toolCall) {
     return (toolCall.args ??
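With the new body, thinking config is always dropped for litellm targets, always kept for non-Vertex providers, and kept on Vertex only when the model name matches Gemini 2.5 or 3.x. Illustrative calls (model names chosen for the example, not taken from the package's tests):

    // Results implied by the regex /gemini-(2\.5|3)/ above (illustrative only).
    shouldOmitThinkingConfigForTarget("litellm", "gpt-4o");          // true: always omit for litellm
    shouldOmitThinkingConfigForTarget("openai", "gpt-4o");           // false: non-vertex providers keep it
    shouldOmitThinkingConfigForTarget("vertex", "gemini-2.5-pro");   // false: supports thinking_level
    shouldOmitThinkingConfigForTarget("vertex", "gemini-3-pro");     // false
    shouldOmitThinkingConfigForTarget("vertex", "gemini-1.5-pro");   // true: no thinking_level support
    shouldOmitThinkingConfigForTarget("vertex", undefined);          // true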
@@ -0,0 +1,45 @@
+import type { ProviderName } from "../types/providers.js";
+declare const SUPPORTED_FORMATS: readonly ["jpeg", "png", "webp"];
+type SupportedFormat = (typeof SUPPORTED_FORMATS)[number];
+/**
+ * Provider-specific image size limits in bytes
+ */
+export declare const PROVIDER_IMAGE_LIMITS: Record<ProviderName, number>;
+export interface CompressionOptions {
+    provider: ProviderName;
+    quality?: number;
+    maxDimension?: number;
+    format?: SupportedFormat;
+}
+export interface CompressionResult {
+    buffer: Buffer;
+    originalSize: number;
+    compressedSize: number;
+    compressionRatio: number;
+    metadata: {
+        width: number;
+        height: number;
+        format: string;
+    };
+}
+/**
+ * Compress an image to meet provider-specific size limits
+ * @param imageBuffer - Input image buffer
+ * @param options - Compression options including provider name
+ * @returns Compressed image buffer with metadata
+ */
+export declare function compressImage(imageBuffer: Buffer, options: CompressionOptions): Promise<CompressionResult>;
+/**
+ * Check if an image needs compression for a specific provider
+ * @param imageBuffer - Input image buffer
+ * @param provider - AI provider name
+ * @returns True if compression is needed
+ */
+export declare function needsCompression(imageBuffer: Buffer, provider: ProviderName): boolean;
+/**
+ * Get the size limit for a specific provider
+ * @param provider - AI provider name
+ * @returns Size limit in bytes
+ */
+export declare function getProviderSizeLimit(provider: ProviderName): number;
+export {};
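A usage sketch for the new helpers, written against the declarations above. The deep dist import path mirrors the file list at the top of this diff and is an assumption; the package may expose these helpers through a different entry point.

    // Hedged usage sketch; the import path is assumed from the dist layout, not documented.
    import { readFile } from "node:fs/promises";
    import {
      compressImage,
      needsCompression,
      getProviderSizeLimit,
    } from "@juspay/neurolink/dist/utils/imageCompressor.js";

    const buffer = await readFile("./screenshot.png");
    if (needsCompression(buffer, "anthropic")) {
      // Anthropic's limit is 5MB per PROVIDER_IMAGE_LIMITS; quality starts at 80
      // and drops in steps of 10 until the result fits or quality 10 is reached.
      const result = await compressImage(buffer, {
        provider: "anthropic",
        maxDimension: 2048, // optional downscale before quality reduction
        format: "webp",     // optional re-encode; defaults to the source format (else jpeg)
      });
      console.log(
        `compressed ${result.originalSize} -> ${result.compressedSize} bytes ` +
          `(ratio ${result.compressionRatio.toFixed(2)}, limit ${getProviderSizeLimit("anthropic")})`,
      );
    }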
@@ -0,0 +1,136 @@
+import sharp from "sharp";
+import { withTimeout } from "./async/index.js";
+const SUPPORTED_FORMATS = ["jpeg", "png", "webp"];
+const IMAGE_COMPRESSION_TIMEOUT_MS = 30_000;
+/**
+ * Provider-specific image size limits in bytes
+ */
+export const PROVIDER_IMAGE_LIMITS = {
+    openai: 20 * 1024 * 1024, // 20MB
+    "openai-compatible": 20 * 1024 * 1024, // 20MB (same as OpenAI)
+    anthropic: 5 * 1024 * 1024, // 5MB
+    "google-ai": 4 * 1024 * 1024, // 4MB
+    vertex: 4 * 1024 * 1024, // 4MB
+    bedrock: 5 * 1024 * 1024, // 5MB
+    azure: 20 * 1024 * 1024, // 20MB
+    mistral: 5 * 1024 * 1024, // 5MB
+    huggingface: 10 * 1024 * 1024, // 10MB
+    ollama: 100 * 1024 * 1024, // 100MB (local, no strict limit)
+    openrouter: 20 * 1024 * 1024, // 20MB
+    sagemaker: 5 * 1024 * 1024, // 5MB
+    litellm: 20 * 1024 * 1024, // 20MB (proxy, use OpenAI default)
+    auto: 5 * 1024 * 1024, // 5MB (conservative fallback)
+};
+/**
+ * Compress an image to meet provider-specific size limits
+ * @param imageBuffer - Input image buffer
+ * @param options - Compression options including provider name
+ * @returns Compressed image buffer with metadata
+ */
+export async function compressImage(imageBuffer, options) {
+    const { provider, quality = 80, maxDimension, format } = options;
+    const sizeLimit = PROVIDER_IMAGE_LIMITS[provider];
+    const originalSize = imageBuffer.length;
+    // Get original metadata
+    const image = sharp(imageBuffer);
+    const metadata = await withTimeout(image.metadata(), IMAGE_COMPRESSION_TIMEOUT_MS, "Timed out reading image metadata");
+    if (!metadata.width || !metadata.height) {
+        throw new Error("Unable to read image dimensions");
+    }
+    // If image is already under limit and no format conversion needed, return as-is
+    if (originalSize <= sizeLimit && !format && !maxDimension) {
+        return {
+            buffer: imageBuffer,
+            originalSize,
+            compressedSize: originalSize,
+            compressionRatio: 1,
+            metadata: {
+                width: metadata.width,
+                height: metadata.height,
+                format: metadata.format ?? "unknown",
+            },
+        };
+    }
+    // Prepare compression pipeline
+    let pipeline = sharp(imageBuffer);
+    // Resize if needed
+    if (maxDimension) {
+        const needsResize = metadata.width > maxDimension || metadata.height > maxDimension;
+        if (needsResize) {
+            pipeline = pipeline.resize(maxDimension, maxDimension, {
+                fit: "inside",
+                withoutEnlargement: true,
+            });
+        }
+    }
+    // Resolve target format — validate metadata.format against supported set
+    const rawFormat = metadata.format;
+    const targetFormat = format ??
+        (SUPPORTED_FORMATS.includes(rawFormat)
+            ? rawFormat
+            : "jpeg");
+    const applyFormat = (p, q) => {
+        switch (targetFormat) {
+            case "jpeg":
+                return p.jpeg({ quality: q, mozjpeg: true });
+            case "png":
+                return p.png({ quality: q, compressionLevel: 9 });
+            case "webp":
+                return p.webp({ quality: q });
+        }
+    };
+    // Compress
+    let compressedBuffer = await withTimeout(applyFormat(pipeline, quality).toBuffer(), IMAGE_COMPRESSION_TIMEOUT_MS, "Timed out compressing image");
+    let currentQuality = quality;
+    // Iteratively reduce quality if still over limit
+    // Note: the sharp pipeline must be rebuilt on each iteration because
+    // sharp does not support modifying quality settings after creation.
+    while (compressedBuffer.length > sizeLimit && currentQuality > 10) {
+        currentQuality -= 10;
+        let p = sharp(imageBuffer);
+        if (maxDimension) {
+            p = p.resize(maxDimension, maxDimension, {
+                fit: "inside",
+                withoutEnlargement: true,
+            });
+        }
+        compressedBuffer = await withTimeout(applyFormat(p, currentQuality).toBuffer(), IMAGE_COMPRESSION_TIMEOUT_MS, "Timed out compressing image");
+    }
+    // Final check
+    if (compressedBuffer.length > sizeLimit) {
+        throw new Error(`Unable to compress image to ${sizeLimit} bytes for provider ${provider}. ` +
+            `Final size: ${compressedBuffer.length} bytes. ` +
+            `Try using a smaller image or lower maxDimension.`);
+    }
+    // Get final metadata
+    const finalMetadata = await withTimeout(sharp(compressedBuffer).metadata(), IMAGE_COMPRESSION_TIMEOUT_MS, "Timed out reading compressed image metadata");
+    return {
+        buffer: compressedBuffer,
+        originalSize,
+        compressedSize: compressedBuffer.length,
+        compressionRatio: originalSize / compressedBuffer.length,
+        metadata: {
+            width: finalMetadata.width ?? 0,
+            height: finalMetadata.height ?? 0,
+            format: targetFormat,
+        },
+    };
+}
+/**
+ * Check if an image needs compression for a specific provider
+ * @param imageBuffer - Input image buffer
+ * @param provider - AI provider name
+ * @returns True if compression is needed
+ */
+export function needsCompression(imageBuffer, provider) {
+    const sizeLimit = PROVIDER_IMAGE_LIMITS[provider];
+    return imageBuffer.length > sizeLimit;
+}
+/**
+ * Get the size limit for a specific provider
+ * @param provider - AI provider name
+ * @returns Size limit in bytes
+ */
+export function getProviderSizeLimit(provider) {
+    return PROVIDER_IMAGE_LIMITS[provider];
+}
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "9.49.0",
+  "version": "9.50.1",
   "packageManager": "pnpm@10.15.1",
   "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 13 providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
   "author": {
|