@juspay/neurolink 9.50.0 → 9.50.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/browser/neurolink.min.js +292 -292
  3. package/dist/context/contextCompactor.js +2 -2
  4. package/dist/context/stages/slidingWindowTruncator.d.ts +1 -1
  5. package/dist/context/stages/slidingWindowTruncator.js +3 -3
  6. package/dist/core/modules/Utilities.d.ts +5 -0
  7. package/dist/core/modules/Utilities.js +29 -18
  8. package/dist/lib/context/contextCompactor.js +2 -2
  9. package/dist/lib/context/stages/slidingWindowTruncator.d.ts +1 -1
  10. package/dist/lib/context/stages/slidingWindowTruncator.js +3 -3
  11. package/dist/lib/core/modules/Utilities.d.ts +5 -0
  12. package/dist/lib/core/modules/Utilities.js +29 -18
  13. package/dist/lib/mcp/externalServerManager.d.ts +5 -0
  14. package/dist/lib/mcp/externalServerManager.js +24 -2
  15. package/dist/lib/neurolink.js +37 -3
  16. package/dist/lib/providers/litellm.js +2 -2
  17. package/dist/lib/proxy/proxyTracer.d.ts +14 -0
  18. package/dist/lib/proxy/proxyTracer.js +43 -0
  19. package/dist/lib/server/routes/claudeProxyRoutes.js +112 -33
  20. package/dist/lib/services/server/ai/observability/instrumentation.js +39 -1
  21. package/dist/lib/types/externalMcp.d.ts +7 -0
  22. package/dist/mcp/externalServerManager.d.ts +5 -0
  23. package/dist/mcp/externalServerManager.js +24 -2
  24. package/dist/neurolink.js +37 -3
  25. package/dist/providers/litellm.js +2 -2
  26. package/dist/proxy/proxyTracer.d.ts +14 -0
  27. package/dist/proxy/proxyTracer.js +43 -0
  28. package/dist/server/routes/claudeProxyRoutes.js +112 -33
  29. package/dist/services/server/ai/observability/instrumentation.js +39 -1
  30. package/dist/types/externalMcp.d.ts +7 -0
  31. package/package.json +1 -1
@@ -15,7 +15,7 @@ import { join } from "node:path";
15
15
  import { buildStableClaudeCodeBillingHeader, CLAUDE_CLI_USER_AGENT, CLAUDE_CODE_OAUTH_BETAS, getOrCreateClaudeCodeIdentity, parseClaudeCodeUserId, } from "../../auth/anthropicOAuth.js";
16
16
  import { parseQuotaHeaders, saveAccountQuota, } from "../../proxy/accountQuota.js";
17
17
  import { buildClaudeError, ClaudeStreamSerializer, generateToolUseId, parseClaudeRequest, serializeClaudeResponse, } from "../../proxy/claudeFormat.js";
18
- import { ProxyTracer } from "../../proxy/proxyTracer.js";
18
+ import { ProxyTracer, recordFallbackAttempt } from "../../proxy/proxyTracer.js";
19
19
  import { createRawStreamCapture } from "../../proxy/rawStreamCapture.js";
20
20
  import { logBodyCapture, logRequest, logRequestAttempt, logStreamError, } from "../../proxy/requestLogger.js";
21
21
  import { createSSEInterceptor } from "../../proxy/sseInterceptor.js";
@@ -1246,43 +1246,64 @@ async function executeClaudeFallbackTranslation(args) {
1246
1246
  if (body.stream) {
1247
1247
  const streamResult = await ctx.neurolink.stream(options);
1248
1248
  const serializer = new ClaudeStreamSerializer(body.model, 0);
1249
- async function* sseGenerator() {
1250
- for (const frame of serializer.start()) {
1251
- yield frame;
1252
- }
1253
- let collectedText = "";
1254
- for await (const chunk of streamResult.stream) {
1255
- const text = extractText(chunk);
1256
- if (text) {
1257
- collectedText += text;
1258
- for (const frame of serializer.pushDelta(text)) {
1259
- yield frame;
1260
- }
1249
+ // Eagerly consume stream so errors fire synchronously and the
1250
+ // fallback loop in tryConfiguredClaudeFallbackChain can catch them.
1251
+ const frames = [];
1252
+ let collectedText = "";
1253
+ for (const frame of serializer.start()) {
1254
+ frames.push(frame);
1255
+ }
1256
+ for await (const chunk of streamResult.stream) {
1257
+ const text = extractText(chunk);
1258
+ if (text) {
1259
+ collectedText += text;
1260
+ for (const frame of serializer.pushDelta(text)) {
1261
+ frames.push(frame);
1261
1262
  }
1262
1263
  }
1263
- const toolCalls = streamResult.toolCalls ?? [];
1264
- if (!hasTranslatedOutput(collectedText, toolCalls)) {
1265
- throw new Error(`Translated provider ${providerLabel} returned no content or tool calls`);
1266
- }
1267
- if (toolCalls.length) {
1268
- for (const toolCall of toolCalls) {
1269
- const toolName = toolCall.toolName ??
1270
- toolCall.name ??
1271
- "unknown";
1272
- for (const frame of serializer.pushToolUse(generateToolUseId(), toolName, extractToolArgs(toolCall))) {
1273
- yield frame;
1274
- }
1264
+ }
1265
+ const toolCalls = streamResult.toolCalls ?? [];
1266
+ if (!hasTranslatedOutput(collectedText, toolCalls)) {
1267
+ throw new Error(`Translated provider ${providerLabel} returned no content or tool calls`);
1268
+ }
1269
+ if (toolCalls.length) {
1270
+ for (const toolCall of toolCalls) {
1271
+ const toolName = toolCall.toolName ??
1272
+ toolCall.name ??
1273
+ "unknown";
1274
+ for (const frame of serializer.pushToolUse(generateToolUseId(), toolName, extractToolArgs(toolCall))) {
1275
+ frames.push(frame);
1275
1276
  }
1276
1277
  }
1277
- const reason = streamResult.finishReason ?? "end_turn";
1278
- const resolvedUsage = extractUsageFromStreamResult(streamResult.usage);
1279
- for (const frame of serializer.finish(resolvedUsage.output, reason)) {
1280
- yield frame;
1281
- }
1282
1278
  }
1279
+ const reason = streamResult.finishReason ?? "end_turn";
1280
+ const resolvedUsage = extractUsageFromStreamResult(streamResult.usage);
1281
+ for (const frame of serializer.finish(resolvedUsage.output, reason)) {
1282
+ frames.push(frame);
1283
+ }
1284
+ // Telemetry AFTER validation — not before like the old lazy path
1283
1285
  tracer?.end(200, Date.now() - requestStartTime);
1284
1286
  recordFinalSuccess();
1285
- logFinalRequest(200, "", providerLabel);
1287
+ logFinalRequest(200, "", providerLabel, undefined, undefined, {
1288
+ inputTokens: resolvedUsage.input,
1289
+ outputTokens: resolvedUsage.output,
1290
+ });
1291
+ const bufferedBody = frames.join("");
1292
+ logProxyBody({
1293
+ phase: "client_response",
1294
+ headers: { "content-type": "text/event-stream" },
1295
+ body: bufferedBody,
1296
+ bodySize: Buffer.byteLength(bufferedBody, "utf8"),
1297
+ contentType: "text/event-stream",
1298
+ responseStatus: 200,
1299
+ durationMs: Date.now() - requestStartTime,
1300
+ });
1301
+ // Return generator that yields pre-buffered frames
1302
+ async function* sseGenerator() {
1303
+ for (const frame of frames) {
1304
+ yield frame;
1305
+ }
1306
+ }
1286
1307
  return sseGenerator();
1287
1308
  }
1288
1309
  const streamResult = await ctx.neurolink.stream(options);
@@ -1346,6 +1367,11 @@ async function tryConfiguredClaudeFallbackChain(args) {
1346
1367
  : "auto-provider";
1347
1368
  logger.always(`[proxy] skipping fallback ${label}: ${skipped.reason}`);
1348
1369
  }
1370
+ tracer?.setFallbackInfo({
1371
+ triggered: true,
1372
+ attemptCount: fallbackPlan.attempts.slice(1).length,
1373
+ reason: fallbackPolicyReason ?? "all_anthropic_accounts_exhausted",
1374
+ });
1349
1375
  for (const fallback of fallbackPlan.attempts.slice(1)) {
1350
1376
  if (!fallback.provider || !fallback.model) {
1351
1377
  continue;
@@ -1354,6 +1380,7 @@ async function tryConfiguredClaudeFallbackChain(args) {
1354
1380
  if (!availability.available) {
1355
1381
  logger.always(`[proxy] fallback ${fallback.provider}/${fallback.model} health-check failed (${availability.reason ?? "provider unavailable"}), attempting anyway`);
1356
1382
  }
1383
+ const fallbackStart = Date.now();
1357
1384
  try {
1358
1385
  logger.always(`[proxy] fallback → ${fallback.provider}/${fallback.model}`);
1359
1386
  const options = buildProxyFallbackOptions(parsedFallbackRequest, {
@@ -1370,13 +1397,57 @@ async function tryConfiguredClaudeFallbackChain(args) {
1370
1397
  options: options,
1371
1398
  providerLabel: fallback.provider,
1372
1399
  });
1400
+ recordFallbackAttempt({
1401
+ provider: fallback.provider,
1402
+ model: fallback.model,
1403
+ status: "success",
1404
+ durationMs: Date.now() - fallbackStart,
1405
+ });
1406
+ tracer?.setFallbackInfo({
1407
+ triggered: true,
1408
+ provider: fallback.provider,
1409
+ model: fallback.model,
1410
+ attemptCount: fallbackPlan.attempts.slice(1).length,
1411
+ reason: "fallback_success",
1412
+ });
1373
1413
  return {
1374
1414
  response,
1375
1415
  fallbackPolicyReason,
1376
1416
  };
1377
1417
  }
1378
1418
  catch (fallbackErr) {
1379
- logger.always(`[proxy] fallback ${fallback.provider}/${fallback.model} failed: ${fallbackErr instanceof Error ? fallbackErr.message : String(fallbackErr)}`);
1419
+ const errMsg = fallbackErr instanceof Error
1420
+ ? fallbackErr.message
1421
+ : String(fallbackErr);
1422
+ let errorClass = "unknown";
1423
+ if (errMsg.includes("Rate limit") ||
1424
+ errMsg.includes("rate_limit") ||
1425
+ errMsg.includes("max_parallel_requests")) {
1426
+ errorClass = "rate_limit";
1427
+ }
1428
+ else if (errMsg.includes("context length") ||
1429
+ errMsg.includes("ContextWindowExceeded")) {
1430
+ errorClass = "context_overflow";
1431
+ }
1432
+ else if (errMsg.includes("no content or tool calls") ||
1433
+ errMsg.includes("NoOutputGenerated")) {
1434
+ errorClass = "empty_response";
1435
+ }
1436
+ else if (errMsg.includes("thinking_level") ||
1437
+ errMsg.includes("Field required")) {
1438
+ errorClass = "schema_mismatch";
1439
+ }
1440
+ else if (errMsg.includes("Resource exhausted")) {
1441
+ errorClass = "provider_quota";
1442
+ }
1443
+ logger.always(`[proxy] fallback ${fallback.provider}/${fallback.model} failed [${errorClass}]: ${errMsg}`);
1444
+ recordFallbackAttempt({
1445
+ provider: fallback.provider,
1446
+ model: fallback.model,
1447
+ status: "failure",
1448
+ errorMessage: `[${errorClass}] ${errMsg}`,
1449
+ durationMs: Date.now() - fallbackStart,
1450
+ });
1380
1451
  }
1381
1452
  }
1382
1453
  return {
@@ -3541,7 +3612,15 @@ function shouldOmitImagesForTarget(provider, model) {
3541
3612
  return provider === "litellm" && model === "open-large";
3542
3613
  }
3543
3614
  function shouldOmitThinkingConfigForTarget(provider, model) {
3544
- return provider === "vertex" && model === "gemini-2.5-flash";
3615
+ if (provider === "litellm") {
3616
+ return true;
3617
+ }
3618
+ if (provider !== "vertex") {
3619
+ return false;
3620
+ }
3621
+ // Only Gemini 2.5+ and 3.x support thinking_level on Vertex.
3622
+ const m = model?.toLowerCase() ?? "";
3623
+ return !/gemini-(2\.5|3)/.test(m);
3545
3624
  }
3546
3625
  function extractToolArgs(toolCall) {
3547
3626
  return (toolCall.args ??
@@ -445,7 +445,45 @@ function initializeExternalOpenTelemetryMode(config, resource, otlpEndpoint, ser
445
445
  const provider = globalProvider;
446
446
  if (globalProvider && typeof provider.addSpanProcessor === "function") {
447
447
  provider.addSpanProcessor(new ContextEnricher());
448
- const skipLangfuse = config.skipLangfuseSpanProcessor === true || !langfuseProcessor;
448
+ // Auto-detect: skip if consumer already registered a LangfuseSpanProcessor.
449
+ //
450
+ // Detection strategy (ordered by robustness):
451
+ // 1. `instanceof LangfuseSpanProcessor` — reliable when both sides use
452
+ // the same @langfuse/otel package instance (same module identity).
453
+ // 2. Duck-type check for Langfuse-specific public member
454
+ // (`langfuseClient` property) — survives minification.
455
+ // 3. `constructor.name === "LangfuseSpanProcessor"` — last resort,
456
+ // brittle under minification or bundler renaming.
457
+ //
458
+ // NOTE: `_registeredSpanProcessors` is an internal OpenTelemetry field.
459
+ // If the OTel SDK removes or renames it, the array defaults to [] and
460
+ // `hasExistingLangfuse` is false — NeuroLink registers its own processor
461
+ // (same behavior as before this check). Consumers can always force skip
462
+ // via `skipLangfuseSpanProcessor: true`.
463
+ const existingProcessors = provider
464
+ ._registeredSpanProcessors ?? [];
465
+ const hasExistingLangfuse = existingProcessors.some((p) => {
466
+ if (p === null || p === undefined || typeof p !== "object") {
467
+ return false;
468
+ }
469
+ // Prefer instanceof — works when same @langfuse/otel package is shared
470
+ if (p instanceof LangfuseSpanProcessor) {
471
+ return true;
472
+ }
473
+ // Duck-type: Langfuse processor exposes a langfuseClient property
474
+ if ("langfuseClient" in p) {
475
+ return true;
476
+ }
477
+ // Fallback: constructor name (brittle under minification)
478
+ return (p.constructor?.name ===
479
+ "LangfuseSpanProcessor");
480
+ });
481
+ const skipLangfuse = config.skipLangfuseSpanProcessor === true ||
482
+ !langfuseProcessor ||
483
+ hasExistingLangfuse;
484
+ if (hasExistingLangfuse && !config.skipLangfuseSpanProcessor) {
485
+ logger.info(`${LOG_PREFIX} Auto-detected existing LangfuseSpanProcessor — skipping SDK registration to avoid duplicates`);
486
+ }
449
487
  if (!skipLangfuse && langfuseProcessor) {
450
488
  provider.addSpanProcessor(langfuseProcessor);
451
489
  }
@@ -228,6 +228,7 @@ export type ExternalMCPServerEvents = {
228
228
  /** Server status changed */
229
229
  statusChanged: {
230
230
  serverId: string;
231
+ serverName: string;
231
232
  oldStatus: ExternalMCPServerStatus;
232
233
  newStatus: ExternalMCPServerStatus;
233
234
  timestamp: Date;
@@ -235,24 +236,28 @@ export type ExternalMCPServerEvents = {
235
236
  /** Server connected successfully */
236
237
  connected: {
237
238
  serverId: string;
239
+ serverName: string;
238
240
  toolCount: number;
239
241
  timestamp: Date;
240
242
  };
241
243
  /** Server disconnected */
242
244
  disconnected: {
243
245
  serverId: string;
246
+ serverName: string;
244
247
  reason?: string;
245
248
  timestamp: Date;
246
249
  };
247
250
  /** Server failed */
248
251
  failed: {
249
252
  serverId: string;
253
+ serverName: string;
250
254
  error: string;
251
255
  timestamp: Date;
252
256
  };
253
257
  /** Tool discovered */
254
258
  toolDiscovered: {
255
259
  serverId: string;
260
+ serverName: string;
256
261
  toolName: string;
257
262
  toolInfo: ExternalMCPToolInfo;
258
263
  timestamp: Date;
@@ -260,12 +265,14 @@ export type ExternalMCPServerEvents = {
260
265
  /** Tool removed */
261
266
  toolRemoved: {
262
267
  serverId: string;
268
+ serverName: string;
263
269
  toolName: string;
264
270
  timestamp: Date;
265
271
  };
266
272
  /** Health check completed */
267
273
  healthCheck: {
268
274
  serverId: string;
275
+ serverName: string;
269
276
  health: ExternalMCPServerHealth;
270
277
  timestamp: Date;
271
278
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@juspay/neurolink",
3
- "version": "9.50.0",
3
+ "version": "9.50.2",
4
4
  "packageManager": "pnpm@10.15.1",
5
5
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 13 providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
6
6
  "author": {