@juspay/neurolink 9.55.9 → 9.55.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +8 -0
- package/dist/browser/neurolink.min.js +507 -378
- package/dist/core/modules/StreamHandler.js +12 -0
- package/dist/core/modules/ToolsManager.js +4 -0
- package/dist/index.d.ts +2 -2
- package/dist/index.js +4 -1
- package/dist/lib/core/modules/StreamHandler.js +12 -0
- package/dist/lib/core/modules/ToolsManager.js +4 -0
- package/dist/lib/index.d.ts +2 -2
- package/dist/lib/index.js +4 -1
- package/dist/lib/mcp/toolDiscoveryService.js +99 -3
- package/dist/lib/mcp/toolRegistry.js +3 -0
- package/dist/lib/neurolink.js +8 -23
- package/dist/lib/processors/media/AudioProcessor.js +22 -3
- package/dist/lib/processors/media/VideoProcessor.js +48 -11
- package/dist/lib/services/server/ai/observability/instrumentation.d.ts +26 -0
- package/dist/lib/services/server/ai/observability/instrumentation.js +98 -15
- package/dist/lib/types/processor.d.ts +27 -0
- package/dist/lib/utils/mcpErrorText.d.ts +10 -0
- package/dist/lib/utils/mcpErrorText.js +36 -0
- package/dist/lib/utils/timeout.js +6 -0
- package/dist/mcp/toolDiscoveryService.js +99 -3
- package/dist/mcp/toolRegistry.js +3 -0
- package/dist/neurolink.js +8 -23
- package/dist/processors/media/AudioProcessor.js +22 -3
- package/dist/processors/media/VideoProcessor.js +48 -11
- package/dist/services/server/ai/observability/instrumentation.d.ts +26 -0
- package/dist/services/server/ai/observability/instrumentation.js +98 -15
- package/dist/types/processor.d.ts +27 -0
- package/dist/utils/mcpErrorText.d.ts +10 -0
- package/dist/utils/mcpErrorText.js +35 -0
- package/dist/utils/timeout.js +6 -0
- package/package.json +4 -4
|
@@ -18,6 +18,7 @@ import { BatchSpanProcessor, } from "@opentelemetry/sdk-trace-base";
|
|
|
18
18
|
import { NodeTracerProvider } from "@opentelemetry/sdk-trace-node";
|
|
19
19
|
import { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION, } from "@opentelemetry/semantic-conventions";
|
|
20
20
|
import { AsyncLocalStorage } from "async_hooks";
|
|
21
|
+
import { extractMcpErrorText } from "../../../../utils/mcpErrorText.js";
|
|
21
22
|
import { logger } from "../../../../utils/logger.js";
|
|
22
23
|
const LOG_PREFIX = "[OpenTelemetry]";
|
|
23
24
|
function createOtelResource(config, serviceName) {
|
|
@@ -131,6 +132,64 @@ function _hasExternalTracerProvider() {
|
|
|
131
132
|
return false;
|
|
132
133
|
}
|
|
133
134
|
}
|
|
135
|
+
/**
 * Parse `ai.toolCall.result` on a Vercel AI SDK tool span and surface any
 * embedded MCP `{ isError: true }` as a Langfuse ERROR + status message.
 *
 * @param {Record<string, unknown>} attrs - Mutable span attributes; written in place.
 */
function applyToolCallIsErrorStatus(attrs) {
    const raw = attrs["ai.toolCall.result"];
    if (typeof raw !== "string" || raw.length === 0) {
        return;
    }
    let parsed;
    try {
        parsed = JSON.parse(raw);
    }
    catch {
        // Non-JSON result payloads carry no MCP error structure — nothing to do.
        return;
    }
    const isMcpError = Boolean(parsed) &&
        typeof parsed === "object" &&
        parsed.isError === true;
    if (!isMcpError) {
        return;
    }
    attrs["langfuse.level"] = "ERROR";
    // Always set a status_message, even when the MCP payload has non-text or
    // empty content. Without a fallback the Curator P0-1 gap reappears for
    // those failures (level=ERROR but statusMessage=null).
    const nameAttr = attrs["ai.toolCall.name"];
    const toolName = typeof nameAttr === "string" ? nameAttr : "tool";
    const errorText = extractMcpErrorText(parsed);
    attrs["langfuse.status_message"] =
        errorText || `MCP ${toolName} returned isError=true`;
}
|
|
167
|
+
/**
 * Map non-ERROR span conditions (content-filter, length, client abort, SDK
 * timeout, empty output) onto Langfuse WARNING/ERROR levels. Mutates `attrs`.
 *
 * @param {Record<string, unknown>} attrs - Mutable span attributes; written in place.
 */
function applyNonErrorLangfuseLevel(attrs) {
    // The AI SDK writes ai.finishReason; the OTel GenAI convention carries the
    // same signal (possibly as an array) under gen_ai.response.finish_reasons.
    const rawReason = attrs["ai.finishReason"] ?? attrs["gen_ai.response.finish_reasons"];
    let reasonStr;
    if (Array.isArray(rawReason)) {
        reasonStr = rawReason.join(",");
    }
    else {
        reasonStr = String(rawReason ?? "");
    }
    if (reasonStr.includes("content-filter") || reasonStr === "length") {
        attrs["langfuse.level"] = "WARNING";
        attrs["langfuse.status_message"] =
            `Generation stopped: finishReason=${reasonStr}`;
        return;
    }
    if (attrs["neurolink.no_output"] === true) {
        attrs["langfuse.level"] = "WARNING";
        attrs["langfuse.status_message"] =
            "Stream produced no output (NoOutputGeneratedError)";
        return;
    }
    if (reasonStr === "aborted") {
        attrs["langfuse.level"] = "WARNING";
        attrs["langfuse.status_message"] = "Generation aborted by client";
    }
}
|
|
134
193
|
/**
|
|
135
194
|
* Span processor that enriches spans with user and session context from AsyncLocalStorage
|
|
136
195
|
* Also extracts GenAI semantic convention attributes for Langfuse integration
|
|
@@ -459,26 +518,23 @@ class ContextEnricher {
|
|
|
459
518
|
const readableStatus = span.status;
|
|
460
519
|
try {
|
|
461
520
|
const mutableAttrs = span.attributes;
|
|
521
|
+
// Curator P0-1/P0-2: detect MCP isError pattern on AI SDK tool call spans.
|
|
522
|
+
// The AI SDK's `ai.toolCall` span stays status=UNSET when the tool
|
|
523
|
+
// *returns* { isError:true } (no exception thrown), so Langfuse sees
|
|
524
|
+
// level=DEFAULT and no status message. Parse the stringified result
|
|
525
|
+
// and surface the embedded error text.
|
|
526
|
+
if (readableSpan.name === "ai.toolCall" &&
|
|
527
|
+
readableStatus?.code !== SpanStatusCode.ERROR) {
|
|
528
|
+
applyToolCallIsErrorStatus(mutableAttrs);
|
|
529
|
+
}
|
|
462
530
|
if (readableStatus?.code === SpanStatusCode.ERROR) {
|
|
463
531
|
mutableAttrs["langfuse.level"] = "ERROR";
|
|
464
532
|
if (readableStatus.message) {
|
|
465
533
|
mutableAttrs["langfuse.status_message"] = readableStatus.message;
|
|
466
534
|
}
|
|
467
535
|
}
|
|
468
|
-
else {
|
|
469
|
-
|
|
470
|
-
// The AI SDK sets ai.finishReason on its spans; content-filter and
|
|
471
|
-
// length finish reasons indicate partial failures that deserve WARNING.
|
|
472
|
-
const finishReason = mutableAttrs["ai.finishReason"] ??
|
|
473
|
-
mutableAttrs["gen_ai.response.finish_reasons"];
|
|
474
|
-
const reasonStr = Array.isArray(finishReason)
|
|
475
|
-
? finishReason.join(",")
|
|
476
|
-
: String(finishReason ?? "");
|
|
477
|
-
if (reasonStr.includes("content-filter") || reasonStr === "length") {
|
|
478
|
-
mutableAttrs["langfuse.level"] = "WARNING";
|
|
479
|
-
mutableAttrs["langfuse.status_message"] =
|
|
480
|
-
`Generation stopped: finishReason=${reasonStr}`;
|
|
481
|
-
}
|
|
536
|
+
else if (mutableAttrs["langfuse.level"] === undefined) {
|
|
537
|
+
applyNonErrorLangfuseLevel(mutableAttrs);
|
|
482
538
|
}
|
|
483
539
|
}
|
|
484
540
|
catch {
|
|
@@ -520,9 +576,36 @@ async function createLangfuseProcessor(config) {
|
|
|
520
576
|
baseUrl: config.baseUrl || "https://cloud.langfuse.com",
|
|
521
577
|
environment: config.environment || "dev",
|
|
522
578
|
release: config.release || "v1.0.0",
|
|
523
|
-
|
|
579
|
+
// Curator P1-3: skip internal wrapper spans that duplicate ai.toolCall /
|
|
580
|
+
// ai.generateText observations in Langfuse. Wrappers still emit OTel spans
|
|
581
|
+
// for internal metrics; they just aren't forwarded to Langfuse.
|
|
582
|
+
shouldExportSpan: langfuseShouldExportSpan,
|
|
524
583
|
});
|
|
525
584
|
}
|
|
585
|
+
/**
 * True when a span is an internal NeuroLink wrapper that should NOT be sent to
 * Langfuse. Internal wrappers carry the `langfuse.internal: true` attribute.
 *
 * Exposed so host apps that bring their own `LangfuseSpanProcessor` (e.g.
 * `skipLangfuseSpanProcessor: true`, or manual registration on an existing
 * TracerProvider) can apply the same filter and avoid duplicate observations.
 */
export function isLangfuseInternalSpan(span) {
    const attrs = span.attributes;
    if (attrs === null || attrs === undefined) {
        return false;
    }
    return attrs["langfuse.internal"] === true;
}
/**
 * Drop-in `shouldExportSpan` predicate for a `LangfuseSpanProcessor` that
 * filters out NeuroLink internal wrapper spans.
 *
 * Usage in host apps:
 * ```ts
 * import { langfuseShouldExportSpan } from "@juspay/neurolink";
 * new LangfuseSpanProcessor({ ..., shouldExportSpan: langfuseShouldExportSpan });
 * ```
 */
export function langfuseShouldExportSpan({ otelSpan }) {
    return isLangfuseInternalSpan(otelSpan) === false;
}
|
|
526
609
|
async function initializeExternalOpenTelemetryMode(config, resource, otlpEndpoint, serviceName, langfuseRequested, hasLangfuseCreds) {
|
|
527
610
|
if (langfuseRequested && !hasLangfuseCreds) {
|
|
528
611
|
if (!otlpEndpoint) {
|
|
@@ -543,6 +543,33 @@ export type ProcessedYaml = ProcessedFileBase & {
|
|
|
543
543
|
/** YAML content converted to JSON string for AI consumption */
|
|
544
544
|
asJson: string | null;
|
|
545
545
|
};
|
|
546
|
+
/**
|
|
547
|
+
* Structural types for fluent-ffmpeg probe data.
|
|
548
|
+
* Defined here so the optional fluent-ffmpeg package is not required at typecheck time.
|
|
549
|
+
*/
|
|
550
|
+
export type FfprobeStream = {
|
|
551
|
+
codec_type?: string;
|
|
552
|
+
codec_name?: string;
|
|
553
|
+
width?: number;
|
|
554
|
+
height?: number;
|
|
555
|
+
r_frame_rate?: string;
|
|
556
|
+
avg_frame_rate?: string;
|
|
557
|
+
bit_rate?: number | string;
|
|
558
|
+
channels?: number;
|
|
559
|
+
sample_rate?: number | string;
|
|
560
|
+
tags?: Record<string, string | number>;
|
|
561
|
+
[key: string]: unknown;
|
|
562
|
+
};
|
|
563
|
+
export type FfprobeData = {
|
|
564
|
+
streams: FfprobeStream[];
|
|
565
|
+
format: {
|
|
566
|
+
duration?: number;
|
|
567
|
+
size?: number | string;
|
|
568
|
+
bit_rate?: number | string;
|
|
569
|
+
tags?: Record<string, string | number>;
|
|
570
|
+
[key: string]: unknown;
|
|
571
|
+
};
|
|
572
|
+
};
|
|
546
573
|
/**
|
|
547
574
|
* Structural types for exceljs objects.
|
|
548
575
|
* Defined here so the optional exceljs package is not required at typecheck time.
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Extract a human-readable error string from an MCP isError result object.
|
|
3
|
+
*
|
|
4
|
+
* Shared utility — no side effects, no dependencies on other SDK modules —
|
|
5
|
+
* so it can be imported from the neurolink.ts event loop, the telemetry
|
|
6
|
+
* instrumentation (which loads earlier), and the MCP discovery layer without
|
|
7
|
+
* creating circular imports. Any change to truncation or content-type parsing
|
|
8
|
+
* must happen here and propagate to all three surfaces.
|
|
9
|
+
*/
|
|
10
|
+
export declare function extractMcpErrorText(raw: unknown): string;
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
/**
 * Extract a human-readable error string from an MCP isError result object.
 *
 * Shared utility — no side effects, no dependencies on other SDK modules —
 * so it can be imported from the neurolink.ts event loop, the telemetry
 * instrumentation (which loads earlier), and the MCP discovery layer without
 * creating circular imports. Any change to truncation or content-type parsing
 * must happen here and propagate to all three surfaces.
 *
 * @param {unknown} raw - MCP result, either the object itself or its JSON string form.
 * @returns {string} Joined text content capped at 500 chars; "" when nothing usable.
 */
export function extractMcpErrorText(raw) {
    // Accept either a pre-parsed object or its stringified form; unparseable
    // strings yield "" rather than throwing.
    let result = raw;
    if (typeof raw === "string") {
        try {
            result = JSON.parse(raw);
        }
        catch {
            return "";
        }
    }
    if (result === null || result === undefined || typeof result !== "object") {
        return "";
    }
    const { content } = result;
    if (!Array.isArray(content)) {
        return "";
    }
    // Fail closed on malformed entries (e.g. `content: [null]`) rather than
    // throwing — the caller expects an empty string for unparseable input.
    const pieces = [];
    for (const entry of content) {
        if (entry === null || typeof entry !== "object") {
            continue;
        }
        if (entry.type === "text" &&
            typeof entry.text === "string" &&
            entry.text.length > 0) {
            pieces.push(entry.text);
        }
    }
    return pieces.join(" ").substring(0, 500);
}
|
|
36
|
+
//# sourceMappingURL=mcpErrorText.js.map
|
|
@@ -313,6 +313,12 @@ export function createTimeoutController(timeout, provider, operation) {
|
|
|
313
313
|
}
|
|
314
314
|
const controller = new AbortController();
|
|
315
315
|
const timer = setTimeout(() => {
|
|
316
|
+
// NOTE: we cannot stamp the AI SDK's ai.streamText/ai.generateText span
|
|
317
|
+
// from here — the setTimeout callback runs in the async context captured
|
|
318
|
+
// at schedule time, which is BEFORE the AI SDK span exists. Instead we
|
|
319
|
+
// rely on the AI SDK propagating the TimeoutError through its recordSpan
|
|
320
|
+
// wrapper, which sets span.status = ERROR + message. ContextEnricher's
|
|
321
|
+
// SpanStatusCode.ERROR branch then surfaces level=ERROR + status_message.
|
|
316
322
|
controller.abort(new TimeoutError(`${provider} ${operation} operation timed out after ${timeout}`, timeoutMs, provider, operation));
|
|
317
323
|
}, timeoutMs);
|
|
318
324
|
const cleanup = () => {
|
|
@@ -9,10 +9,72 @@ import { globalCircuitBreakerManager, CircuitBreakerOpenError, } from "./mcpCirc
|
|
|
9
9
|
import { isObject, isNullish } from "../utils/typeUtils.js";
|
|
10
10
|
import { validateToolName, validateToolDescription, } from "../utils/parameterValidation.js";
|
|
11
11
|
import { withTimeout } from "../utils/errorHandling.js";
|
|
12
|
+
import { extractMcpErrorText } from "../utils/mcpErrorText.js";
|
|
12
13
|
import { SpanKind, SpanStatusCode } from "@opentelemetry/api";
|
|
13
14
|
import { tracers } from "../telemetry/tracers.js";
|
|
14
15
|
import { withSpan } from "../telemetry/withSpan.js";
|
|
15
16
|
const mcpTracer = tracers.mcp;
|
|
17
|
+
/**
 * JSON-stringify a value for a Langfuse input/output preview attribute,
 * truncated to a hard cap to stay under span attribute size limits. The
 * returned string is guaranteed to be ≤ maxLen characters; when truncated,
 * the last character is replaced with an ellipsis.
 *
 * @param {unknown} value - Value to serialize; unserializable input yields "".
 * @param {number} maxLen - Hard cap on the returned string's length.
 * @returns {string} JSON preview, or "" when serialization fails or maxLen <= 0.
 */
function safeJsonStringify(value, maxLen) {
    if (maxLen <= 0) {
        return "";
    }
    let serialized;
    try {
        serialized = JSON.stringify(value);
    }
    catch {
        // Circular structures (and BigInt) make JSON.stringify throw.
        return "";
    }
    // JSON.stringify returns undefined for undefined/functions/symbols.
    if (typeof serialized !== "string") {
        return "";
    }
    return serialized.length <= maxLen
        ? serialized
        : serialized.slice(0, Math.max(0, maxLen - 1)) + "…";
}
|
|
41
|
+
/**
 * Match property names that commonly hold secrets. Values under these keys
 * are replaced with `[REDACTED]` before serialization. Case-insensitive.
 * Conservative list — anything matching *here* is masked; the rest of the
 * structure is preserved so Langfuse still gets a meaningful preview.
 */
const SENSITIVE_KEY_PATTERN = /^(password|passwd|secret|token|api[_-]?key|apikey|access[_-]?key|authorization|auth|bearer|credential|cookie|session[_-]?id|private[_-]?key|client[_-]?secret|refresh[_-]?token|x-api-key)$/i;
/**
 * Walk a value, producing a structurally-equivalent copy with sensitive-key
 * values masked. Unlike `transformParamsForLogging` (which collapses objects
 * to a "N params" string), this preserves non-sensitive content so Langfuse
 * input/output previews stay useful. Bounded depth guards against cycles.
 *
 * @param {unknown} value - Value to copy; primitives are returned as-is.
 * @param {number} [depth=0] - Current recursion depth; internal use only.
 * @returns {unknown} Redacted structural copy (or "[...]" past depth 10).
 */
function redactForPreview(value, depth = 0) {
    // Depth cap stops runaway recursion on cyclic or pathological input.
    if (depth > 10) {
        return "[...]";
    }
    const isPrimitive = value === null ||
        value === undefined ||
        typeof value !== "object";
    if (isPrimitive) {
        return value;
    }
    if (Array.isArray(value)) {
        const redactedItems = [];
        for (const item of value) {
            redactedItems.push(redactForPreview(item, depth + 1));
        }
        return redactedItems;
    }
    const redacted = {};
    for (const key of Object.keys(value)) {
        redacted[key] = SENSITIVE_KEY_PATTERN.test(key)
            ? "[REDACTED]"
            : redactForPreview(value[key], depth + 1);
    }
    return redacted;
}
|
|
16
78
|
/**
|
|
17
79
|
* Default timeout for MCP tool execution operations in milliseconds.
|
|
18
80
|
* Configurable via MCP_TOOL_TIMEOUT env var.
|
|
@@ -376,6 +438,18 @@ export class ToolDiscoveryService extends EventEmitter {
|
|
|
376
438
|
"mcp.server_id": serverId,
|
|
377
439
|
"mcp.tool_name": toolName,
|
|
378
440
|
"mcp.timeout_ms": effectiveTimeout,
|
|
441
|
+
// Curator P1-4: Langfuse observations rely on ai.*/gen_ai.*
|
|
442
|
+
// attributes for tool name and I/O previews. Provide them so
|
|
443
|
+
// the SPAN observation in Langfuse is legible without
|
|
444
|
+
// timestamp-joining against the parent ai.toolCall. Redact
|
|
445
|
+
// parameters via the existing secret-stripping helper so
|
|
446
|
+
// tokens/credentials/paths don't leave the process.
|
|
447
|
+
"ai.tool.name": toolName,
|
|
448
|
+
"gen_ai.tool.name": toolName,
|
|
449
|
+
"gen_ai.request": safeJsonStringify({
|
|
450
|
+
name: toolName,
|
|
451
|
+
arguments: redactForPreview(parameters),
|
|
452
|
+
}, 2048),
|
|
379
453
|
},
|
|
380
454
|
}, async (callSpan) => {
|
|
381
455
|
try {
|
|
@@ -384,11 +458,26 @@ export class ToolDiscoveryService extends EventEmitter {
|
|
|
384
458
|
name: toolName,
|
|
385
459
|
arguments: parameters,
|
|
386
460
|
}), timeout, new Error(`Tool execution timeout: ${toolName}`));
|
|
387
|
-
|
|
461
|
+
// Curator P0-1/P0-2: the MCP client does NOT throw on protocol
|
|
462
|
+
// errors — it returns { isError: true, content: [...] }. Detect
|
|
463
|
+
// that pattern so the span status reflects reality.
|
|
464
|
+
const resultObj = callResult;
|
|
465
|
+
if (resultObj && resultObj.isError === true) {
|
|
466
|
+
const errorText = extractMcpErrorText(resultObj);
|
|
467
|
+
callSpan.setStatus({
|
|
468
|
+
code: SpanStatusCode.ERROR,
|
|
469
|
+
message: errorText || `Tool ${toolName} returned isError`,
|
|
470
|
+
});
|
|
471
|
+
}
|
|
472
|
+
else {
|
|
473
|
+
callSpan.setStatus({ code: SpanStatusCode.OK });
|
|
474
|
+
}
|
|
388
475
|
// ── MCP output normalization ──────────────────────────────────
|
|
389
476
|
// Intercept here — after receive, before cache, before memory,
|
|
390
477
|
// before LLM context injection. Returns a compact surrogate when
|
|
391
478
|
// the payload exceeds mcp.outputLimits.maxBytes.
|
|
479
|
+
let resultForPreview = callResult;
|
|
480
|
+
let resultForReturn = callResult;
|
|
392
481
|
if (this.outputNormalizer) {
|
|
393
482
|
try {
|
|
394
483
|
const normalized = await this.outputNormalizer.normalize(callResult, { toolName, serverId });
|
|
@@ -396,7 +485,8 @@ export class ToolDiscoveryService extends EventEmitter {
|
|
|
396
485
|
if (normalized.isExternalized) {
|
|
397
486
|
callSpan.setAttribute("mcp.output.original_bytes", normalized.originalBytes);
|
|
398
487
|
}
|
|
399
|
-
|
|
488
|
+
resultForPreview = normalized.result;
|
|
489
|
+
resultForReturn = normalized.result;
|
|
400
490
|
}
|
|
401
491
|
catch (normErr) {
|
|
402
492
|
mcpLogger.warn(`[ToolDiscoveryService] McpOutputNormalizer failed for ` +
|
|
@@ -405,7 +495,13 @@ export class ToolDiscoveryService extends EventEmitter {
|
|
|
405
495
|
}
|
|
406
496
|
}
|
|
407
497
|
// ── end normalization ─────────────────────────────────────────
|
|
408
|
-
|
|
498
|
+
// Curator P1-4: build gen_ai.response AFTER normalization so
|
|
499
|
+
// large payloads use the compact surrogate instead of the raw
|
|
500
|
+
// result (avoids redundant stringify + memory hit on payloads
|
|
501
|
+
// that were specifically externalized to Redis). Redact via the
|
|
502
|
+
// same secret-stripping path used for request parameters.
|
|
503
|
+
callSpan.setAttribute("gen_ai.response", safeJsonStringify(redactForPreview(resultForPreview), 2048));
|
|
504
|
+
return resultForReturn;
|
|
409
505
|
}
|
|
410
506
|
catch (err) {
|
|
411
507
|
callSpan.setStatus({
|
package/dist/mcp/toolRegistry.js
CHANGED
|
@@ -257,6 +257,9 @@ export class MCPToolRegistry extends MCPRegistry {
|
|
|
257
257
|
attributes: {
|
|
258
258
|
[ATTR.GEN_AI_TOOL_NAME]: toolName,
|
|
259
259
|
[ATTR.MCP_SERVER_ID]: preResolvedServerId || "builtin",
|
|
260
|
+
// Curator P1-3: registry-level wrapper — duplicates ai.toolCall in
|
|
261
|
+
// Langfuse. Retained for OTel/metrics; skipped for Langfuse export.
|
|
262
|
+
"langfuse.internal": true,
|
|
260
263
|
},
|
|
261
264
|
}, async (span) => {
|
|
262
265
|
try {
|
package/dist/neurolink.js
CHANGED
|
@@ -64,6 +64,7 @@ import { CircuitBreaker, ERROR_CODES, ErrorFactory, isAbortError, isRetriableErr
|
|
|
64
64
|
// Factory processing imports
|
|
65
65
|
import { createCleanStreamOptions, enhanceTextGenerationOptions, processFactoryOptions, processStreamingFactoryOptions, validateFactoryConfig, } from "./utils/factoryProcessing.js";
|
|
66
66
|
import { logger, mcpLogger } from "./utils/logger.js";
|
|
67
|
+
import { extractMcpErrorText } from "./utils/mcpErrorText.js";
|
|
67
68
|
import { createCustomToolServerInfo, detectCategory, } from "./utils/mcpDefaults.js";
|
|
68
69
|
import { resolveModel } from "./utils/modelAliasResolver.js";
|
|
69
70
|
// Import orchestration components
|
|
@@ -133,29 +134,6 @@ function mcpCategoryToErrorCategory(mcpCategory) {
|
|
|
133
134
|
return ErrorCategory.EXECUTION;
|
|
134
135
|
}
|
|
135
136
|
}
|
|
136
|
-
/**
|
|
137
|
-
* Extract a human-readable error string from an MCP isError result object.
|
|
138
|
-
* Returns an empty string if nothing useful can be extracted.
|
|
139
|
-
*/
|
|
140
|
-
function extractMcpErrorText(raw) {
|
|
141
|
-
try {
|
|
142
|
-
const resultObj = typeof raw === "string" ? JSON.parse(raw) : raw;
|
|
143
|
-
if (!resultObj || typeof resultObj !== "object") {
|
|
144
|
-
return "";
|
|
145
|
-
}
|
|
146
|
-
const content = resultObj.content;
|
|
147
|
-
if (!Array.isArray(content)) {
|
|
148
|
-
return "";
|
|
149
|
-
}
|
|
150
|
-
const texts = content
|
|
151
|
-
.filter((c) => c.type === "text" && c.text)
|
|
152
|
-
.map((c) => c.text);
|
|
153
|
-
return texts.join(" ").substring(0, 500);
|
|
154
|
-
}
|
|
155
|
-
catch {
|
|
156
|
-
return "";
|
|
157
|
-
}
|
|
158
|
-
}
|
|
159
137
|
/**
|
|
160
138
|
* Check if an error is a non-retryable provider error that should immediately
|
|
161
139
|
* stop the retry/fallback chain. These errors represent permanent failures
|
|
@@ -6267,6 +6245,13 @@ Current user's request: ${currentInput}`;
|
|
|
6267
6245
|
"tool.type": executionContext.toolType,
|
|
6268
6246
|
"tool.input_size": executionContext.inputSize,
|
|
6269
6247
|
"tool.input_preview": executionContext.truncatedInput,
|
|
6248
|
+
// NOT marked langfuse.internal: this is the public entrypoint for
|
|
6249
|
+
// `NeuroLink.executeTool()`. Direct API callers (not going through
|
|
6250
|
+
// the AI SDK) would otherwise produce zero Langfuse observations —
|
|
6251
|
+
// the lower-level registry/discovery spans are internal wrappers.
|
|
6252
|
+
// AI-SDK-initiated custom tools will produce both ai.toolCall and
|
|
6253
|
+
// this span, which is the accepted tradeoff for keeping direct
|
|
6254
|
+
// invocations observable.
|
|
6270
6255
|
},
|
|
6271
6256
|
}, (toolSpan) => this.executeToolWithSpan(toolName, params, options, executionContext, toolSpan));
|
|
6272
6257
|
}
|
|
@@ -36,10 +36,27 @@
|
|
|
36
36
|
* }
|
|
37
37
|
* ```
|
|
38
38
|
*/
|
|
39
|
-
import { parseBuffer, selectCover } from "music-metadata";
|
|
40
39
|
import { BaseFileProcessor } from "../base/BaseFileProcessor.js";
|
|
41
40
|
import { SIZE_LIMITS_MB } from "../config/index.js";
|
|
42
41
|
import { FileErrorCode } from "../errors/index.js";
|
|
42
|
+
// Lazily-imported music-metadata namespace, cached after the first successful
// load so repeated audio parses don't re-resolve the optional dependency.
let _musicMetadata = null;
/**
 * Dynamically import the optional "music-metadata" package.
 *
 * @returns {Promise<object>} The music-metadata module namespace.
 * @throws {Error} A friendly install hint (with the original error as `cause`)
 *   when the package is not installed; any other import failure is rethrown.
 */
async function loadMusicMetadata() {
    if (_musicMetadata !== null) {
        return _musicMetadata;
    }
    try {
        _musicMetadata = await import(/* @vite-ignore */ "music-metadata");
        return _musicMetadata;
    }
    catch (err) {
        const cause = err instanceof Error ? err : null;
        const isMissingPackage = cause?.code === "ERR_MODULE_NOT_FOUND" &&
            cause.message.includes("music-metadata");
        if (isMissingPackage) {
            throw new Error('Audio processing requires the "music-metadata" package. Install it with:\n pnpm add music-metadata', { cause: err });
        }
        throw err;
    }
}
|
|
43
60
|
// =============================================================================
|
|
44
61
|
// TYPES
|
|
45
62
|
// =============================================================================
|
|
@@ -239,7 +256,7 @@ export class AudioProcessor extends BaseFileProcessor {
|
|
|
239
256
|
// Step 5: Extract tags from common metadata
|
|
240
257
|
const tags = this.extractTags(audioMetadata);
|
|
241
258
|
// Step 6: Extract embedded cover art if present
|
|
242
|
-
const coverArt = this.extractCoverArt(audioMetadata);
|
|
259
|
+
const coverArt = await this.extractCoverArt(audioMetadata);
|
|
243
260
|
// Step 7: Attempt transcription if API key is available
|
|
244
261
|
const filename = this.getFilename(fileInfo);
|
|
245
262
|
const transcriptionResult = await this.attemptTranscription(buffer, filename, fileInfo.mimetype);
|
|
@@ -404,6 +421,7 @@ export class AudioProcessor extends BaseFileProcessor {
|
|
|
404
421
|
// parseBuffer accepts (Uint8Array, fileInfo?: IFileInfo | string, options?)
|
|
405
422
|
// where string is interpreted as MIME type.
|
|
406
423
|
const mimeType = fileInfo.mimetype || undefined;
|
|
424
|
+
const { parseBuffer } = await loadMusicMetadata();
|
|
407
425
|
return parseBuffer(buffer, mimeType);
|
|
408
426
|
}
|
|
409
427
|
/**
|
|
@@ -467,11 +485,12 @@ export class AudioProcessor extends BaseFileProcessor {
|
|
|
467
485
|
* @param audioMetadata - Parsed audio metadata from music-metadata
|
|
468
486
|
* @returns Cover art as Buffer, or null if no cover art is embedded
|
|
469
487
|
*/
|
|
470
|
-
extractCoverArt(audioMetadata) {
|
|
488
|
+
async extractCoverArt(audioMetadata) {
|
|
471
489
|
const pictures = audioMetadata.common.picture;
|
|
472
490
|
if (!pictures || pictures.length === 0) {
|
|
473
491
|
return null;
|
|
474
492
|
}
|
|
493
|
+
const { selectCover } = await loadMusicMetadata();
|
|
475
494
|
const cover = selectCover(pictures);
|
|
476
495
|
if (!cover) {
|
|
477
496
|
return null;
|
|
@@ -44,9 +44,7 @@
|
|
|
44
44
|
* ```
|
|
45
45
|
*/
|
|
46
46
|
import { randomUUID } from "crypto";
|
|
47
|
-
import ffmpegCommand from "fluent-ffmpeg";
|
|
48
47
|
import { createWriteStream, existsSync, promises as fs } from "fs";
|
|
49
|
-
import { Input, FilePathSource, ALL_FORMATS } from "mediabunny";
|
|
50
48
|
import { tmpdir } from "os";
|
|
51
49
|
import { join } from "path";
|
|
52
50
|
import { Readable } from "stream";
|
|
@@ -56,6 +54,40 @@ import { SIZE_LIMITS_MB } from "../config/index.js";
|
|
|
56
54
|
import { FileErrorCode } from "../errors/index.js";
|
|
57
55
|
import { tracers, ATTR, withSpan } from "../../telemetry/index.js";
|
|
58
56
|
import { logger } from "../../utils/logger.js";
|
|
57
|
+
// fluent-ffmpeg's default export is callable + has static methods — avoid caching
// the module type (it confuses TS); Node's module cache handles dedup.
/**
 * Dynamically import the optional "fluent-ffmpeg" package and return its
 * callable default export.
 *
 * @returns {Promise<Function>} The fluent-ffmpeg command factory.
 * @throws {Error} A friendly install hint (with the original error as `cause`)
 *   when the package is not installed; any other import failure is rethrown.
 */
async function loadFluentFfmpeg() {
    try {
        const mod = await import(/* @vite-ignore */ "fluent-ffmpeg");
        return mod.default;
    }
    catch (err) {
        const cause = err instanceof Error ? err : null;
        const isMissingPackage = cause?.code === "ERR_MODULE_NOT_FOUND" &&
            cause.message.includes("fluent-ffmpeg");
        if (isMissingPackage) {
            throw new Error('Video processing requires the "fluent-ffmpeg" package. Install it with:\n pnpm add fluent-ffmpeg', { cause: err });
        }
        throw err;
    }
}
|
|
73
|
+
// Lazily-imported mediabunny namespace, cached after the first successful load.
let _mediabunny = null;
/**
 * Dynamically import the optional "mediabunny" package.
 *
 * @returns {Promise<object>} The mediabunny module namespace.
 * @throws {Error} A friendly install hint (with the original error as `cause`)
 *   when the package is not installed; any other import failure is rethrown.
 */
async function loadMediaBunny() {
    if (_mediabunny !== null) {
        return _mediabunny;
    }
    try {
        _mediabunny = await import(/* @vite-ignore */ "mediabunny");
        return _mediabunny;
    }
    catch (err) {
        const cause = err instanceof Error ? err : null;
        const isMissingPackage = cause?.code === "ERR_MODULE_NOT_FOUND" &&
            cause.message.includes("mediabunny");
        if (isMissingPackage) {
            throw new Error('Video processing requires the "mediabunny" package. Install it with:\n pnpm add mediabunny', { cause: err });
        }
        throw err;
    }
}
|
|
59
91
|
// =============================================================================
|
|
60
92
|
// FFMPEG PATH INITIALIZATION
|
|
61
93
|
// =============================================================================
|
|
@@ -90,7 +122,8 @@ async function initFfmpegPaths() {
|
|
|
90
122
|
const ffmpegStatic = await import("ffmpeg-static");
|
|
91
123
|
const ffmpegPath = ffmpegStatic.default;
|
|
92
124
|
if (typeof ffmpegPath === "string" && existsSync(ffmpegPath)) {
|
|
93
|
-
|
|
125
|
+
const ff = await loadFluentFfmpeg();
|
|
126
|
+
ff.setFfmpegPath(ffmpegPath);
|
|
94
127
|
}
|
|
95
128
|
}
|
|
96
129
|
catch {
|
|
@@ -469,7 +502,8 @@ export class VideoProcessor extends BaseFileProcessor {
|
|
|
469
502
|
* @param filePath - Path to the video file
|
|
470
503
|
* @returns Success result with probe data or error message
|
|
471
504
|
*/
|
|
472
|
-
probeVideo(filePath) {
|
|
505
|
+
async probeVideo(filePath) {
|
|
506
|
+
const ffmpeg = await loadFluentFfmpeg();
|
|
473
507
|
return new Promise((resolve) => {
|
|
474
508
|
const timeoutId = setTimeout(() => {
|
|
475
509
|
resolve({
|
|
@@ -477,7 +511,7 @@ export class VideoProcessor extends BaseFileProcessor {
|
|
|
477
511
|
error: `ffprobe timed out after ${VIDEO_CONFIG.FFPROBE_TIMEOUT_MS}ms`,
|
|
478
512
|
});
|
|
479
513
|
}, VIDEO_CONFIG.FFPROBE_TIMEOUT_MS);
|
|
480
|
-
|
|
514
|
+
ffmpeg.ffprobe(filePath, (err, data) => {
|
|
481
515
|
clearTimeout(timeoutId);
|
|
482
516
|
if (err) {
|
|
483
517
|
resolve({
|
|
@@ -496,11 +530,12 @@ export class VideoProcessor extends BaseFileProcessor {
|
|
|
496
530
|
* Falls back to ffprobe if mediabunny fails or doesn't support the format.
|
|
497
531
|
*/
|
|
498
532
|
async probeVideoWithMediabunny(filePath) {
|
|
533
|
+
const mb = await loadMediaBunny();
|
|
499
534
|
let input;
|
|
500
535
|
try {
|
|
501
|
-
input = new Input({
|
|
502
|
-
source: new FilePathSource(filePath),
|
|
503
|
-
formats: [...ALL_FORMATS],
|
|
536
|
+
input = new mb.Input({
|
|
537
|
+
source: new mb.FilePathSource(filePath),
|
|
538
|
+
formats: [...mb.ALL_FORMATS],
|
|
504
539
|
});
|
|
505
540
|
const duration = await input.computeDuration();
|
|
506
541
|
const videoTrack = await input.getPrimaryVideoTrack();
|
|
@@ -671,7 +706,8 @@ export class VideoProcessor extends BaseFileProcessor {
|
|
|
671
706
|
* @param outputDir - Directory to write frame files
|
|
672
707
|
* @param timestamps - Array of timestamps in seconds
|
|
673
708
|
*/
|
|
674
|
-
runFfmpegFrameExtraction(videoPath, outputDir, timestamps, intervalSec) {
|
|
709
|
+
async runFfmpegFrameExtraction(videoPath, outputDir, timestamps, intervalSec) {
|
|
710
|
+
const ff = await loadFluentFfmpeg();
|
|
675
711
|
return new Promise((resolve, reject) => {
|
|
676
712
|
// Improved select expression to pick exactly one frame per interval
|
|
677
713
|
// instead of multiple frames within a 0.5s window.
|
|
@@ -679,7 +715,7 @@ export class VideoProcessor extends BaseFileProcessor {
|
|
|
679
715
|
const timeoutId = setTimeout(() => {
|
|
680
716
|
reject(new Error(`ffmpeg frame extraction timed out after ${VIDEO_CONFIG.FFMPEG_TIMEOUT_MS}ms`));
|
|
681
717
|
}, VIDEO_CONFIG.FFMPEG_TIMEOUT_MS);
|
|
682
|
-
|
|
718
|
+
ff(videoPath)
|
|
683
719
|
.outputOptions([
|
|
684
720
|
"-vf",
|
|
685
721
|
`select='${selectExpr}',scale='min(${VIDEO_CONFIG.FRAME_MAX_DIMENSION}\\,iw):-2'`,
|
|
@@ -740,11 +776,12 @@ export class VideoProcessor extends BaseFileProcessor {
|
|
|
740
776
|
*/
|
|
741
777
|
async extractSubtitles(videoPath, tempDir) {
|
|
742
778
|
const subtitlePath = join(tempDir, "subtitles.srt");
|
|
779
|
+
const ffSub = await loadFluentFfmpeg();
|
|
743
780
|
await new Promise((resolve, reject) => {
|
|
744
781
|
const timeoutId = setTimeout(() => {
|
|
745
782
|
reject(new Error(`ffmpeg subtitle extraction timed out after ${VIDEO_CONFIG.FFMPEG_TIMEOUT_MS}ms`));
|
|
746
783
|
}, VIDEO_CONFIG.FFMPEG_TIMEOUT_MS);
|
|
747
|
-
|
|
784
|
+
ffSub(videoPath)
|
|
748
785
|
.outputOptions(["-map", "0:s:0", "-c:s", "srt"])
|
|
749
786
|
.output(subtitlePath)
|
|
750
787
|
.on("end", () => {
|