@visibe.ai/node 0.1.16 → 0.1.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/esm/index.js CHANGED
@@ -4,30 +4,35 @@ import { Visibe } from './client.js';
4
4
  // ---------------------------------------------------------------------------
5
5
  let _globalClient = null;
6
6
  let _shutdownRegistered = false;
7
- // Saved original constructors so shutdown() can restore them.
8
- // Each is typed as `any` because we need to reassign imported class bindings.
7
+ // Saved originals so shutdown() can restore them.
9
8
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
10
9
  let _originalOpenAI = null;
11
10
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
12
- let _originalBedrockClient = null;
13
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
14
- let _originalCompiledStateGraph = null;
15
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
16
11
  let _originalAnthropic = null;
12
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
13
+ let _originalBedrockClient = null;
14
+ // Prototype-restore functions returned by patch helpers.
15
+ let _lgRestore = null;
16
+ let _lcRestore = null;
17
17
  let _vercelAIRestore = null;
18
+ let _autoPatchedFrameworks = [];
19
+ const ALL_FRAMEWORKS = ['openai', 'anthropic', 'bedrock', 'langgraph', 'langchain', 'vercel_ai'];
18
20
  // ---------------------------------------------------------------------------
19
- // detectFrameworks()
21
+ // detectFrameworks() — synchronous, CJS-only legacy helper
22
+ // Auto-patching now uses dynamic import() and works in both CJS and ESM.
20
23
  // ---------------------------------------------------------------------------
21
- function tryRequire(pkg) {
22
- try {
23
- require(pkg);
24
- return true;
25
- }
26
- catch {
27
- return false;
28
- }
29
- }
30
24
  export function detectFrameworks() {
25
+ const tryRequire = (pkg) => {
26
+ try {
27
+ // eslint-disable-next-line @typescript-eslint/no-require-imports
28
+ if (typeof require !== 'undefined') {
29
+ require(pkg);
30
+ return true;
31
+ }
32
+ }
33
+ catch { /* not installed */ }
34
+ return false;
35
+ };
31
36
  return {
32
37
  openai: tryRequire('openai'),
33
38
  langchain: tryRequire('@langchain/core'),
@@ -35,20 +40,32 @@ export function detectFrameworks() {
35
40
  bedrock: tryRequire('@aws-sdk/client-bedrock-runtime'),
36
41
  vercel_ai: tryRequire('ai'),
37
42
  anthropic: tryRequire('@anthropic-ai/sdk'),
38
- // crewai and autogen are Python-only — no Node.js equivalent
39
43
  };
40
44
  }
41
45
  // ---------------------------------------------------------------------------
42
- // patchFramework() — auto-instruments a framework at the constructor level
46
+ // patchFramework() — async, uses dynamic import() for CJS + ESM compat.
47
+ //
48
+ // Key design notes:
49
+ // - dynamic import() in CJS builds: TypeScript compiles to require()-based
50
+ // Promise, so we get the same mutable module object as require().
51
+ // - dynamic import() in ESM builds: gives the live ESM namespace (same
52
+ // instance as the user's own `import`).
53
+ // - Prototype patching (langgraph, langchain) works in both envs — prototypes
54
+ // are shared mutable objects regardless of CJS/ESM.
55
+ // - Module-export patching (openai, anthropic, bedrock) only works in CJS
56
+ // because ESM namespace objects are sealed. We try and silently skip in ESM.
43
57
  // ---------------------------------------------------------------------------
44
- function patchFramework(framework, client) {
58
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
59
+ const _setProp = (obj, key, val) => Object.defineProperty(obj, key, { value: val, configurable: true, writable: true, enumerable: true });
60
+ async function patchFramework(framework, client) {
45
61
  try {
46
62
  switch (framework) {
47
63
  case 'openai': {
48
- const openaiModule = require('openai');
49
- _originalOpenAI = openaiModule.OpenAI;
50
- // Named 'OpenAI' so client.constructor.name === 'OpenAI' after construction.
51
- // applyIntegration() in client.ts uses constructor.name to detect the client type.
64
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
65
+ const openaiModule = await import('openai');
66
+ _originalOpenAI = openaiModule.OpenAI ?? openaiModule.default;
67
+ if (!_originalOpenAI)
68
+ return;
52
69
  const PatchedOpenAI = class OpenAI extends _originalOpenAI {
53
70
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
54
71
  constructor(...args) {
@@ -59,19 +76,24 @@ function patchFramework(framework, client) {
59
76
  catch { /* never crash new OpenAI() */ }
60
77
  }
61
78
  };
62
- // Use defineProperty because openai exports OpenAI/default as getter-only properties
63
- // (no setter). Direct assignment silently fails in that case.
64
- const setProp = (obj, key, val) => Object.defineProperty(obj, key, { value: val, configurable: true, writable: true, enumerable: true });
65
- setProp(openaiModule, 'OpenAI', PatchedOpenAI);
66
- // Also patch .default so that `import OpenAI from 'openai'` (esModuleInterop)
67
- // picks up the instrumented class — TypeScript compiles default imports to .default.
68
- setProp(openaiModule, 'default', PatchedOpenAI);
79
+ // In ESM, the namespace object is sealed, so defineProperty throws a TypeError.
80
+ // We catch that and skip (user must call client.instrument() explicitly).
81
+ try {
82
+ _setProp(openaiModule, 'OpenAI', PatchedOpenAI);
83
+ _setProp(openaiModule, 'default', PatchedOpenAI);
84
+ }
85
+ catch {
86
+ _originalOpenAI = null;
87
+ return;
88
+ }
69
89
  break;
70
90
  }
71
91
  case 'anthropic': {
72
- const anthropicModule = require('@anthropic-ai/sdk');
73
- _originalAnthropic = anthropicModule.Anthropic;
74
- // Named 'Anthropic' so constructor.name check in applyIntegration() matches.
92
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
93
+ const anthropicModule = await import('@anthropic-ai/sdk');
94
+ _originalAnthropic = anthropicModule.Anthropic ?? anthropicModule.default;
95
+ if (!_originalAnthropic)
96
+ return;
75
97
  const PatchedAnthropic = class Anthropic extends _originalAnthropic {
76
98
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
77
99
  constructor(...args) {
@@ -82,17 +104,23 @@ function patchFramework(framework, client) {
82
104
  catch { /* never crash new Anthropic() */ }
83
105
  }
84
106
  };
85
- // Same getter-only issue as openai — use defineProperty.
86
- const setAnthropicProp = (obj, key, val) => Object.defineProperty(obj, key, { value: val, configurable: true, writable: true, enumerable: true });
87
- setAnthropicProp(anthropicModule, 'Anthropic', PatchedAnthropic);
88
- // Also patch .default for esModuleInterop default import support.
89
- setAnthropicProp(anthropicModule, 'default', PatchedAnthropic);
107
+ try {
108
+ _setProp(anthropicModule, 'Anthropic', PatchedAnthropic);
109
+ _setProp(anthropicModule, 'default', PatchedAnthropic);
110
+ }
111
+ catch {
112
+ _originalAnthropic = null;
113
+ return;
114
+ }
90
115
  break;
91
116
  }
92
117
  case 'bedrock': {
93
- const bedrockModule = require('@aws-sdk/client-bedrock-runtime');
118
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
119
+ const bedrockModule = await import('@aws-sdk/client-bedrock-runtime');
94
120
  _originalBedrockClient = bedrockModule.BedrockRuntimeClient;
95
- bedrockModule.BedrockRuntimeClient = class BedrockRuntimeClient extends _originalBedrockClient {
121
+ if (!_originalBedrockClient)
122
+ return;
123
+ const PatchedBedrock = class BedrockRuntimeClient extends _originalBedrockClient {
96
124
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
97
125
  constructor(...args) {
98
126
  super(...args);
@@ -102,39 +130,61 @@ function patchFramework(framework, client) {
102
130
  catch { /* never crash new BedrockRuntimeClient() */ }
103
131
  }
104
132
  };
133
+ try {
134
+ bedrockModule.BedrockRuntimeClient = PatchedBedrock;
135
+ }
136
+ catch {
137
+ _originalBedrockClient = null;
138
+ return;
139
+ }
105
140
  break;
106
141
  }
107
142
  case 'langgraph': {
108
- const lgModule = require('@langchain/langgraph');
109
- _originalCompiledStateGraph = lgModule.CompiledStateGraph;
110
- // LangGraph instrumentation is applied via LangChainCallback at the class level.
111
- // The actual patching happens inside the langgraph integration module.
112
- const { patchCompiledStateGraph } = require('./integrations/langgraph');
113
- patchCompiledStateGraph(lgModule, client);
143
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
144
+ const lgModule = await import('@langchain/langgraph');
145
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
146
+ const { patchCompiledStateGraph } = await import('./integrations/langgraph.js');
147
+ // patchCompiledStateGraph modifies CompiledStateGraph.prototype — prototype
148
+ // patching works in ESM because prototypes are shared mutable objects.
149
+ _lgRestore = patchCompiledStateGraph(lgModule, client);
114
150
  break;
115
151
  }
116
152
  case 'langchain': {
117
- // LangChain is instrumented via RunnableSequence constructor patching.
118
- const { patchRunnableSequence } = require('./integrations/langchain');
119
- const lcModule = require('@langchain/core/runnables');
120
- patchRunnableSequence(lcModule, client);
153
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
154
+ const { patchRunnableSequence } = await import('./integrations/langchain.js');
155
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
156
+ const lcModule = await import('@langchain/core/runnables');
157
+ const result = patchRunnableSequence(lcModule, client);
158
+ if (typeof result === 'function')
159
+ _lcRestore = result;
121
160
  break;
122
161
  }
123
162
  case 'vercel_ai': {
124
- const { patchVercelAI } = require('./integrations/vercel-ai');
125
- const aiModule = require('ai');
163
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
164
+ const { patchVercelAI } = await import('./integrations/vercel-ai.js');
165
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
166
+ const aiModule = await import('ai');
126
167
  _vercelAIRestore = patchVercelAI(aiModule, client);
127
168
  break;
128
169
  }
129
170
  }
130
- // Record which frameworks were successfully patched for the startup log.
131
171
  _autoPatchedFrameworks.push(framework);
132
172
  }
133
173
  catch {
134
174
  // Package not installed or patch failed — skip silently.
135
175
  }
136
176
  }
137
- let _autoPatchedFrameworks = [];
177
+ // ---------------------------------------------------------------------------
178
+ // _autoPatch() — async; fires from init() without blocking it
179
+ // ---------------------------------------------------------------------------
180
+ async function _autoPatch(client, frameworks) {
181
+ for (const fw of frameworks) {
182
+ await patchFramework(fw, client);
183
+ }
184
+ if (_autoPatchedFrameworks.length > 0) {
185
+ console.log(`[Visibe] Auto-instrumented: ${_autoPatchedFrameworks.join(', ')}`);
186
+ }
187
+ }
138
188
  // ---------------------------------------------------------------------------
139
189
  // init()
140
190
  // ---------------------------------------------------------------------------
@@ -144,17 +194,11 @@ export function init(options) {
144
194
  return _globalClient;
145
195
  }
146
196
  _globalClient = new Visibe(options ?? {});
147
- const detected = detectFrameworks();
148
- const toInstrument = options?.frameworks
149
- ?? Object.keys(detected).filter(k => detected[k]);
150
- for (const fw of toInstrument) {
151
- patchFramework(fw, _globalClient);
152
- }
153
- // Register graceful shutdown handlers.
154
- // NOTE: process.on('exit') fires synchronously — async HTTP requests cannot
155
- // complete there. SIGTERM is what Docker/Kubernetes send before killing a
156
- // container; without handling it all buffered spans are lost.
157
- // We await shutdown() so the batcher's 300 ms window completes before exit.
197
+ // Fire async patching — works in both CJS and ESM via dynamic import().
198
+ // Patching typically completes within microseconds (cached modules) so there
199
+ // is no practical race condition for normal usage patterns.
200
+ const frameworksToTry = options?.frameworks ?? ALL_FRAMEWORKS;
201
+ _autoPatch(_globalClient, frameworksToTry).catch(() => { });
158
202
  if (!_shutdownRegistered) {
159
203
  const graceful = async () => { await shutdown(); process.exit(0); };
160
204
  process.on('SIGTERM', graceful);
@@ -162,9 +206,6 @@ export function init(options) {
162
206
  process.on('beforeExit', () => { shutdown().catch(() => { }); });
163
207
  _shutdownRegistered = true;
164
208
  }
165
- if (_autoPatchedFrameworks.length > 0) {
166
- console.log(`[Visibe] Auto-instrumented: ${_autoPatchedFrameworks.join(', ')}`);
167
- }
168
209
  return _globalClient;
169
210
  }
170
211
  // ---------------------------------------------------------------------------
@@ -173,53 +214,65 @@ export function init(options) {
173
214
  export async function shutdown() {
174
215
  if (_globalClient === null)
175
216
  return;
176
- // Capture the client reference and clear global state immediately so that
177
- // re-init() calls work without needing to await this function.
178
217
  const client = _globalClient;
179
218
  _globalClient = null;
180
219
  _autoPatchedFrameworks = [];
181
- // Restore patched constructors so the SDK leaves no trace after shutdown.
182
- try {
183
- if (_originalOpenAI) {
184
- const m = require('openai');
220
+ // Restore patched module exports (works in CJS; silently no-ops in ESM).
221
+ if (_originalOpenAI) {
222
+ try {
223
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
224
+ const m = await import('openai');
185
225
  Object.defineProperty(m, 'OpenAI', { value: _originalOpenAI, configurable: true, writable: true, enumerable: true });
186
226
  Object.defineProperty(m, 'default', { value: _originalOpenAI, configurable: true, writable: true, enumerable: true });
187
- _originalOpenAI = null;
188
227
  }
228
+ catch { /* ignore */ }
229
+ _originalOpenAI = null;
189
230
  }
190
- catch { /* package may have been unloaded */ }
191
- try {
192
- if (_originalAnthropic) {
193
- const m = require('@anthropic-ai/sdk');
231
+ if (_originalAnthropic) {
232
+ try {
233
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
234
+ const m = await import('@anthropic-ai/sdk');
194
235
  Object.defineProperty(m, 'Anthropic', { value: _originalAnthropic, configurable: true, writable: true, enumerable: true });
195
236
  Object.defineProperty(m, 'default', { value: _originalAnthropic, configurable: true, writable: true, enumerable: true });
196
- _originalAnthropic = null;
197
237
  }
238
+ catch { /* ignore */ }
239
+ _originalAnthropic = null;
198
240
  }
199
- catch { /* package may have been unloaded */ }
200
- try {
201
- if (_originalBedrockClient) {
202
- require('@aws-sdk/client-bedrock-runtime').BedrockRuntimeClient = _originalBedrockClient;
203
- _originalBedrockClient = null;
241
+ if (_originalBedrockClient) {
242
+ try {
243
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
244
+ const m = await import('@aws-sdk/client-bedrock-runtime');
245
+ m.BedrockRuntimeClient = _originalBedrockClient;
204
246
  }
247
+ catch { /* ignore */ }
248
+ _originalBedrockClient = null;
205
249
  }
206
- catch { /* package may have been unloaded */ }
207
- try {
208
- if (_originalCompiledStateGraph) {
209
- require('@langchain/langgraph').CompiledStateGraph = _originalCompiledStateGraph;
210
- _originalCompiledStateGraph = null;
250
+ // Restore prototype patches via stored cleanup functions.
251
+ if (_lgRestore) {
252
+ try {
253
+ _lgRestore();
211
254
  }
255
+ catch { /* ignore */ }
256
+ ;
257
+ _lgRestore = null;
212
258
  }
213
- catch { /* package may have been unloaded */ }
214
- try {
215
- if (_vercelAIRestore) {
259
+ if (_lcRestore) {
260
+ try {
261
+ _lcRestore();
262
+ }
263
+ catch { /* ignore */ }
264
+ ;
265
+ _lcRestore = null;
266
+ }
267
+ if (_vercelAIRestore) {
268
+ try {
216
269
  _vercelAIRestore();
217
- _vercelAIRestore = null;
218
270
  }
271
+ catch { /* ignore */ }
272
+ ;
273
+ _vercelAIRestore = null;
219
274
  }
220
- catch { /* package may have been unloaded */ }
221
- // Flush buffered spans and wait up to 300 ms for in-flight HTTP requests to
222
- // complete. This prevents spans from being lost on SIGTERM.
275
+ // Flush buffered spans and wait up to 300 ms for in-flight HTTP requests.
223
276
  await client.batcher.shutdown();
224
277
  }
225
278
  // ---------------------------------------------------------------------------
@@ -7,19 +7,19 @@ export const activeLangChainStorage = new AsyncLocalStorage();
7
7
  // ---------------------------------------------------------------------------
8
8
  // LangChain token extraction
9
9
  // Different providers nest token usage in different locations.
10
- // Check in the order specified by the spec.
11
10
  // ---------------------------------------------------------------------------
12
11
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
13
12
  function extractTokenUsage(output) {
14
13
  const usage = output?.llmOutput?.tokenUsage
15
14
  ?? output?.llmOutput?.usage
16
15
  ?? output?.generations?.[0]?.[0]?.generationInfo?.usage;
17
- // Use ?? not || so token counts of 0 are preserved correctly.
18
16
  return {
19
17
  inputTokens: usage?.promptTokens ?? usage?.input_tokens ?? 0,
20
18
  outputTokens: usage?.completionTokens ?? usage?.output_tokens ?? 0,
21
19
  };
22
20
  }
21
+ // Internal LangGraph system node names — never emit agent_start spans for these.
22
+ export const LANGGRAPH_INTERNAL_NODES = new Set(['__start__', '__end__', 'LangGraph']);
23
23
  // ---------------------------------------------------------------------------
24
24
  // LangChainCallback
25
25
  // ---------------------------------------------------------------------------
@@ -30,23 +30,21 @@ export class LangChainCallback {
30
30
  constructor(options) {
31
31
  // Required by @langchain/core v1+ for proper callback registration.
32
32
  // Without `name`, ensureHandler() wraps via fromMethods() which drops prototype methods.
33
- // Without `awaitHandlers`, callbacks run in a background queue (p-queue) and fire
34
- // after model.invoke() returns — causing spans to be missed on flush/completeTrace.
33
+ // Without `awaitHandlers`, callbacks run in a background queue and fire after
34
+ // model.invoke() returns — causing spans to be missed on flush/completeTrace.
35
35
  this.name = 'visibe-langchain-callback';
36
36
  this.awaitHandlers = true;
37
37
  this.raiseError = false;
38
38
  // Maps LangChain runId → our spanId so we can set parent_span_id.
39
39
  this.runIdToSpanId = new Map();
40
- // Tracks start times so we can compute durationMs.
41
- this.pendingLLMCalls = new Map(); // runId → startMs
40
+ // Pending LLM calls: runId { startMs, model, inputText }
41
+ this.pendingLLMCalls = new Map();
42
42
  this.pendingToolCalls = new Map();
43
43
  this.stepCounter = 0;
44
- // Agents we have already emitted agent_start spans for.
45
- this.seenAgents = new Set();
46
- // Token / call accumulators — updated by handleLLMEnd, read by patchCompiledStateGraph
47
- // and patchRunnableSequence to populate completeTrace totals.
44
+ // Token / call / cost accumulators read by patchCompiledStateGraph / patchRunnableSequence.
48
45
  this.totalInputTokens = 0;
49
46
  this.totalOutputTokens = 0;
47
+ this.totalCost = 0;
50
48
  this.llmCallCount = 0;
51
49
  this.visibe = options.visibe;
52
50
  this.traceId = options.traceId;
@@ -55,17 +53,40 @@ export class LangChainCallback {
55
53
  // ---------------------------------------------------------------------------
56
54
  // LLM events
57
55
  // ---------------------------------------------------------------------------
56
+ // Called for text-completion models.
58
57
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
59
- async handleLLMStart(_llm, _messages, runId) {
60
- this.pendingLLMCalls.set(runId, Date.now());
58
+ async handleLLMStart(llm, prompts, runId) {
59
+ const model = llm?.kwargs?.model ?? llm?.kwargs?.model_name ?? undefined;
60
+ const inputText = Array.isArray(prompts) ? prompts.join('\n') : '';
61
+ this.pendingLLMCalls.set(runId, { startMs: Date.now(), model, inputText });
62
+ }
63
+ // Called for chat models (ChatOpenAI, ChatAnthropic, etc.).
64
+ // messages is BaseMessage[][] — one array per parallel completion request.
65
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
66
+ async handleChatModelStart(llm, messages, runId) {
67
+ const model = llm?.kwargs?.model ?? llm?.kwargs?.model_name ?? undefined;
68
+ let inputText = '';
69
+ try {
70
+ const msgs = messages?.[0] ?? [];
71
+ inputText = msgs.map((m) => {
72
+ const role = m?.getType?.() ?? m?._getType?.() ?? m?.role ?? 'user';
73
+ const content = typeof m?.content === 'string'
74
+ ? m.content
75
+ : JSON.stringify(m?.content ?? '');
76
+ return `${role}: ${content}`;
77
+ }).join('\n');
78
+ }
79
+ catch { /* ignore serialisation errors */ }
80
+ this.pendingLLMCalls.set(runId, { startMs: Date.now(), model, inputText });
61
81
  }
62
82
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
63
83
  async handleLLMEnd(output, runId, parentRunId) {
64
- const startMs = this.pendingLLMCalls.get(runId) ?? Date.now();
84
+ const pending = this.pendingLLMCalls.get(runId) ?? { startMs: Date.now() };
65
85
  this.pendingLLMCalls.delete(runId);
66
86
  const { inputTokens, outputTokens } = extractTokenUsage(output);
67
87
  const gen = output?.generations?.[0]?.[0];
68
- const model = gen?.generationInfo?.model ?? this.agentName;
88
+ // Prefer model saved at LLM-start, fall back to generationInfo, then agentName.
89
+ const model = pending.model ?? gen?.generationInfo?.model ?? this.agentName;
69
90
  const cost = calculateCost(model, inputTokens, outputTokens);
70
91
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
71
92
  const rawText = gen?.text ?? gen?.message?.content ?? '';
@@ -81,16 +102,15 @@ export class LangChainCallback {
81
102
  status: 'success',
82
103
  inputTokens,
83
104
  outputTokens,
84
- inputText: '', // LangChain doesn't surface the raw prompt here
105
+ inputText: pending.inputText ?? '',
85
106
  outputText,
86
- durationMs: Date.now() - startMs,
107
+ durationMs: Date.now() - pending.startMs,
87
108
  });
88
109
  this.visibe.batcher.add(this.traceId, span);
89
- // Update local accumulators (used by patchCompiledStateGraph / patchRunnableSequence).
90
110
  this.totalInputTokens += inputTokens;
91
111
  this.totalOutputTokens += outputTokens;
112
+ this.totalCost += cost;
92
113
  this.llmCallCount++;
93
- // Notify track() accumulator if running inside a group tracker.
94
114
  this._onLLMSpan?.(inputTokens, outputTokens, cost);
95
115
  }
96
116
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
@@ -139,16 +159,15 @@ export class LangChainCallback {
139
159
  }
140
160
  // ---------------------------------------------------------------------------
141
161
  // Chain events
162
+ //
163
+ // In LangGraph v1.2+, the node key is passed as the 8th `name` parameter, NOT
164
+ // via chain.name. We always store runId → spanId so nested LLM/tool calls get
165
+ // a valid parent_span_id regardless of whether the chain is a user-defined node.
142
166
  // ---------------------------------------------------------------------------
143
167
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
144
- async handleChainStart(chain, _inputs, runId, parentRunId) {
145
- // Emit an agent_start span the first time we see a named chain.
146
- const chainName = chain?.id?.at(-1) ?? '';
147
- if (chainName && !this.seenAgents.has(chainName)) {
148
- this.seenAgents.add(chainName);
149
- const spanId = this.nextSpanId();
150
- this.runIdToSpanId.set(runId, spanId);
151
- void parentRunId; // suppress unused warning
168
+ async handleChainStart(_chain, _inputs, runId, _parentRunId, _tags, _metadata, _runType, _name) {
169
+ if (!this.runIdToSpanId.has(runId)) {
170
+ this.runIdToSpanId.set(runId, this.nextSpanId());
152
171
  }
153
172
  }
154
173
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
@@ -174,7 +193,6 @@ export function patchRunnableSequence(lcModule, visibe) {
174
193
  const originalStream = RunnableSequence.prototype.stream;
175
194
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
176
195
  RunnableSequence.prototype.invoke = async function (input, config) {
177
- // If already inside a LangChain trace, pass through.
178
196
  if (activeLangChainStorage.getStore() !== undefined) {
179
197
  return originalInvoke.call(this, input, config);
180
198
  }
@@ -189,6 +207,7 @@ export function patchRunnableSequence(lcModule, visibe) {
189
207
  ...(visibe.sessionId ? { session_id: visibe.sessionId } : {}),
190
208
  });
191
209
  const cb = new LangChainCallback({ visibe, traceId, agentName: 'langchain' });
210
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
192
211
  let result;
193
212
  let status = 'completed';
194
213
  try {
@@ -205,6 +224,7 @@ export function patchRunnableSequence(lcModule, visibe) {
205
224
  ended_at: new Date().toISOString(),
206
225
  duration_ms: Date.now() - startMs,
207
226
  llm_call_count: cb.llmCallCount,
227
+ total_cost: cb.totalCost,
208
228
  total_tokens: cb.totalInputTokens + cb.totalOutputTokens,
209
229
  total_input_tokens: cb.totalInputTokens,
210
230
  total_output_tokens: cb.totalOutputTokens,
@@ -231,9 +251,6 @@ export function patchRunnableSequence(lcModule, visibe) {
231
251
  const cb = new LangChainCallback({ visibe, traceId, agentName: 'langchain' });
232
252
  let status = 'completed';
233
253
  try {
234
- // RunnableSequence.stream() is an async function (not async generator) returning
235
- // a Promise<AsyncIterable>. activeLangChainStorage.run returns that Promise,
236
- // so we must await before yield*.
237
254
  const gen = await activeLangChainStorage.run(cb, () => originalStream.call(this, input, _mergeCallbacks(config, cb)));
238
255
  yield* gen;
239
256
  }
@@ -248,6 +265,7 @@ export function patchRunnableSequence(lcModule, visibe) {
248
265
  ended_at: new Date().toISOString(),
249
266
  duration_ms: Date.now() - startMs,
250
267
  llm_call_count: cb.llmCallCount,
268
+ total_cost: cb.totalCost,
251
269
  total_tokens: cb.totalInputTokens + cb.totalOutputTokens,
252
270
  total_input_tokens: cb.totalInputTokens,
253
271
  total_output_tokens: cb.totalOutputTokens,
@@ -262,7 +280,6 @@ export function patchRunnableSequence(lcModule, visibe) {
262
280
  // ---------------------------------------------------------------------------
263
281
  // Private helpers
264
282
  // ---------------------------------------------------------------------------
265
- // Merge our callback into an existing LangChain config object.
266
283
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
267
284
  function _mergeCallbacks(config, cb) {
268
285
  if (!config)
@@ -1,5 +1,5 @@
1
1
  import { randomUUID } from 'node:crypto';
2
- import { LangChainCallback, activeLangChainStorage } from './langchain.js';
2
+ import { LangChainCallback, activeLangChainStorage, LANGGRAPH_INTERNAL_NODES } from './langchain.js';
3
3
  // ---------------------------------------------------------------------------
4
4
  // LangGraphCallback
5
5
  // Extends LangChainCallback and adds node-level agent_start spans.
@@ -9,24 +9,26 @@ export class LangGraphCallback extends LangChainCallback {
9
9
  super(options);
10
10
  this.nodeNames = new Set(options.nodeNames ?? []);
11
11
  }
12
- // Override handleChainStart to emit agent_start for known graph nodes.
12
+ // Override handleChainStart to emit agent_start spans for LangGraph nodes.
13
+ //
14
+ // In LangGraph v1.2+, the node key is the 8th `name` parameter (not chain.name).
15
+ // Internal system nodes (__start__, __end__, LangGraph) are filtered out.
13
16
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
14
- async handleChainStart(chain, inputs, runId, parentRunId) {
15
- // LangGraph may surface the node key in chain.name (run name) or chain.id[-1] (class name).
16
- const chainName = chain?.name ?? chain?.id?.at(-1) ?? '';
17
- if (chainName && this.nodeNames.has(chainName)) {
18
- const spanId = this.nextSpanId();
17
+ async handleChainStart(chain, inputs, runId, parentRunId, tags, metadata, runType, name) {
18
+ // Always track the runId so child LLM calls can resolve parent_span_id.
19
+ await super.handleChainStart(chain, inputs, runId, parentRunId, tags, metadata, runType, name);
20
+ // The node key is passed as the 8th `name` parameter in LangGraph v1.2+.
21
+ // Fall back to chain.name for older versions.
22
+ const nodeName = name ?? chain?.name ?? '';
23
+ if (nodeName && !LANGGRAPH_INTERNAL_NODES.has(nodeName)) {
24
+ // Use the spanId already assigned by super for this runId.
25
+ const spanId = this.runIdToSpanId.get(runId) ?? this.nextSpanId();
19
26
  this.runIdToSpanId.set(runId, spanId);
20
- // Emit an agent_start span for this node.
21
- // type MUST be exactly "agent_start" — the backend validates this string.
22
27
  this.visibe.batcher.add(this.traceId, this.visibe.buildAgentStartSpan({
23
28
  spanId,
24
- agentName: chainName,
29
+ agentName: nodeName,
25
30
  }));
26
- // Don't call super — we've already set the runId mapping.
27
- return;
28
31
  }
29
- await super.handleChainStart(chain, inputs, runId, parentRunId);
30
32
  }
31
33
  }
32
34
  // ---------------------------------------------------------------------------
@@ -82,6 +84,7 @@ export function patchCompiledStateGraph(lgModule, visibe) {
82
84
  ended_at: new Date().toISOString(),
83
85
  duration_ms: Date.now() - startMs,
84
86
  llm_call_count: cb.llmCallCount,
87
+ total_cost: cb.totalCost,
85
88
  total_tokens: cb.totalInputTokens + cb.totalOutputTokens,
86
89
  total_input_tokens: cb.totalInputTokens,
87
90
  total_output_tokens: cb.totalOutputTokens,
@@ -133,6 +136,7 @@ export function patchCompiledStateGraph(lgModule, visibe) {
133
136
  ended_at: new Date().toISOString(),
134
137
  duration_ms: Date.now() - startMs,
135
138
  llm_call_count: cb.llmCallCount,
139
+ total_cost: cb.totalCost,
136
140
  total_tokens: cb.totalInputTokens + cb.totalOutputTokens,
137
141
  total_input_tokens: cb.totalInputTokens,
138
142
  total_output_tokens: cb.totalOutputTokens,