@visibe.ai/node 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. package/README.md +330 -0
  2. package/dist/cjs/api.js +92 -0
  3. package/dist/cjs/client.js +242 -0
  4. package/dist/cjs/index.js +216 -0
  5. package/dist/cjs/integrations/anthropic.js +277 -0
  6. package/dist/cjs/integrations/base.js +32 -0
  7. package/dist/cjs/integrations/bedrock.js +442 -0
  8. package/dist/cjs/integrations/group-context.js +10 -0
  9. package/dist/cjs/integrations/langchain.js +274 -0
  10. package/dist/cjs/integrations/langgraph.js +173 -0
  11. package/dist/cjs/integrations/openai.js +447 -0
  12. package/dist/cjs/integrations/vercel-ai.js +261 -0
  13. package/dist/cjs/types/index.js +5 -0
  14. package/dist/cjs/utils.js +122 -0
  15. package/dist/esm/api.js +87 -0
  16. package/dist/esm/client.js +238 -0
  17. package/dist/esm/index.js +209 -0
  18. package/dist/esm/integrations/anthropic.js +272 -0
  19. package/dist/esm/integrations/base.js +28 -0
  20. package/dist/esm/integrations/bedrock.js +438 -0
  21. package/dist/esm/integrations/group-context.js +7 -0
  22. package/dist/esm/integrations/langchain.js +269 -0
  23. package/dist/esm/integrations/langgraph.js +168 -0
  24. package/dist/esm/integrations/openai.js +442 -0
  25. package/dist/esm/integrations/vercel-ai.js +258 -0
  26. package/dist/esm/types/index.js +4 -0
  27. package/dist/esm/utils.js +116 -0
  28. package/dist/types/api.d.ts +27 -0
  29. package/dist/types/client.d.ts +50 -0
  30. package/dist/types/index.d.ts +7 -0
  31. package/dist/types/integrations/anthropic.d.ts +9 -0
  32. package/dist/types/integrations/base.d.ts +17 -0
  33. package/dist/types/integrations/bedrock.d.ts +11 -0
  34. package/dist/types/integrations/group-context.d.ts +12 -0
  35. package/dist/types/integrations/langchain.d.ts +40 -0
  36. package/dist/types/integrations/langgraph.d.ts +13 -0
  37. package/dist/types/integrations/openai.d.ts +11 -0
  38. package/dist/types/integrations/vercel-ai.d.ts +2 -0
  39. package/dist/types/types/index.d.ts +21 -0
  40. package/dist/types/utils.d.ts +23 -0
  41. package/package.json +80 -0
@@ -0,0 +1,258 @@
1
+ import { randomUUID } from 'node:crypto';
2
+ import { activeGroupTraceStorage } from './group-context';
3
+ import { calculateCost } from '../utils';
4
// ---------------------------------------------------------------------------
// Provider detection — Vercel AI SDK exposes model.provider as e.g.
// "openai.chat", "anthropic.messages". Strip the suffix to get a clean name.
// ---------------------------------------------------------------------------
// The SDK reports bare provider names ("openai", "anthropic"), not full model
// IDs, so detectProvider() from ../utils (which expects model IDs) does not
// apply here. Normalise via a direct lookup table instead, falling back to
// the raw prefix for providers we don't know about.
const _VERCEL_PROVIDER_MAP = {
    openai: 'openai',
    anthropic: 'anthropic',
    amazon: 'amazon',
    meta: 'meta',
    mistral: 'mistral',
    cohere: 'cohere',
    ai21: 'ai21',
    google: 'google',
    deepseek: 'deepseek',
};
/**
 * Normalise a Vercel AI SDK provider string ("openai.chat") to a clean
 * provider name ("openai"). Returns 'unknown' for missing/empty input.
 */
function detectVercelProvider(modelProvider) {
    if (!modelProvider) {
        return 'unknown';
    }
    const [rawPrefix] = modelProvider.split('.');
    const prefix = rawPrefix.toLowerCase();
    // Unmapped prefixes pass through unchanged — the Vercel AI SDK already
    // provides a clean provider string (e.g. 'mock-provider', 'anthropic').
    return _VERCEL_PROVIDER_MAP[prefix] ?? prefix;
}
30
// ---------------------------------------------------------------------------
// Shared span sender — used by all three wrappers
// ---------------------------------------------------------------------------
/**
 * Record one completed LLM call as an llm_call span and, when NOT inside a
 * track() group, as a complete standalone single-span trace.
 *
 * Inside a track() group: the span is routed onto the group's shared traceId
 * and token/cost totals are reported back via groupCtx.onLLMSpan().
 * Outside a group: createTrace → batcher.add → flush → completeTrace all
 * happen within this call, in that order.
 *
 * @param visibe       Visibe client (apiClient, batcher, contentLimit, sessionId).
 * @param agentName    Agent name reported on the trace and span.
 * @param model        Model ID — used for cost lookup and reporting.
 * @param provider     Provider name (already resolved by detectVercelProvider).
 * @param inputTokens  Prompt-side token count.
 * @param outputTokens Completion-side token count.
 * @param inputText    Prompt text (truncated to visibe.contentLimit).
 * @param outputText   Completion text (truncated to visibe.contentLimit).
 * @param durationMs   Wall-clock duration of the LLM call.
 */
async function sendLLMTrace(visibe, agentName, model, provider, inputTokens, outputTokens, inputText, outputText, durationMs) {
    // Check if we are inside a track() group — route into the shared trace if so.
    const groupCtx = activeGroupTraceStorage.getStore();
    const traceId = groupCtx?.traceId ?? randomUUID();
    if (!groupCtx) {
        // Standalone call: create the trace up front so the span has a parent.
        await visibe.apiClient.createTrace({
            trace_id: traceId,
            name: agentName,
            framework: 'vercel_ai',
            started_at: new Date().toISOString(),
            ...(visibe.sessionId ? { session_id: visibe.sessionId } : {}),
        });
    }
    const cost = calculateCost(model, inputTokens, outputTokens);
    // Build span manually so we can override the provider from Vercel's metadata.
    // NOTE(review): the span_id is always 'step_1' — inside a track() group with
    // several Vercel calls, sibling spans share this ID; confirm the backend
    // tolerates duplicate span IDs within one trace.
    const span = {
        span_id: `step_1`,
        type: 'llm_call',
        timestamp: new Date().toISOString(),
        agent_name: agentName,
        model,
        provider,
        status: 'success',
        description: `LLM Call using ${model}`,
        input_tokens: inputTokens,
        output_tokens: outputTokens,
        cost,
        duration_ms: durationMs,
        input_text: inputText.slice(0, visibe.contentLimit),
        output_text: outputText.slice(0, visibe.contentLimit),
    };
    visibe.batcher.add(traceId, span);
    // Notify the group tracker (if inside track()) about this LLM span.
    groupCtx?.onLLMSpan(inputTokens, outputTokens, cost);
    if (!groupCtx) {
        // flush() is fire-and-forget (returns void); only completeTrace is awaited.
        visibe.batcher.flush();
        const sent = await visibe.apiClient.completeTrace(traceId, {
            status: 'completed',
            ended_at: new Date().toISOString(),
            duration_ms: durationMs,
            llm_call_count: 1,
            prompt: inputText.slice(0, visibe.contentLimit),
            model,
            total_cost: cost,
            total_tokens: inputTokens + outputTokens,
            total_input_tokens: inputTokens,
            total_output_tokens: outputTokens,
        });
        const tokens = (inputTokens + outputTokens).toLocaleString();
        const sentStr = sent ? 'OK' : 'FAILED';
        // NOTE(review): this summary log is unconditional — it does not check
        // visibe.debug; confirm that is intended.
        console.log(`[Visibe] Trace: ${agentName} | 1 LLM calls | ${tokens} tokens | $${cost.toFixed(6)} | ${(durationMs / 1000).toFixed(1)}s | 0 tool calls | status: completed | model: ${model} | sent: ${sentStr}`);
    }
}
86
// ---------------------------------------------------------------------------
// Extract prompt text from Vercel AI SDK params
// ---------------------------------------------------------------------------
/**
 * Pull the prompt text out of generateText/streamText/generateObject params:
 * a plain `prompt` string wins; otherwise the most recent user message is
 * used (its first text part for multi-part content). Returns '' when nothing
 * suitable is found.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function extractVercelPrompt(params) {
    if (typeof params.prompt === 'string') {
        return params.prompt;
    }
    if (!Array.isArray(params.messages)) {
        return '';
    }
    // Walk backwards: the last user message is the prompt of interest.
    for (const msg of [...params.messages].reverse()) {
        if (msg?.role !== 'user') {
            continue;
        }
        const { content } = msg;
        if (typeof content === 'string') {
            return content;
        }
        if (Array.isArray(content)) {
            // Multi-part content: take the first text part ('' when absent).
            const textPart = content.find((p) => p.type === 'text');
            return textPart?.text ?? '';
        }
        // Content is neither string nor array — keep scanning earlier messages.
    }
    return '';
}
109
// ---------------------------------------------------------------------------
// patchVercelAI — patches the ai module using a require.cache Proxy.
//
// ai@6 exports are non-configurable ESM→CJS getters — direct assignment and
// Object.defineProperty both fail. The only reliable approach is to replace
// require.cache[key].exports with a Proxy that intercepts property reads, so
// that any subsequent require('ai') call (including destructuring) returns the
// patched wrappers.
//
// Called from index.ts patchFramework(). Returns a restore function.
// ---------------------------------------------------------------------------
/**
 * Wrap generateText, streamText and generateObject on the given `ai` module
 * object so each call is traced via sendLLMTrace().
 *
 * @param aiModule  The already-required `ai` module namespace object.
 * @param visibe    Visibe client used for trace/span delivery.
 * @param agentName Agent name reported on traces (defaults to 'vercel-ai').
 * @returns Restore function that undoes the direct-assignment patches and
 *          resets the require.cache entry.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export function patchVercelAI(aiModule, visibe, agentName = 'vercel-ai') {
    const origGenerateText = aiModule.generateText;
    const origStreamText = aiModule.streamText;
    const origGenerateObject = aiModule.generateObject;
    // Build patched wrappers (declared as variables so the Proxy's get trap can close over them).
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let patchedGenerateText;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let patchedStreamText;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let patchedGenerateObject;
    // --- generateText ---
    if (origGenerateText) {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        patchedGenerateText = async (params) => {
            const start = Date.now();
            const result = await origGenerateText(params);
            const model = params.model?.modelId ?? 'unknown';
            const provider = detectVercelProvider(params.model?.provider);
            // Await so the trace is guaranteed to be created before this function returns.
            // This ensures test helpers (captureTraceId) capture the traceId reliably.
            // Tracing failures are swallowed (.catch) so they never break the user's call.
            await sendLLMTrace(visibe, agentName, model, provider,
            // ai@3/4 uses promptTokens; ai@6 uses inputTokens
            result.usage?.promptTokens ?? result.usage?.inputTokens ?? 0, result.usage?.completionTokens ?? result.usage?.outputTokens ?? 0, extractVercelPrompt(params), result.text ?? '', Date.now() - start).catch(() => { });
            return result;
        };
    }
    // --- streamText ---
    if (origStreamText) {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        patchedStreamText = (params) => {
            const start = Date.now();
            const stream = origStreamText(params);
            const model = params.model?.modelId ?? 'unknown';
            const provider = detectVercelProvider(params.model?.provider);
            // The Vercel AI SDK's streamText result has a `.usage` promise that
            // resolves after the stream is consumed.
            // Tracing is fire-and-forget here — the stream is returned immediately.
            if (stream?.usage && typeof stream.usage.then === 'function') {
                stream.usage.then((usage) => {
                    sendLLMTrace(visibe, agentName, model, provider,
                    // ai@3/4 uses promptTokens; ai@6 uses inputTokens
                    usage?.promptTokens ?? usage?.inputTokens ?? 0, usage?.completionTokens ?? usage?.outputTokens ?? 0, extractVercelPrompt(params), '', // full text not easily available without consuming the stream
                    Date.now() - start).catch(() => { });
                }).catch(() => { });
            }
            return stream;
        };
    }
    // --- generateObject ---
    if (origGenerateObject) {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        patchedGenerateObject = async (params) => {
            const start = Date.now();
            const result = await origGenerateObject(params);
            const model = params.model?.modelId ?? 'unknown';
            const provider = detectVercelProvider(params.model?.provider);
            // Output text is the generated object serialised to JSON.
            await sendLLMTrace(visibe, agentName, model, provider,
            // ai@3/4 uses promptTokens; ai@6 uses inputTokens
            result.usage?.promptTokens ?? result.usage?.inputTokens ?? 0, result.usage?.completionTokens ?? result.usage?.outputTokens ?? 0, extractVercelPrompt(params), JSON.stringify(result.object ?? ''), Date.now() - start).catch(() => { });
            return result;
        };
    }
    // ---------------------------------------------------------------------------
    // 1. Try direct property assignment first.
    // Works for plain objects (tests) and ai@3/4 where exports are configurable.
    // For ai@6 with non-configurable exports this silently fails; the Proxy (below)
    // handles that case.
    // ---------------------------------------------------------------------------
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const directRestores = [];
    try {
        if (patchedGenerateText) {
            const orig = aiModule.generateText;
            aiModule.generateText = patchedGenerateText;
            directRestores.push(() => { aiModule.generateText = orig; });
        }
    }
    catch { /* non-configurable */ }
    try {
        if (patchedStreamText) {
            const orig = aiModule.streamText;
            aiModule.streamText = patchedStreamText;
            directRestores.push(() => { aiModule.streamText = orig; });
        }
    }
    catch { /* non-configurable */ }
    try {
        if (patchedGenerateObject) {
            const orig = aiModule.generateObject;
            aiModule.generateObject = patchedGenerateObject;
            directRestores.push(() => { aiModule.generateObject = orig; });
        }
    }
    catch { /* non-configurable */ }
    // ---------------------------------------------------------------------------
    // 2. Replace require.cache exports with a Proxy (ai@6 fallback).
    //
    // ai@6's exports are Object.defineProperty'd with configurable:false — direct
    // assignment silently fails and Object.defineProperty throws. By replacing the
    // module's cache entry with a Proxy we intercept property reads on future
    // require('ai') calls (including destructuring) and return our wrappers.
    // ---------------------------------------------------------------------------
    let cacheKey;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let originalExports;
    try {
        // Locate the cache entry whose exports object is the module we were given.
        // eslint-disable-next-line @typescript-eslint/no-require-imports
        cacheKey = Object.keys(require.cache).find(k => require.cache[k]?.exports === aiModule);
        if (cacheKey && require.cache[cacheKey]) {
            originalExports = require.cache[cacheKey].exports;
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            require.cache[cacheKey].exports = new Proxy(aiModule, {
                // Intercept reads of the three patched names; everything else
                // is forwarded to the real module object.
                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                get(target, prop) {
                    if (prop === 'generateText' && patchedGenerateText)
                        return patchedGenerateText;
                    if (prop === 'streamText' && patchedStreamText)
                        return patchedStreamText;
                    if (prop === 'generateObject' && patchedGenerateObject)
                        return patchedGenerateObject;
                    return Reflect.get(target, prop);
                },
            });
        }
    }
    catch { /* require.cache not available in non-CJS contexts (e.g. pure ESM) */ }
    // Restore function — undo direct assignments and reset require.cache.
    return () => {
        for (const restore of directRestores)
            restore();
        try {
            if (cacheKey && require.cache[cacheKey]) {
                require.cache[cacheKey].exports = originalExports;
            }
        }
        catch { /* package may have been unloaded */ }
    };
}
@@ -0,0 +1,4 @@
1
// ---------------------------------------------------------------------------
// Public SDK options
// ---------------------------------------------------------------------------
// Compiled output of a types-only module: every declaration was erased by the
// TypeScript compiler, leaving only this empty export to keep the file an ES module.
export {};
@@ -0,0 +1,116 @@
1
// Prices are per 1,000 tokens in USD.
// Matching (see calculateCost) is case-insensitive SUBSTRING matching with
// longer keys tried first, so e.g. "gpt-4o-mini" wins over "gpt-4o"/"gpt-4"
// and region-prefixed Bedrock IDs like "us.anthropic.claude-3-haiku-…" still
// resolve. Insertion order of the keys below does not matter — _pricingKeys
// is sorted once at module load.
export const MODEL_PRICING = {
    // OpenAI
    'gpt-4o-mini': { input: 0.00015, output: 0.0006 },
    'gpt-4.1-mini': { input: 0.0004, output: 0.0016 },
    'gpt-4.1-nano': { input: 0.0001, output: 0.0004 },
    'gpt-4.1': { input: 0.002, output: 0.008 },
    'gpt-4-turbo': { input: 0.01, output: 0.03 },
    'gpt-4o': { input: 0.0025, output: 0.01 },
    'gpt-4': { input: 0.03, output: 0.06 },
    'gpt-3.5-turbo': { input: 0.0015, output: 0.002 },
    'o1-mini': { input: 0.003, output: 0.012 },
    'o3-mini': { input: 0.0011, output: 0.0044 },
    'o1': { input: 0.015, output: 0.06 },
    // Anthropic (direct API)
    'claude-3.5-sonnet': { input: 0.003, output: 0.015 },
    'claude-3.5-haiku': { input: 0.0008, output: 0.004 },
    'claude-3-opus': { input: 0.015, output: 0.075 },
    'claude-3-sonnet': { input: 0.003, output: 0.015 },
    'claude-3-haiku': { input: 0.00025, output: 0.00125 },
    // Google (via OpenAI compat)
    'gemini-1.5-pro': { input: 0.00125, output: 0.005 },
    'gemini-1.5-flash': { input: 0.000075, output: 0.0003 },
    'gemini-2.0-flash': { input: 0.0001, output: 0.0004 },
    // DeepSeek
    'deepseek-reasoner': { input: 0.00055, output: 0.00219 },
    'deepseek-chat': { input: 0.00027, output: 0.0011 },
    // AWS Bedrock — Anthropic
    'anthropic.claude-3.5-sonnet': { input: 0.003, output: 0.015 },
    'anthropic.claude-3.5-haiku': { input: 0.0008, output: 0.004 },
    'anthropic.claude-3-opus': { input: 0.015, output: 0.075 },
    'anthropic.claude-3-sonnet': { input: 0.003, output: 0.015 },
    'anthropic.claude-3-haiku': { input: 0.00025, output: 0.00125 },
    'anthropic.claude-4-sonnet': { input: 0.003, output: 0.015 },
    'anthropic.claude-4-opus': { input: 0.015, output: 0.075 },
    // AWS Bedrock — Meta
    'meta.llama3-1-405b-instruct': { input: 0.00532, output: 0.016 },
    'meta.llama3-1-70b-instruct': { input: 0.00265, output: 0.0035 },
    'meta.llama3-1-8b-instruct': { input: 0.0003, output: 0.0006 },
    'meta.llama3-70b-instruct': { input: 0.00265, output: 0.0035 },
    'meta.llama3-8b-instruct': { input: 0.0003, output: 0.0006 },
    // AWS Bedrock — Amazon
    'amazon.titan-text-express': { input: 0.0002, output: 0.0006 },
    'amazon.titan-text-lite': { input: 0.00015, output: 0.0002 },
    'amazon.nova-pro': { input: 0.0008, output: 0.0032 },
    'amazon.nova-lite': { input: 0.00006, output: 0.00024 },
    'amazon.nova-micro': { input: 0.000035, output: 0.00014 },
    // AWS Bedrock — Mistral / Cohere / AI21
    'mistral.mistral-large': { input: 0.004, output: 0.012 },
    'mistral.mistral-small': { input: 0.001, output: 0.003 },
    'mistral.mixtral-8x7b-instruct': { input: 0.00045, output: 0.0007 },
    'cohere.command-r-plus': { input: 0.003, output: 0.015 },
    'cohere.command-r': { input: 0.0005, output: 0.0015 },
    'ai21.jamba-1.5-large': { input: 0.002, output: 0.008 },
    'ai21.jamba-1.5-mini': { input: 0.0002, output: 0.0004 },
};
// Sorted keys (longest first) built once at module load — avoids re-sorting on every call.
const _pricingKeys = Object.keys(MODEL_PRICING).sort((a, b) => b.length - a.length);
/**
 * Calculate the USD cost for a single LLM call.
 *
 * Uses longest-key-first SUBSTRING matching (not prefix matching): the first
 * pricing key contained anywhere in the lowercased model ID wins, so
 * "gpt-4o-mini" matches before "gpt-4" and "us.anthropic.claude-3-haiku-…"
 * still matches "anthropic.claude-3-haiku". Returns 0 for unknown models.
 * Always returns parseFloat(x.toFixed(6)) — toFixed(4) rounds tiny costs to $0.
 */
export function calculateCost(model, inputTokens, outputTokens) {
    const modelLower = model.toLowerCase();
    const match = _pricingKeys.find(k => modelLower.includes(k));
    if (!match)
        return 0;
    const { input, output } = MODEL_PRICING[match];
    const cost = (inputTokens / 1000) * input + (outputTokens / 1000) * output;
    return parseFloat(cost.toFixed(6));
}
75
/**
 * Truncate text to a maximum character limit.
 *
 * Returns '' for null/undefined input and for non-positive limits.
 * Appends '...' when the text was actually cut, so the result can be up to
 * limit + 3 characters long.
 */
export function truncate(text, limit) {
    if (text == null)
        return '';
    // Non-positive limits mean "keep nothing". This also guards against a
    // negative limit reaching slice(), which would otherwise cut from the END
    // of the string (e.g. slice(0, -2)) and return garbage.
    if (limit <= 0)
        return '';
    if (text.length <= limit)
        return text;
    return text.slice(0, limit) + '...';
}
89
/**
 * Detect the LLM provider from a model ID string.
 * The result is used as the `provider` field on llm_call spans.
 */
export function detectProvider(model) {
    const id = model.toLowerCase();
    // Ordered prefix rules — the first rule with a matching prefix wins.
    const rules = [
        { prefixes: ['gpt-', 'o1', 'o3'], provider: 'openai' },
        { prefixes: ['claude-', 'anthropic.'], provider: 'anthropic' },
        { prefixes: ['amazon.', 'nova', 'titan'], provider: 'amazon' },
        { prefixes: ['meta.', 'llama'], provider: 'meta' },
        { prefixes: ['mistral.', 'mixtral'], provider: 'mistral' },
        { prefixes: ['cohere.'], provider: 'cohere' },
        { prefixes: ['ai21.'], provider: 'ai21' },
        { prefixes: ['gemini'], provider: 'google' },
        { prefixes: ['deepseek'], provider: 'deepseek' },
    ];
    for (const rule of rules) {
        if (rule.prefixes.some((p) => id.startsWith(p))) {
            return rule.provider;
        }
    }
    return 'unknown';
}
export const LLM_CONTENT_LIMIT = 1000; // default for llm_call spans
export const TOOL_CONTENT_LIMIT = 500; // default for tool_call spans
@@ -0,0 +1,27 @@
1
/**
 * Low-level HTTP client for the Visibe ingestion API.
 *
 * All send methods resolve to a boolean success flag (rather than throwing),
 * so callers can fire-and-forget without try/catch.
 */
export declare class APIClient {
    /** Base URL of the ingestion API. */
    readonly apiUrl: string;
    private readonly apiKey;
    private readonly timeout;
    // Presumably false disables network sends (e.g. when no apiKey is set) — confirm in api.js.
    readonly _enabled: boolean;
    constructor(options: {
        apiUrl?: string;
        apiKey?: string;
        timeout?: number;
    });
    private _request;
    /** Create a trace record; resolves true on success. */
    createTrace(data: object): Promise<boolean>;
    /** Send a single span for an existing trace. */
    sendSpan(traceId: string, span: object): Promise<boolean>;
    /** Send several spans for one trace in a single request. */
    sendSpansBatch(traceId: string, spans: object[]): Promise<boolean>;
    /** Mark a trace finished with summary data. */
    completeTrace(traceId: string, data: object): Promise<boolean>;
}
17
/**
 * Buffers spans and delivers them to the APIClient in batches.
 * Note flush() returns void — delivery is fire-and-forget; only shutdown()
 * exposes a Promise to await.
 */
export declare class SpanBatcher {
    private readonly api;
    private buffer;
    // batchSize / flushInterval defaults are set in the implementation (api.js).
    private readonly batchSize;
    private readonly flushInterval;
    private timer;
    constructor(api: APIClient);
    /** Queue one span for the given trace. */
    add(traceId: string, span: object): void;
    /** Send everything currently buffered; does not await delivery. */
    flush(): void;
    /** Final flush before teardown — presumably also clears the timer; confirm in api.js. */
    shutdown(): Promise<void>;
}
@@ -0,0 +1,50 @@
1
+ import { APIClient, SpanBatcher } from './api';
2
+ import type { InitOptions } from './types';
3
+ export type { InitOptions };
4
/**
 * Main SDK client returned by init().
 * Owns the API client and span batcher, and exposes instrument()/track()
 * plus the span-builder helpers used by the framework integrations.
 */
export declare class Visibe {
    readonly apiClient: APIClient;
    readonly batcher: SpanBatcher;
    /** Max characters of input/output text stored per span. */
    readonly contentLimit: number;
    /** Optional session identifier attached to created traces. */
    readonly sessionId: string | undefined;
    readonly debug: boolean;
    // Registry of patched clients and their restore functions — confirm shape in client.js.
    private readonly _instrumented;
    constructor(options: InitOptions);
    /** Patch a provider client instance in place; undo with uninstrument(). */
    instrument(client: object, options?: {
        name?: string;
    }): void;
    /** Undo a previous instrument() on the same client object. */
    uninstrument(client: object): void;
    /** Run fn() with all spans from `client` grouped into one trace named `name`. */
    track<T>(client: object, name: string, fn: () => Promise<T>): Promise<T>;
    /** Force the span batcher to send its buffer now (fire-and-forget). */
    flushSpans(): void;
    /** Build an llm_call span object (plain data — not sent by this method). */
    buildLLMSpan(opts: {
        spanId: string;
        parentSpanId?: string;
        agentName: string;
        model: string;
        status: 'success' | 'failed';
        inputTokens: number;
        outputTokens: number;
        inputText: string;
        outputText: string;
        durationMs: number;
    }): object;
    /** Build a tool_call span object. */
    buildToolSpan(opts: {
        spanId: string;
        parentSpanId?: string;
        toolName: string;
        agentName: string;
        status: 'success' | 'failed';
        durationMs: number;
        inputText: string;
        outputText: string;
    }): object;
    /** Build an error span object. */
    buildErrorSpan(opts: {
        spanId: string;
        errorType: string;
        errorMessage: string;
        durationMs?: number;
    }): object;
    /** Build an agent_start span object. */
    buildAgentStartSpan(opts: {
        spanId: string;
        agentName: string;
    }): object;
}
@@ -0,0 +1,7 @@
1
+ import { Visibe } from './client';
2
+ import type { InitOptions } from './types';
3
/** Probe for supported frameworks — presumably which are require()-able; confirm in index.js. */
export declare function detectFrameworks(): Record<string, boolean>;
/** Initialise the SDK and return the Visibe client. */
export declare function init(options?: InitOptions): Visibe;
/** Flush pending telemetry and tear the SDK down. */
export declare function shutdown(): Promise<void>;
export { Visibe } from './client';
export type { InitOptions, TraceSummary, SpanType, SpanStatus, TraceStatus, LLMProvider } from './types';
@@ -0,0 +1,9 @@
1
+ import { BaseIntegration } from './base';
2
+ import type { Visibe } from '../client';
3
/** Integration that patches an Anthropic SDK client instance (contract in base.d.ts). */
export declare class AnthropicIntegration extends BaseIntegration {
    // Presumably guards against double-tracing when create() runs inside a stream — confirm in anthropic.js.
    private _insideStream;
    /** Patch `client`; the returned function undoes the patch. */
    patchClient(client: any, agentName: string): () => void;
    private _wrapCreate;
    private _wrapStream;
}
/** Convenience wrapper: patch `client` with a fresh integration and return the restore function. */
export declare function patchAnthropicClient(client: any, agentName: string, visibe: Visibe): () => void;
@@ -0,0 +1,17 @@
1
+ import type { Visibe } from '../client';
2
/**
 * Shared base for all framework integrations.
 *
 * Each integration subclass implements `patchClient()` which patches a specific
 * client instance and returns a restore function that undoes the patch.
 * The restore function is stored by `Visibe.instrument()` and called on
 * `Visibe.uninstrument()`.
 */
export declare abstract class BaseIntegration {
    protected readonly visibe: Visibe;
    constructor(visibe: Visibe);
    /** Patch `client` in place; the returned function reverses the patch. */
    abstract patchClient(client: object, agentName: string): () => void;
    // Counter backing nextSpanId() — confirm the ID format in base.js.
    private _stepCounter;
    /** Next sequential span ID within the current trace. */
    protected nextSpanId(): string;
    /** Generate a fresh trace ID. */
    protected newTraceId(): string;
}
@@ -0,0 +1,11 @@
1
+ import { BaseIntegration } from './base';
2
+ import { detectProvider } from '../utils';
3
+ import type { Visibe } from '../client';
4
/** Integration that patches an AWS Bedrock runtime client (Converse / InvokeModel / ConverseStream wrappers). */
export declare class BedrockIntegration extends BaseIntegration {
    /** Patch `client`; the returned function undoes the patch. */
    patchClient(client: any, agentName: string): () => void;
    private _traceConverse;
    private _traceInvokeModel;
    private _traceConverseStream;
}
/** Convenience wrapper: patch `client` with a fresh integration and return the restore function. */
export declare function patchBedrockClient(client: any, agentName: string, visibe: Visibe): () => void;
// Re-exported so Bedrock callers can resolve providers from model IDs without importing ../utils directly.
export { detectProvider };
@@ -0,0 +1,12 @@
1
+ import { AsyncLocalStorage } from 'node:async_hooks';
2
/** Per-group accumulator shared between Visibe.track() and the integrations. */
export interface GroupContext {
    /** Trace ID that all spans inside this track() call attach to. */
    readonly traceId: string;
    /** Invoked once per LLM span so track() can accumulate token/cost totals. */
    onLLMSpan: (inputTokens: number, outputTokens: number, cost: number) => void;
    /** Invoked once per tool span (count only — no payload). */
    onToolSpan: () => void;
}
/**
 * Set by Visibe.track() around the user's fn().
 * All integrations read from this store to route spans into the shared group trace
 * and report token/cost totals back to the track() accumulator.
 */
export declare const activeGroupTraceStorage: AsyncLocalStorage<GroupContext>;
@@ -0,0 +1,40 @@
1
+ import { AsyncLocalStorage } from 'node:async_hooks';
2
+ import type { Visibe } from '../client';
3
/** Holds the callback active for the current async context (set around chain runs). */
export declare const activeLangChainStorage: AsyncLocalStorage<LangChainCallback>;
/**
 * LangChain callback handler that converts run events into Visibe spans.
 * One instance corresponds to one trace (traceId fixed at construction).
 */
export declare class LangChainCallback {
    readonly name = "visibe-langchain-callback";
    // awaitHandlers/raiseError mirror LangChain's callback-handler flags:
    // handlers are awaited, and handler errors do not propagate to the chain.
    readonly awaitHandlers = true;
    readonly raiseError = false;
    protected readonly visibe: Visibe;
    protected readonly traceId: string;
    protected readonly agentName: string;
    /** Maps LangChain runIds to the span IDs we emitted for them. */
    protected runIdToSpanId: Map<string, string>;
    /** runId → start timestamp (ms) for in-flight LLM calls. */
    protected pendingLLMCalls: Map<string, number>;
    /** runId → start timestamp + captured input for in-flight tool calls. */
    protected pendingToolCalls: Map<string, {
        startMs: number;
        inputText: string;
    }>;
    protected stepCounter: number;
    /** Next sequential span ID for this trace. */
    protected nextSpanId(): string;
    // Tracks agent names already announced — presumably to emit agent_start once per agent; confirm in langchain.js.
    protected seenAgents: Set<string>;
    /** Running totals exposed so track()/callers can read them after the run. */
    totalInputTokens: number;
    totalOutputTokens: number;
    llmCallCount: number;
    constructor(options: {
        visibe: Visibe;
        traceId: string;
        agentName: string;
    });
    handleLLMStart(_llm: any, _messages: any[], runId: string): Promise<void>;
    handleLLMEnd(output: any, runId: string, parentRunId?: string): Promise<void>;
    handleLLMError(err: Error, runId: string): Promise<void>;
    handleToolStart(_tool: any, input: string, runId: string): Promise<void>;
    handleToolEnd(output: string, runId: string, parentRunId?: string): Promise<void>;
    handleToolError(err: Error, runId: string): Promise<void>;
    handleChainStart(chain: any, _inputs: any, runId: string, parentRunId?: string): Promise<void>;
    handleChainEnd(_outputs: any, _runId: string): Promise<void>;
    handleChainError(err: Error, _runId: string): Promise<void>;
    /** Optional hooks wired up by track() to feed the group accumulator. */
    _onLLMSpan?: (inputTokens: number, outputTokens: number, cost: number) => void;
    _onToolSpan?: () => void;
}
/** Patch a LangChain module's RunnableSequence so runs are traced; returns a restore function. */
export declare function patchRunnableSequence(lcModule: any, visibe: Visibe): () => void;
@@ -0,0 +1,13 @@
1
+ import { LangChainCallback } from './langchain';
2
+ import type { Visibe } from '../client';
3
/**
 * LangChain callback specialised for LangGraph: overrides handleChainStart
 * to treat graph nodes as agents. Node matching is driven by `nodeNames`
 * — exact semantics live in langgraph.js; confirm there.
 */
export declare class LangGraphCallback extends LangChainCallback {
    private readonly nodeNames;
    constructor(options: {
        visibe: Visibe;
        traceId: string;
        agentName: string;
        nodeNames?: string[];
    });
    handleChainStart(chain: any, inputs: any, runId: string, parentRunId?: string): Promise<void>;
}
/** Patch a LangGraph module's CompiledStateGraph so graph runs are traced; returns a restore function. */
export declare function patchCompiledStateGraph(lgModule: any, visibe: Visibe): () => void;
@@ -0,0 +1,11 @@
1
+ import { AsyncLocalStorage } from 'node:async_hooks';
2
+ import { BaseIntegration } from './base';
3
+ import type { Visibe } from '../client';
4
// NOTE(review): `activeLangChainStorage` is also declared in langchain.d.ts with a
// narrower element type — likely a duplicated declaration; confirm which is canonical.
export declare const activeLangChainStorage: AsyncLocalStorage<any>;
/** Integration that patches an OpenAI SDK client (chat-completions create, streaming, and the responses API). */
export declare class OpenAIIntegration extends BaseIntegration {
    /** Patch `client`; the returned function undoes the patch. */
    patchClient(client: any, agentName: string): () => void;
    private _wrapCreate;
    private _wrapStream;
    private _wrapResponsesCreate;
}
/** Convenience wrapper: patch `client` with a fresh integration and return the restore function. */
export declare function patchOpenAIClient(client: any, agentName: string, visibe: Visibe): () => void;
@@ -0,0 +1,2 @@
1
+ import type { Visibe } from '../client';
2
/** Patch generateText/streamText/generateObject on the `ai` module object; returns a restore function. */
export declare function patchVercelAI(aiModule: any, visibe: Visibe, agentName?: string): () => void;
@@ -0,0 +1,21 @@
1
/** Options accepted by init() / the Visibe constructor. */
export interface InitOptions {
    apiKey?: string;
    apiUrl?: string;
    // Presumably restricts auto-instrumentation to these framework names — confirm accepted values in index.js.
    frameworks?: string[];
    /** Max characters of input/output text stored per span. */
    contentLimit?: number;
    /** Session identifier attached to created traces. */
    sessionId?: string;
    debug?: boolean;
}
/** Discriminator values for the `type` field on spans. */
export type SpanType = 'llm_call' | 'tool_call' | 'error' | 'agent_start';
export type SpanStatus = 'success' | 'failed';
export type TraceStatus = 'completed' | 'failed';
/** Provider names produced by detectProvider(); 'unknown' is the fallback. */
export type LLMProvider = 'openai' | 'anthropic' | 'amazon' | 'meta' | 'mistral' | 'cohere' | 'ai21' | 'google' | 'deepseek' | 'unknown';
/** Aggregated per-trace statistics. */
export interface TraceSummary {
    name: string;
    llmCallCount: number;
    toolCallCount: number;
    totalTokens: number;
    totalCost: number;
    durationMs: number;
    status: TraceStatus;
}
@@ -0,0 +1,23 @@
1
/** Per-1K-token USD prices keyed by model-ID substring (longest key matched first). */
export declare const MODEL_PRICING: Record<string, {
    input: number;
    output: number;
}>;
/**
 * Calculate the USD cost for a single LLM call.
 * Uses longest-key-first substring matching so "gpt-4o-mini" matches before "gpt-4".
 * Returns 0 for unknown models.
 * Always returns parseFloat(x.toFixed(6)) — toFixed(4) rounds tiny costs to $0.
 */
export declare function calculateCost(model: string, inputTokens: number, outputTokens: number): number;
/**
 * Truncate text to a maximum character limit.
 * Returns '' for null/undefined. Appends '...' when truncated.
 */
export declare function truncate(text: string | null | undefined, limit: number): string;
/**
 * Detect the LLM provider from a model ID string.
 * The result is used as the `provider` field on llm_call spans.
 */
export declare function detectProvider(model: string): string;
/** Default max characters of input/output text stored on llm_call spans. */
export declare const LLM_CONTENT_LIMIT = 1000;
/** Default max characters stored on tool_call spans. */
export declare const TOOL_CONTENT_LIMIT = 500;