@visibe.ai/node 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +330 -0
  2. package/dist/cjs/api.js +92 -0
  3. package/dist/cjs/client.js +242 -0
  4. package/dist/cjs/index.js +216 -0
  5. package/dist/cjs/integrations/anthropic.js +277 -0
  6. package/dist/cjs/integrations/base.js +32 -0
  7. package/dist/cjs/integrations/bedrock.js +442 -0
  8. package/dist/cjs/integrations/group-context.js +10 -0
  9. package/dist/cjs/integrations/langchain.js +274 -0
  10. package/dist/cjs/integrations/langgraph.js +173 -0
  11. package/dist/cjs/integrations/openai.js +447 -0
  12. package/dist/cjs/integrations/vercel-ai.js +261 -0
  13. package/dist/cjs/types/index.js +5 -0
  14. package/dist/cjs/utils.js +122 -0
  15. package/dist/esm/api.js +87 -0
  16. package/dist/esm/client.js +238 -0
  17. package/dist/esm/index.js +209 -0
  18. package/dist/esm/integrations/anthropic.js +272 -0
  19. package/dist/esm/integrations/base.js +28 -0
  20. package/dist/esm/integrations/bedrock.js +438 -0
  21. package/dist/esm/integrations/group-context.js +7 -0
  22. package/dist/esm/integrations/langchain.js +269 -0
  23. package/dist/esm/integrations/langgraph.js +168 -0
  24. package/dist/esm/integrations/openai.js +442 -0
  25. package/dist/esm/integrations/vercel-ai.js +258 -0
  26. package/dist/esm/types/index.js +4 -0
  27. package/dist/esm/utils.js +116 -0
  28. package/dist/types/api.d.ts +27 -0
  29. package/dist/types/client.d.ts +50 -0
  30. package/dist/types/index.d.ts +7 -0
  31. package/dist/types/integrations/anthropic.d.ts +9 -0
  32. package/dist/types/integrations/base.d.ts +17 -0
  33. package/dist/types/integrations/bedrock.d.ts +11 -0
  34. package/dist/types/integrations/group-context.d.ts +12 -0
  35. package/dist/types/integrations/langchain.d.ts +40 -0
  36. package/dist/types/integrations/langgraph.d.ts +13 -0
  37. package/dist/types/integrations/openai.d.ts +11 -0
  38. package/dist/types/integrations/vercel-ai.d.ts +2 -0
  39. package/dist/types/types/index.d.ts +21 -0
  40. package/dist/types/utils.d.ts +23 -0
  41. package/package.json +80 -0
@@ -0,0 +1,261 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.patchVercelAI = patchVercelAI;
4
+ const node_crypto_1 = require("node:crypto");
5
+ const group_context_1 = require("./group-context");
6
+ const utils_1 = require("../utils");
7
// ---------------------------------------------------------------------------
// Provider detection — Vercel AI SDK exposes model.provider as e.g.
// "openai.chat", "anthropic.messages". Strip the suffix to get a clean name.
// ---------------------------------------------------------------------------
// Vercel AI SDK gives bare provider names like "openai", "anthropic" — no model
// prefix — so we can't use detectProvider() which expects full model IDs.
// Map directly and fall back to detectProvider() for model-style strings.
const _VERCEL_PROVIDER_MAP = {
    openai: 'openai',
    anthropic: 'anthropic',
    amazon: 'amazon',
    meta: 'meta',
    mistral: 'mistral',
    cohere: 'cohere',
    ai21: 'ai21',
    google: 'google',
    deepseek: 'deepseek',
};
/**
 * Normalize a Vercel AI SDK provider string to a clean provider name.
 *
 * @param {string|undefined} modelProvider - e.g. "openai.chat", "anthropic".
 * @returns {string} Canonical provider name, the lowercased prefix as-is for
 *   providers not in the map (the SDK already supplies clean strings like
 *   'mock-provider'), or 'unknown' when no provider string is available.
 */
function detectVercelProvider(modelProvider) {
    if (!modelProvider)
        return 'unknown';
    const prefix = modelProvider.split('.')[0].toLowerCase();
    // BUGFIX: a bare `_VERCEL_PROVIDER_MAP[prefix]` also resolves inherited
    // Object.prototype keys — e.g. prefix 'constructor' returned a function
    // instead of a string. Only honor the map's own properties.
    return Object.hasOwn(_VERCEL_PROVIDER_MAP, prefix)
        ? _VERCEL_PROVIDER_MAP[prefix]
        : prefix;
}
33
// ---------------------------------------------------------------------------
// Shared span sender — used by all three wrappers
// ---------------------------------------------------------------------------
/**
 * Record one LLM call as an llm_call span. When running inside a track()
 * group the span is routed into the group's shared trace; otherwise a
 * standalone trace is created, completed, and logged around the single span.
 */
async function sendLLMTrace(visibe, agentName, model, provider, inputTokens, outputTokens, inputText, outputText, durationMs) {
    // A live group context means we are inside track() — reuse its trace id.
    const group = group_context_1.activeGroupTraceStorage.getStore();
    const standalone = !group;
    const trace = group?.traceId ?? (0, node_crypto_1.randomUUID)();
    if (standalone) {
        const createPayload = {
            trace_id: trace,
            name: agentName,
            framework: 'vercel_ai',
            started_at: new Date().toISOString(),
        };
        if (visibe.sessionId) {
            createPayload.session_id = visibe.sessionId;
        }
        await visibe.apiClient.createTrace(createPayload);
    }
    const callCost = (0, utils_1.calculateCost)(model, inputTokens, outputTokens);
    // The span is assembled by hand so Vercel's provider metadata wins over
    // whatever detectProvider() would have guessed from the model id.
    visibe.batcher.add(trace, {
        span_id: 'step_1',
        type: 'llm_call',
        timestamp: new Date().toISOString(),
        agent_name: agentName,
        model,
        provider,
        status: 'success',
        description: `LLM Call using ${model}`,
        input_tokens: inputTokens,
        output_tokens: outputTokens,
        cost: callCost,
        duration_ms: durationMs,
        input_text: inputText.slice(0, visibe.contentLimit),
        output_text: outputText.slice(0, visibe.contentLimit),
    });
    // Let the group tracker (if inside track()) aggregate this LLM span.
    group?.onLLMSpan(inputTokens, outputTokens, callCost);
    if (standalone) {
        visibe.batcher.flush();
        const delivered = await visibe.apiClient.completeTrace(trace, {
            status: 'completed',
            ended_at: new Date().toISOString(),
            duration_ms: durationMs,
            llm_call_count: 1,
            prompt: inputText.slice(0, visibe.contentLimit),
            model,
            total_cost: callCost,
            total_tokens: inputTokens + outputTokens,
            total_input_tokens: inputTokens,
            total_output_tokens: outputTokens,
        });
        const tokenTotal = (inputTokens + outputTokens).toLocaleString();
        const deliveredStr = delivered ? 'OK' : 'FAILED';
        console.log(`[Visibe] Trace: ${agentName} | 1 LLM calls | ${tokenTotal} tokens | $${callCost.toFixed(6)} | ${(durationMs / 1000).toFixed(1)}s | 0 tool calls | status: completed | model: ${model} | sent: ${deliveredStr}`);
    }
}
89
// ---------------------------------------------------------------------------
// Extract prompt text from Vercel AI SDK params
// ---------------------------------------------------------------------------
/**
 * Pull the user prompt out of generateText/streamText/generateObject params.
 * Prefers the plain `prompt` string; otherwise scans `messages` backwards for
 * the most recent user message and returns its text ('' when none is found).
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function extractVercelPrompt(params) {
    const { prompt, messages } = params;
    if (typeof prompt === 'string') {
        return prompt;
    }
    if (Array.isArray(messages)) {
        // Walk from the newest message back to the oldest.
        for (let idx = messages.length - 1; idx >= 0; idx--) {
            const entry = messages[idx];
            if (entry?.role !== 'user') {
                continue;
            }
            const { content } = entry;
            if (typeof content === 'string') {
                return content;
            }
            if (Array.isArray(content)) {
                // Multi-part content: take the first text part of this message.
                const textPart = content.find((p) => p.type === 'text');
                return textPart?.text ?? '';
            }
            // Unrecognized content shape — keep scanning earlier messages.
        }
    }
    return '';
}
112
// ---------------------------------------------------------------------------
// patchVercelAI — patches the ai module using a require.cache Proxy.
//
// ai@6 exports are non-configurable ESM→CJS getters — direct assignment and
// Object.defineProperty both fail. The only reliable approach is to replace
// require.cache[key].exports with a Proxy that intercepts property reads, so
// that any subsequent require('ai') call (including destructuring) returns the
// patched wrappers.
//
// Called from index.ts patchFramework(). Returns a restore function.
// ---------------------------------------------------------------------------
/**
 * Wrap the ai module's generateText / streamText / generateObject so every
 * call emits a Visibe trace, leaving the wrapped call's behavior untouched.
 *
 * @param aiModule  - The already-required 'ai' module object to patch.
 * @param visibe    - SDK client (apiClient, batcher, sessionId, contentLimit).
 * @param agentName - Trace/agent label; defaults to 'vercel-ai'.
 * @returns A restore function that undoes both patching strategies.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function patchVercelAI(aiModule, visibe, agentName = 'vercel-ai') {
    const origGenerateText = aiModule.generateText;
    const origStreamText = aiModule.streamText;
    const origGenerateObject = aiModule.generateObject;
    // Build patched wrappers (declared as variables so the Proxy's get trap can close over them).
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let patchedGenerateText;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let patchedStreamText;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let patchedGenerateObject;
    // --- generateText ---
    if (origGenerateText) {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        patchedGenerateText = async (params) => {
            const start = Date.now();
            const result = await origGenerateText(params);
            const model = params.model?.modelId ?? 'unknown';
            const provider = detectVercelProvider(params.model?.provider);
            // Await so the trace is guaranteed to be created before this function returns.
            // This ensures test helpers (captureTraceId) capture the traceId reliably.
            // The .catch keeps tracing failures from ever surfacing to user code.
            await sendLLMTrace(visibe, agentName, model, provider,
            // ai@3/4 uses promptTokens; ai@6 uses inputTokens
            result.usage?.promptTokens ?? result.usage?.inputTokens ?? 0, result.usage?.completionTokens ?? result.usage?.outputTokens ?? 0, extractVercelPrompt(params), result.text ?? '', Date.now() - start).catch(() => { });
            return result;
        };
    }
    // --- streamText ---
    if (origStreamText) {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        patchedStreamText = (params) => {
            const start = Date.now();
            const stream = origStreamText(params);
            const model = params.model?.modelId ?? 'unknown';
            const provider = detectVercelProvider(params.model?.provider);
            // The Vercel AI SDK's streamText result has a `.usage` promise that
            // resolves after the stream is consumed. Tracing piggybacks on it;
            // the stream itself is returned to the caller unmodified.
            if (stream?.usage && typeof stream.usage.then === 'function') {
                stream.usage.then((usage) => {
                    sendLLMTrace(visibe, agentName, model, provider,
                    // ai@3/4 uses promptTokens; ai@6 uses inputTokens
                    usage?.promptTokens ?? usage?.inputTokens ?? 0, usage?.completionTokens ?? usage?.outputTokens ?? 0, extractVercelPrompt(params), '', // full text not easily available without consuming the stream
                    Date.now() - start).catch(() => { });
                }).catch(() => { });
            }
            return stream;
        };
    }
    // --- generateObject ---
    if (origGenerateObject) {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        patchedGenerateObject = async (params) => {
            const start = Date.now();
            const result = await origGenerateObject(params);
            const model = params.model?.modelId ?? 'unknown';
            const provider = detectVercelProvider(params.model?.provider);
            await sendLLMTrace(visibe, agentName, model, provider,
            // ai@3/4 uses promptTokens; ai@6 uses inputTokens
            result.usage?.promptTokens ?? result.usage?.inputTokens ?? 0, result.usage?.completionTokens ?? result.usage?.outputTokens ?? 0, extractVercelPrompt(params), JSON.stringify(result.object ?? ''), Date.now() - start).catch(() => { });
            return result;
        };
    }
    // ---------------------------------------------------------------------------
    // 1. Try direct property assignment first.
    // Works for plain objects (tests) and ai@3/4 where exports are configurable.
    // For ai@6 with non-configurable exports this silently fails; the Proxy (below)
    // handles that case.
    // ---------------------------------------------------------------------------
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const directRestores = [];
    try {
        if (patchedGenerateText) {
            const orig = aiModule.generateText;
            aiModule.generateText = patchedGenerateText;
            directRestores.push(() => { aiModule.generateText = orig; });
        }
    }
    catch { /* non-configurable */ }
    try {
        if (patchedStreamText) {
            const orig = aiModule.streamText;
            aiModule.streamText = patchedStreamText;
            directRestores.push(() => { aiModule.streamText = orig; });
        }
    }
    catch { /* non-configurable */ }
    try {
        if (patchedGenerateObject) {
            const orig = aiModule.generateObject;
            aiModule.generateObject = patchedGenerateObject;
            directRestores.push(() => { aiModule.generateObject = orig; });
        }
    }
    catch { /* non-configurable */ }
    // ---------------------------------------------------------------------------
    // 2. Replace require.cache exports with a Proxy (ai@6 fallback).
    //
    // ai@6's exports are Object.defineProperty'd with configurable:false — direct
    // assignment silently fails and Object.defineProperty throws. By replacing the
    // module's cache entry with a Proxy we intercept property reads on future
    // require('ai') calls (including destructuring) and return our wrappers.
    // NOTE(review): modules that required 'ai' BEFORE this ran keep their
    // original bindings — presumably acceptable since patching happens at init.
    // ---------------------------------------------------------------------------
    let cacheKey;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    let originalExports;
    try {
        // Locate the cache entry whose exports object is this exact module instance.
        // eslint-disable-next-line @typescript-eslint/no-require-imports
        cacheKey = Object.keys(require.cache).find(k => require.cache[k]?.exports === aiModule);
        if (cacheKey && require.cache[cacheKey]) {
            originalExports = require.cache[cacheKey].exports;
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            require.cache[cacheKey].exports = new Proxy(aiModule, {
                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                get(target, prop) {
                    if (prop === 'generateText' && patchedGenerateText)
                        return patchedGenerateText;
                    if (prop === 'streamText' && patchedStreamText)
                        return patchedStreamText;
                    if (prop === 'generateObject' && patchedGenerateObject)
                        return patchedGenerateObject;
                    // Everything else passes through to the real module.
                    return Reflect.get(target, prop);
                },
            });
        }
    }
    catch { /* require.cache not available in non-CJS contexts (e.g. pure ESM) */ }
    // Restore function — undo direct assignments and reset require.cache.
    return () => {
        for (const restore of directRestores)
            restore();
        try {
            if (cacheKey && require.cache[cacheKey]) {
                require.cache[cacheKey].exports = originalExports;
            }
        }
        catch { /* package may have been unloaded */ }
    };
}
@@ -0,0 +1,5 @@
1
+ "use strict";
2
+ // ---------------------------------------------------------------------------
3
+ // Public SDK options
4
+ // ---------------------------------------------------------------------------
5
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,122 @@
1
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.TOOL_CONTENT_LIMIT = exports.LLM_CONTENT_LIMIT = exports.MODEL_PRICING = void 0;
exports.calculateCost = calculateCost;
exports.truncate = truncate;
exports.detectProvider = detectProvider;
// Prices are per 1,000 tokens in USD.
// Insertion order does not matter for lookups: _pricingKeys (below) is built
// sorted longest-first once at module load, so longer, more specific keys
// (e.g. 'gpt-4o-mini') always match before their shorter prefixes ('gpt-4').
exports.MODEL_PRICING = {
    // OpenAI
    'gpt-4o-mini': { input: 0.00015, output: 0.0006 },
    'gpt-4.1-mini': { input: 0.0004, output: 0.0016 },
    'gpt-4.1-nano': { input: 0.0001, output: 0.0004 },
    'gpt-4.1': { input: 0.002, output: 0.008 },
    'gpt-4-turbo': { input: 0.01, output: 0.03 },
    'gpt-4o': { input: 0.0025, output: 0.01 },
    'gpt-4': { input: 0.03, output: 0.06 },
    'gpt-3.5-turbo': { input: 0.0015, output: 0.002 },
    'o1-mini': { input: 0.003, output: 0.012 },
    'o3-mini': { input: 0.0011, output: 0.0044 },
    'o1': { input: 0.015, output: 0.06 },
    // Anthropic (direct API)
    'claude-3.5-sonnet': { input: 0.003, output: 0.015 },
    'claude-3.5-haiku': { input: 0.0008, output: 0.004 },
    'claude-3-opus': { input: 0.015, output: 0.075 },
    'claude-3-sonnet': { input: 0.003, output: 0.015 },
    'claude-3-haiku': { input: 0.00025, output: 0.00125 },
    // Google (via OpenAI compat)
    'gemini-1.5-pro': { input: 0.00125, output: 0.005 },
    'gemini-1.5-flash': { input: 0.000075, output: 0.0003 },
    'gemini-2.0-flash': { input: 0.0001, output: 0.0004 },
    // DeepSeek
    'deepseek-reasoner': { input: 0.00055, output: 0.00219 },
    'deepseek-chat': { input: 0.00027, output: 0.0011 },
    // AWS Bedrock — Anthropic
    'anthropic.claude-3.5-sonnet': { input: 0.003, output: 0.015 },
    'anthropic.claude-3.5-haiku': { input: 0.0008, output: 0.004 },
    'anthropic.claude-3-opus': { input: 0.015, output: 0.075 },
    'anthropic.claude-3-sonnet': { input: 0.003, output: 0.015 },
    'anthropic.claude-3-haiku': { input: 0.00025, output: 0.00125 },
    'anthropic.claude-4-sonnet': { input: 0.003, output: 0.015 },
    'anthropic.claude-4-opus': { input: 0.015, output: 0.075 },
    // AWS Bedrock — Meta
    'meta.llama3-1-405b-instruct': { input: 0.00532, output: 0.016 },
    'meta.llama3-1-70b-instruct': { input: 0.00265, output: 0.0035 },
    'meta.llama3-1-8b-instruct': { input: 0.0003, output: 0.0006 },
    'meta.llama3-70b-instruct': { input: 0.00265, output: 0.0035 },
    'meta.llama3-8b-instruct': { input: 0.0003, output: 0.0006 },
    // AWS Bedrock — Amazon
    'amazon.titan-text-express': { input: 0.0002, output: 0.0006 },
    'amazon.titan-text-lite': { input: 0.00015, output: 0.0002 },
    'amazon.nova-pro': { input: 0.0008, output: 0.0032 },
    'amazon.nova-lite': { input: 0.00006, output: 0.00024 },
    'amazon.nova-micro': { input: 0.000035, output: 0.00014 },
    // AWS Bedrock — Mistral / Cohere / AI21
    'mistral.mistral-large': { input: 0.004, output: 0.012 },
    'mistral.mistral-small': { input: 0.001, output: 0.003 },
    'mistral.mixtral-8x7b-instruct': { input: 0.00045, output: 0.0007 },
    'cohere.command-r-plus': { input: 0.003, output: 0.015 },
    'cohere.command-r': { input: 0.0005, output: 0.0015 },
    'ai21.jamba-1.5-large': { input: 0.002, output: 0.008 },
    'ai21.jamba-1.5-mini': { input: 0.0002, output: 0.0004 },
};
// Sorted keys (longest first) built once at module load — avoids re-sorting on every call.
const _pricingKeys = Object.keys(exports.MODEL_PRICING).sort((a, b) => b.length - a.length);
67
/**
 * Calculate the USD cost for a single LLM call.
 * Pricing keys are matched longest-first so "gpt-4o-mini" wins over "gpt-4".
 * Rounds via parseFloat(x.toFixed(6)) — toFixed(4) would round tiny costs to $0.
 * Unknown models cost 0.
 */
function calculateCost(model, inputTokens, outputTokens) {
    const needle = model.toLowerCase();
    // Linear scan over the longest-first key list; first hit is the best match.
    let matched;
    for (const key of _pricingKeys) {
        if (needle.includes(key)) {
            matched = key;
            break;
        }
    }
    if (matched === undefined) {
        return 0;
    }
    const pricing = exports.MODEL_PRICING[matched];
    const raw = (inputTokens / 1000) * pricing.input + (outputTokens / 1000) * pricing.output;
    return parseFloat(raw.toFixed(6));
}
81
/**
 * Truncate text to a maximum character limit.
 *
 * @param {string|null|undefined} text - Text to truncate; null/undefined → ''.
 *   Empty strings are preserved (nullish check, not truthiness).
 * @param {number} limit - Maximum characters to keep; any limit <= 0 yields ''.
 * @returns {string} The original text when it fits, otherwise the first
 *   `limit` characters with '...' appended.
 */
function truncate(text, limit) {
    if (text == null)
        return '';
    // BUGFIX: was `limit === 0`, so a negative limit fell through to
    // slice(0, limit), which cuts from the END of the string and still
    // appends '...'. Treat every non-positive limit as "keep nothing".
    if (limit <= 0)
        return '';
    if (text.length <= limit)
        return text;
    return text.slice(0, limit) + '...';
}
95
/**
 * Detect the LLM provider from a model ID string.
 * The result is used as the `provider` field on llm_call spans.
 */
function detectProvider(model) {
    const id = model.toLowerCase();
    // Ordered prefix rules — first matching rule wins, mirroring the
    // precedence of the original if/else chain.
    const rules = [
        ['openai', ['gpt-', 'o1', 'o3']],
        ['anthropic', ['claude-', 'anthropic.']],
        ['amazon', ['amazon.', 'nova', 'titan']],
        ['meta', ['meta.', 'llama']],
        ['mistral', ['mistral.', 'mixtral']],
        ['cohere', ['cohere.']],
        ['ai21', ['ai21.']],
        ['google', ['gemini']],
        ['deepseek', ['deepseek']],
    ];
    for (const [provider, prefixes] of rules) {
        if (prefixes.some(prefix => id.startsWith(prefix))) {
            return provider;
        }
    }
    return 'unknown';
}
121
// Default per-span character caps used when truncating captured prompt/output
// text before it is attached to a span.
exports.LLM_CONTENT_LIMIT = 1000; // default for llm_call spans
exports.TOOL_CONTENT_LIMIT = 500; // default for tool_call spans
@@ -0,0 +1,87 @@
1
const DEFAULT_API_URL = 'https://api.visibe.ai';
const DEFAULT_TIMEOUT_MS = 10000;
/**
 * Thin fetch-based client for the Visibe trace API.
 *
 * Every method resolves to a boolean (whether the response was 2xx) and never
 * throws — tracing is strictly fire-and-forget. Without an API key the client
 * becomes a no-op and emits a one-time process warning.
 */
export class APIClient {
    constructor(options) {
        this.apiUrl = options.apiUrl ?? DEFAULT_API_URL;
        this.apiKey = options.apiKey;
        this.timeout = options.timeout ?? DEFAULT_TIMEOUT_MS;
        this._enabled = Boolean(options.apiKey);
        if (!this._enabled) {
            process.emitWarning('[Visibe] No API key provided — tracing is disabled. Set VISIBE_API_KEY or pass apiKey= to enable.', { type: 'VisibleSDKWarning', code: 'VISIBE_NO_API_KEY' });
        }
    }
    /** Shared HTTP helper — resolves true only for a 2xx response. */
    async _request(method, path, body) {
        if (!this._enabled) {
            return false;
        }
        const headers = {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${this.apiKey}`,
        };
        try {
            const res = await fetch(`${this.apiUrl}${path}`, {
                method,
                headers,
                body: body ? JSON.stringify(body) : undefined,
                // Abort slow requests so tracing can never hang user code.
                signal: AbortSignal.timeout(this.timeout),
            });
            return res.ok;
        }
        catch {
            // fire-and-forget — network/timeout failures never propagate
            return false;
        }
    }
    /** Open a new trace. */
    async createTrace(data) {
        return this._request('POST', '/api/traces', data);
    }
    /** Send a single span for an existing trace. */
    async sendSpan(traceId, span) {
        return this._request('POST', `/api/traces/${traceId}/spans`, span);
    }
    /** Send several spans for one trace in a single request. */
    async sendSpansBatch(traceId, spans) {
        return this._request('POST', `/api/traces/${traceId}/spans/batch`, { spans });
    }
    /** Mark a trace finished and attach its aggregate fields. */
    async completeTrace(traceId, data) {
        return this._request('PATCH', `/api/traces/${traceId}`, data);
    }
}
45
// ---------------------------------------------------------------------------
/**
 * Buffers spans and ships them to the API in per-trace batches.
 * A flush happens when the buffer reaches `batchSize` or on the periodic
 * `flushInterval` timer, whichever comes first.
 */
export class SpanBatcher {
    constructor(api) {
        this.api = api;
        this.buffer = [];
        this.batchSize = 50;
        this.flushInterval = 2000; // ms
        this.timer = setInterval(() => this.flush(), this.flushInterval);
        // CRITICAL: unref() so the timer does not keep the process alive after
        // all user code has finished.
        this.timer.unref();
    }
    /** Queue one span; triggers an immediate flush when the buffer is full. */
    add(traceId, span) {
        // No API key → nothing will ever be sent, so skip buffering entirely.
        if (!this.api._enabled) {
            return;
        }
        this.buffer.push({ traceId, span });
        if (this.buffer.length >= this.batchSize) {
            this.flush();
        }
    }
    /** Drain the buffer and send one batch request per distinct traceId. */
    flush() {
        if (!this.buffer.length) {
            return;
        }
        // splice(0) drains atomically — safe because Node.js runs JS single-threaded.
        const pending = this.buffer.splice(0);
        const grouped = new Map();
        for (const item of pending) {
            const spans = grouped.get(item.traceId);
            if (spans) {
                spans.push(item.span);
            }
            else {
                grouped.set(item.traceId, [item.span]);
            }
        }
        for (const [traceId, spans] of grouped) {
            // fire-and-forget — delivery failures are intentionally swallowed
            this.api.sendSpansBatch(traceId, spans).catch(() => { });
        }
    }
    /** Stop the timer, flush the remainder, and briefly wait for in-flight sends. */
    async shutdown() {
        clearInterval(this.timer);
        this.flush();
        // Give in-flight requests up to 300 ms to complete before the process exits.
        await new Promise(resolve => setTimeout(resolve, 300));
    }
}