@visibe.ai/node 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/README.md +330 -0
  2. package/dist/cjs/api.js +92 -0
  3. package/dist/cjs/client.js +242 -0
  4. package/dist/cjs/index.js +216 -0
  5. package/dist/cjs/integrations/anthropic.js +277 -0
  6. package/dist/cjs/integrations/base.js +32 -0
  7. package/dist/cjs/integrations/bedrock.js +442 -0
  8. package/dist/cjs/integrations/group-context.js +10 -0
  9. package/dist/cjs/integrations/langchain.js +274 -0
  10. package/dist/cjs/integrations/langgraph.js +173 -0
  11. package/dist/cjs/integrations/openai.js +447 -0
  12. package/dist/cjs/integrations/vercel-ai.js +261 -0
  13. package/dist/cjs/types/index.js +5 -0
  14. package/dist/cjs/utils.js +122 -0
  15. package/dist/esm/api.js +87 -0
  16. package/dist/esm/client.js +238 -0
  17. package/dist/esm/index.js +209 -0
  18. package/dist/esm/integrations/anthropic.js +272 -0
  19. package/dist/esm/integrations/base.js +28 -0
  20. package/dist/esm/integrations/bedrock.js +438 -0
  21. package/dist/esm/integrations/group-context.js +7 -0
  22. package/dist/esm/integrations/langchain.js +269 -0
  23. package/dist/esm/integrations/langgraph.js +168 -0
  24. package/dist/esm/integrations/openai.js +442 -0
  25. package/dist/esm/integrations/vercel-ai.js +258 -0
  26. package/dist/esm/types/index.js +4 -0
  27. package/dist/esm/utils.js +116 -0
  28. package/dist/types/api.d.ts +27 -0
  29. package/dist/types/client.d.ts +50 -0
  30. package/dist/types/index.d.ts +7 -0
  31. package/dist/types/integrations/anthropic.d.ts +9 -0
  32. package/dist/types/integrations/base.d.ts +17 -0
  33. package/dist/types/integrations/bedrock.d.ts +11 -0
  34. package/dist/types/integrations/group-context.d.ts +12 -0
  35. package/dist/types/integrations/langchain.d.ts +40 -0
  36. package/dist/types/integrations/langgraph.d.ts +13 -0
  37. package/dist/types/integrations/openai.d.ts +11 -0
  38. package/dist/types/integrations/vercel-ai.d.ts +2 -0
  39. package/dist/types/types/index.d.ts +21 -0
  40. package/dist/types/utils.d.ts +23 -0
  41. package/package.json +80 -0
@@ -0,0 +1,442 @@
1
+ import { AsyncLocalStorage } from 'node:async_hooks';
2
+ import { randomUUID } from 'node:crypto';
3
+ import { BaseIntegration } from './base';
4
+ import { activeGroupTraceStorage } from './group-context';
5
+ import { calculateCost } from '../utils';
6
// ---------------------------------------------------------------------------
// Re-entrancy guard. When LangChain/LangGraph owns the outer trace it runs
// its work inside activeLangChainStorage.run(...). Every intercepted OpenAI
// call first checks getStore(); if any value is present the call is forwarded
// to the original SDK method untouched, so the same LLM call is never traced
// twice by two integrations.
// ---------------------------------------------------------------------------
export const activeLangChainStorage = new AsyncLocalStorage();
13
// ---------------------------------------------------------------------------
// Helpers — extract text from OpenAI message arrays
// ---------------------------------------------------------------------------
// Pull the text of the most recent user-role message out of a chat
// `messages` array. Handles both plain-string content and the multi-modal
// content-part array form; returns '' when no usable text is found.
function extractLastUserMessage(messages) {
    if (!Array.isArray(messages)) {
        return '';
    }
    // Scan from newest to oldest; the first user entry with usable content wins.
    for (let idx = messages.length - 1; idx >= 0; idx -= 1) {
        const entry = messages[idx];
        if (entry?.role !== 'user') {
            continue;
        }
        const { content } = entry;
        if (typeof content === 'string') {
            return content;
        }
        if (Array.isArray(content)) {
            // Vision / multi-modal messages carry an array of parts;
            // keep only the text part.
            const found = content.find((part) => part.type === 'text');
            return found?.text ?? '';
        }
    }
    return '';
}
35
// When the model makes a tool call the assistant message content is null.
// Render the calls as a single "toolName(args); toolName(args)" string so
// the span still has a readable output value.
function formatToolCalls(toolCalls) {
    if (!Array.isArray(toolCalls) || toolCalls.length === 0) {
        return '';
    }
    const rendered = [];
    for (const call of toolCalls) {
        const name = call.function?.name ?? 'unknown';
        const args = call.function?.arguments ?? '';
        rendered.push(`${name}(${args})`);
    }
    return rendered.join('; ');
}
45
// Extract plain text from a Responses API `input` value, which may be either
// a bare string or an array of messages. For the array form, the most recent
// user message wins; both 'input_text' and 'text' content parts are accepted.
function extractResponsesInputText(input) {
    if (typeof input === 'string') {
        return input;
    }
    if (!Array.isArray(input)) {
        return '';
    }
    for (let idx = input.length - 1; idx >= 0; idx -= 1) {
        const entry = input[idx];
        if (entry?.role !== 'user') {
            continue;
        }
        const { content } = entry;
        if (typeof content === 'string') {
            return content;
        }
        if (Array.isArray(content)) {
            const found = content.find(
                (part) => part.type === 'input_text' || part.type === 'text'
            );
            return found?.text ?? '';
        }
    }
    return '';
}
65
// ---------------------------------------------------------------------------
// OpenAIIntegration class
//
// Monkey-patches an OpenAI SDK client so chat.completions.create and
// responses.create are traced: each call either joins an active group trace
// (activeGroupTraceStorage) or creates its own trace, records an LLM span
// with tokens / cost / latency, and completes the trace.
// ---------------------------------------------------------------------------
export class OpenAIIntegration extends BaseIntegration {
    /**
     * Replace the client's create methods with tracing wrappers.
     *
     * @param client - OpenAI SDK client instance (mutated in place).
     * @param agentName - Label stored on traces/spans produced by this client.
     * @returns A restore function that reinstalls the original methods.
     */
    patchClient(client, agentName) {
        // Bind to the original containers so the restore function and the
        // pass-through path call the untouched SDK implementations.
        const originalCreate = client.chat.completions.create.bind(client.chat.completions);
        // responses.create only exists on newer SDK versions — patch it only
        // when present.
        const originalResponsesCreate = client.responses?.create?.bind(client.responses);
        // --- chat.completions.create ---
        client.chat.completions.create = async (params, options) => {
            // If a LangChain/LangGraph trace is active, pass through without
            // wrapping so the call is not traced twice.
            if (activeLangChainStorage.getStore() !== undefined) {
                return originalCreate(params, options);
            }
            if (params.stream) {
                return this._wrapStream(originalCreate, params, options, agentName);
            }
            return this._wrapCreate(originalCreate, params, options, agentName);
        };
        // --- responses.create (Responses API) ---
        if (originalResponsesCreate) {
            client.responses.create = async (params, options) => {
                if (activeLangChainStorage.getStore() !== undefined) {
                    return originalResponsesCreate(params, options);
                }
                return this._wrapResponsesCreate(originalResponsesCreate, params, options, agentName);
            };
        }
        // Restore function: undo both patches.
        return () => {
            client.chat.completions.create = originalCreate;
            if (originalResponsesCreate) {
                client.responses.create = originalResponsesCreate;
            }
        };
    }
    // -----------------------------------------------------------------------
    // chat.completions.create — non-streaming path.
    //
    // Outside a group trace this owns the whole trace lifecycle
    // (createTrace → span → flush → completeTrace → console summary); inside
    // a group trace it only emits the span and notifies the group context.
    // -----------------------------------------------------------------------
    async _wrapCreate(original, params, options, agentName) {
        const groupCtx = activeGroupTraceStorage.getStore();
        // Join the group's trace when inside track(); otherwise mint a new id.
        const traceId = groupCtx?.traceId ?? randomUUID();
        const startedAt = new Date().toISOString();
        const startMs = Date.now();
        if (!groupCtx) {
            await this.visibe.apiClient.createTrace({
                trace_id: traceId,
                name: agentName,
                framework: 'openai',
                started_at: startedAt,
                ...(this.visibe.sessionId ? { session_id: this.visibe.sessionId } : {}),
            });
        }
        const spanId = this.nextSpanId();
        let response;
        let spanStatus = 'success';
        try {
            response = await original(params, options);
        }
        catch (err) {
            spanStatus = 'failed';
            // Record an error span, close the trace (if we own it), rethrow.
            const errorSpan = this.visibe.buildErrorSpan({
                spanId: this.nextSpanId(),
                errorType: err?.constructor?.name ?? 'Error',
                errorMessage: err?.message ?? String(err),
            });
            this.visibe.batcher.add(traceId, errorSpan);
            if (!groupCtx) {
                // NOTE(review): flush() is not awaited on any path in this
                // file — presumably fire-and-forget by design; confirm.
                this.visibe.batcher.flush();
                await this.visibe.apiClient.completeTrace(traceId, {
                    status: 'failed',
                    ended_at: new Date().toISOString(),
                    duration_ms: Date.now() - startMs,
                });
            }
            throw err;
        }
        const model = response.model ?? params.model ?? 'unknown';
        const inputTokens = response.usage?.prompt_tokens ?? 0;
        const outputTokens = response.usage?.completion_tokens ?? 0;
        const cost = calculateCost(model, inputTokens, outputTokens);
        const choice = response.choices?.[0];
        const rawContent = choice?.message?.content;
        const toolCalls = choice?.message?.tool_calls ?? [];
        // Tool-call turns have null content; fall back to a rendered tool list.
        const outputText = rawContent ?? formatToolCalls(toolCalls);
        const inputText = extractLastUserMessage(params.messages ?? []);
        const llmSpan = this.visibe.buildLLMSpan({
            spanId,
            agentName,
            model,
            status: spanStatus,
            inputTokens,
            outputTokens,
            inputText,
            outputText,
            durationMs: Date.now() - startMs,
        });
        this.visibe.batcher.add(traceId, llmSpan);
        // Notify the group tracker (if inside track()) about this LLM span.
        groupCtx?.onLLMSpan(inputTokens, outputTokens, cost);
        if (!groupCtx) {
            this.visibe.batcher.flush();
            const sent = await this.visibe.apiClient.completeTrace(traceId, {
                status: 'completed',
                ended_at: new Date().toISOString(),
                duration_ms: Date.now() - startMs,
                llm_call_count: 1,
                prompt: inputText,
                model,
                total_cost: cost,
                total_tokens: inputTokens + outputTokens,
                total_input_tokens: inputTokens,
                total_output_tokens: outputTokens,
            });
            _printSummary(agentName, model, inputTokens, outputTokens, cost, Date.now() - startMs, sent);
        }
        return response;
    }
    // -----------------------------------------------------------------------
    // chat.completions.create — streaming path (stream: true).
    //
    // Wraps the async iterator to accumulate text deltas and usage, then
    // finalizes the span when the stream is exhausted, returned early, or
    // aborted via throw().
    // -----------------------------------------------------------------------
    async _wrapStream(original, params, options, agentName) {
        const groupCtx = activeGroupTraceStorage.getStore();
        const traceId = groupCtx?.traceId ?? randomUUID();
        const startedAt = new Date().toISOString();
        const startMs = Date.now();
        if (!groupCtx) {
            await this.visibe.apiClient.createTrace({
                trace_id: traceId,
                name: agentName,
                framework: 'openai',
                started_at: startedAt,
                ...(this.visibe.sessionId ? { session_id: this.visibe.sessionId } : {}),
            });
        }
        // Inject stream_options so the final chunk includes usage data.
        const augmentedParams = {
            ...params,
            stream_options: { ...params.stream_options, include_usage: true },
        };
        const spanId = this.nextSpanId();
        let outputText = '';
        let inputTokens = 0, outputTokens = 0;
        let model = params.model ?? 'unknown';
        const originalStream = await original(augmentedParams, options);
        // NOTE(review): spreading the SDK stream copies only own enumerable
        // properties — prototype methods (e.g. tee()/controller on the SDK's
        // Stream class) are not carried over; confirm callers only iterate.
        const wrappedStream = {
            ...originalStream,
            [Symbol.asyncIterator]() {
                const iter = originalStream[Symbol.asyncIterator]();
                return {
                    async next() {
                        const result = await iter.next();
                        if (!result.done) {
                            const chunk = result.value;
                            model = chunk.model ?? model;
                            const delta = chunk.choices?.[0]?.delta?.content;
                            if (delta)
                                outputText += delta;
                            // Last chunk carries usage when
                            // stream_options.include_usage is set.
                            if (chunk.usage) {
                                inputTokens = chunk.usage.prompt_tokens ?? 0;
                                outputTokens = chunk.usage.completion_tokens ?? 0;
                            }
                        }
                        return result;
                    },
                };
            },
        };
        // After the caller consumes the stream, finalize the span. Finalize
        // runs both on normal exhaustion and on early break/error.
        const finalize = async (streamStatus) => {
            const cost = calculateCost(model, inputTokens, outputTokens);
            const inputText = extractLastUserMessage(params.messages ?? []);
            const llmSpan = this.visibe.buildLLMSpan({
                spanId,
                agentName,
                model,
                status: streamStatus,
                inputTokens,
                outputTokens,
                inputText,
                outputText,
                durationMs: Date.now() - startMs,
            });
            this.visibe.batcher.add(traceId, llmSpan);
            // Notify the group tracker (if inside track()) about this span.
            groupCtx?.onLLMSpan(inputTokens, outputTokens, cost);
            if (!groupCtx) {
                this.visibe.batcher.flush();
                const sent = await this.visibe.apiClient.completeTrace(traceId, {
                    status: streamStatus === 'success' ? 'completed' : 'failed',
                    ended_at: new Date().toISOString(),
                    duration_ms: Date.now() - startMs,
                    llm_call_count: 1,
                    prompt: inputText,
                    model,
                    total_cost: cost,
                    total_tokens: inputTokens + outputTokens,
                    total_input_tokens: inputTokens,
                    total_output_tokens: outputTokens,
                });
                // NOTE(review): _printSummary always prints "status:
                // completed", even when streamStatus is 'failed'.
                _printSummary(agentName, model, inputTokens, outputTokens, cost, Date.now() - startMs, sent);
            }
        };
        // Re-wrap the iterator so next/return/throw all route through a
        // once-only finalize (guarded by the `finalized` flag).
        const iter = wrappedStream[Symbol.asyncIterator].bind(wrappedStream);
        wrappedStream[Symbol.asyncIterator] = () => {
            const it = iter();
            let finalized = false;
            const doFinalize = async (status) => {
                if (!finalized) {
                    finalized = true;
                    await finalize(status);
                }
            };
            return {
                async next() {
                    try {
                        const result = await it.next();
                        if (result.done)
                            await doFinalize('success');
                        return result;
                    }
                    catch (err) {
                        await doFinalize('failed');
                        throw err;
                    }
                },
                // Early break in a for-await loop lands here.
                async return(value) {
                    await doFinalize('success');
                    return it.return ? it.return(value) : { done: true, value };
                },
                async throw(err) {
                    await doFinalize('failed');
                    return it.throw ? it.throw(err) : Promise.reject(err);
                },
            };
        };
        return wrappedStream;
    }
    // -----------------------------------------------------------------------
    // Responses API — client.responses.create (non-streaming).
    //
    // Same lifecycle as _wrapCreate, but reads the Responses-API usage field
    // names (input_tokens/output_tokens) and extracts output from the
    // structured `output` item list.
    // -----------------------------------------------------------------------
    async _wrapResponsesCreate(original, params, options, agentName) {
        const groupCtx = activeGroupTraceStorage.getStore();
        const traceId = groupCtx?.traceId ?? randomUUID();
        const startedAt = new Date().toISOString();
        const startMs = Date.now();
        if (!groupCtx) {
            await this.visibe.apiClient.createTrace({
                trace_id: traceId,
                name: agentName,
                framework: 'openai',
                started_at: startedAt,
                ...(this.visibe.sessionId ? { session_id: this.visibe.sessionId } : {}),
            });
        }
        const spanId = this.nextSpanId();
        let response;
        let spanStatus = 'success';
        try {
            response = await original(params, options);
        }
        catch (err) {
            spanStatus = 'failed';
            // Record the failure, close the trace if we own it, rethrow.
            this.visibe.batcher.add(traceId, this.visibe.buildErrorSpan({
                spanId: this.nextSpanId(),
                errorType: err?.constructor?.name ?? 'Error',
                errorMessage: err?.message ?? String(err),
            }));
            if (!groupCtx) {
                this.visibe.batcher.flush();
                await this.visibe.apiClient.completeTrace(traceId, {
                    status: 'failed', ended_at: new Date().toISOString(), duration_ms: Date.now() - startMs,
                });
            }
            throw err;
        }
        const model = response.model ?? params.model ?? 'unknown';
        const inputTokens = response.usage?.input_tokens ?? 0;
        const outputTokens = response.usage?.output_tokens ?? 0;
        const cost = calculateCost(model, inputTokens, outputTokens);
        // Extract output text from message items; format function/web-search
        // calls separately as a fallback when there is no message text.
        const outputItems = response.output ?? [];
        const textParts = outputItems
            .filter((item) => item.type === 'message')
            .flatMap((item) => item.content ?? [])
            .filter((c) => c.type === 'output_text' || c.type === 'text')
            .map((c) => c.text ?? '');
        const toolItems = outputItems.filter((item) => item.type === 'function_call' || item.type === 'web_search_call');
        const toolText = toolItems
            .map((item) => `${item.name ?? item.type}(${JSON.stringify(item.arguments ?? {})})`)
            .join('; ');
        const outputText = textParts.join('') || toolText;
        const inputText = extractResponsesInputText(params.input);
        this.visibe.batcher.add(traceId, this.visibe.buildLLMSpan({
            spanId,
            agentName,
            model,
            status: spanStatus,
            inputTokens,
            outputTokens,
            inputText,
            outputText,
            durationMs: Date.now() - startMs,
        }));
        // Notify the group tracker (if inside track()) about this LLM span.
        groupCtx?.onLLMSpan(inputTokens, outputTokens, cost);
        if (!groupCtx) {
            this.visibe.batcher.flush();
            const sent = await this.visibe.apiClient.completeTrace(traceId, {
                status: 'completed',
                ended_at: new Date().toISOString(),
                duration_ms: Date.now() - startMs,
                llm_call_count: 1,
                prompt: inputText,
                model,
                total_cost: cost,
                total_tokens: inputTokens + outputTokens,
                total_input_tokens: inputTokens,
                total_output_tokens: outputTokens,
            });
            _printSummary(agentName, model, inputTokens, outputTokens, cost, Date.now() - startMs, sent);
        }
        return response;
    }
}
424
// ---------------------------------------------------------------------------
// Module-level factory — called by client.ts applyIntegration().
// Builds an OpenAIIntegration for the given Visibe client, patches the SDK
// client in place, and returns the restore function from patchClient().
// ---------------------------------------------------------------------------
export function patchOpenAIClient(client, agentName, visibe) {
    return new OpenAIIntegration(visibe).patchClient(client, agentName);
}
433
// ---------------------------------------------------------------------------
// Private helpers
// ---------------------------------------------------------------------------
/**
 * Print a one-line trace summary to the console.
 *
 * @param {string} name - Agent name shown in the summary.
 * @param {string} model - Model identifier reported by the API.
 * @param {number} inputTokens - Prompt tokens consumed.
 * @param {number} outputTokens - Completion tokens produced.
 * @param {number} cost - Estimated cost in USD.
 * @param {number} durationMs - Wall-clock duration of the call.
 * @param {boolean} sent - Whether completeTrace succeeded.
 * @param {string} [status='completed'] - Trace status to display. Previously
 *   this was hardcoded to "completed", which mislabelled failed streaming
 *   traces (the stream path finalizes with 'failed' on error). The default
 *   keeps existing call sites byte-identical in output.
 */
function _printSummary(name, model, inputTokens, outputTokens, cost, durationMs, sent, status = 'completed') {
    const durationSec = (durationMs / 1000).toFixed(1);
    const tokens = (inputTokens + outputTokens).toLocaleString();
    const costStr = `$${cost.toFixed(6)}`;
    const sentStr = sent ? 'OK' : 'FAILED';
    console.log(`[Visibe] Trace: ${name} | 1 LLM calls | ${tokens} tokens | ${costStr} | ${durationSec}s | 0 tool calls | status: ${status} | model: ${model} | sent: ${sentStr}`);
}