@visibe.ai/node 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +330 -0
- package/dist/cjs/api.js +92 -0
- package/dist/cjs/client.js +242 -0
- package/dist/cjs/index.js +216 -0
- package/dist/cjs/integrations/anthropic.js +277 -0
- package/dist/cjs/integrations/base.js +32 -0
- package/dist/cjs/integrations/bedrock.js +442 -0
- package/dist/cjs/integrations/group-context.js +10 -0
- package/dist/cjs/integrations/langchain.js +274 -0
- package/dist/cjs/integrations/langgraph.js +173 -0
- package/dist/cjs/integrations/openai.js +447 -0
- package/dist/cjs/integrations/vercel-ai.js +261 -0
- package/dist/cjs/types/index.js +5 -0
- package/dist/cjs/utils.js +122 -0
- package/dist/esm/api.js +87 -0
- package/dist/esm/client.js +238 -0
- package/dist/esm/index.js +209 -0
- package/dist/esm/integrations/anthropic.js +272 -0
- package/dist/esm/integrations/base.js +28 -0
- package/dist/esm/integrations/bedrock.js +438 -0
- package/dist/esm/integrations/group-context.js +7 -0
- package/dist/esm/integrations/langchain.js +269 -0
- package/dist/esm/integrations/langgraph.js +168 -0
- package/dist/esm/integrations/openai.js +442 -0
- package/dist/esm/integrations/vercel-ai.js +258 -0
- package/dist/esm/types/index.js +4 -0
- package/dist/esm/utils.js +116 -0
- package/dist/types/api.d.ts +27 -0
- package/dist/types/client.d.ts +50 -0
- package/dist/types/index.d.ts +7 -0
- package/dist/types/integrations/anthropic.d.ts +9 -0
- package/dist/types/integrations/base.d.ts +17 -0
- package/dist/types/integrations/bedrock.d.ts +11 -0
- package/dist/types/integrations/group-context.d.ts +12 -0
- package/dist/types/integrations/langchain.d.ts +40 -0
- package/dist/types/integrations/langgraph.d.ts +13 -0
- package/dist/types/integrations/openai.d.ts +11 -0
- package/dist/types/integrations/vercel-ai.d.ts +2 -0
- package/dist/types/types/index.d.ts +21 -0
- package/dist/types/utils.d.ts +23 -0
- package/package.json +80 -0
package/README.md
ADDED
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
<div align="center">
|
|
2
|
+
|
|
3
|
+
# Visibe SDK for Node.js
|
|
4
|
+
|
|
5
|
+
**Observability for AI agents.** Track costs, performance, and errors across your entire AI stack — whether you're using LangChain, LangGraph, Vercel AI, Anthropic, AWS Bedrock, or direct OpenAI calls.
|
|
6
|
+
|
|
7
|
+
[](https://www.npmjs.com/package/@visibe.ai/node)
|
|
8
|
+

|
|
9
|
+

|
|
10
|
+
|
|
11
|
+
</div>
|
|
12
|
+
|
|
13
|
+
---
|
|
14
|
+
|
|
15
|
+
## 📦 Getting Started
|
|
16
|
+
|
|
17
|
+
### Installation
|
|
18
|
+
|
|
19
|
+
```bash
|
|
20
|
+
npm install @visibe.ai/node
|
|
21
|
+
```
|
|
22
|
+
|
|
23
|
+
### Setup
|
|
24
|
+
|
|
25
|
+
Set your API key:
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
export VISIBE_API_KEY=sk_live_your_api_key_here
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
Or in a `.env` file:
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
VISIBE_API_KEY=sk_live_your_api_key_here
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
### One line to instrument everything
|
|
38
|
+
|
|
39
|
+
```typescript
|
|
40
|
+
import { init } from '@visibe.ai/node'
|
|
41
|
+
|
|
42
|
+
init()
|
|
43
|
+
```
|
|
44
|
+
|
|
45
|
+
That's it. Every OpenAI, Anthropic, LangChain, LangGraph, Vercel AI, and Bedrock client created after this call is automatically traced — no other code changes needed.
|
|
46
|
+
|
|
47
|
+
---
|
|
48
|
+
|
|
49
|
+
## 🧩 Integrations
|
|
50
|
+
|
|
51
|
+
| Framework | Auto (`init()`) | Manual (`instrument()`) |
|
|
52
|
+
|-----------|:-:|:-:|
|
|
53
|
+
| **OpenAI** | ✅ | ✅ |
|
|
54
|
+
| **Anthropic** | ✅ | ✅ |
|
|
55
|
+
| **LangChain** | ✅ | ✅ |
|
|
56
|
+
| **LangGraph** | ✅ | ✅ |
|
|
57
|
+
| **Vercel AI** | ✅ | — |
|
|
58
|
+
| **AWS Bedrock** | ✅ | ✅ |
|
|
59
|
+
|
|
60
|
+
Also works with OpenAI-compatible providers: Azure OpenAI, Groq, Together.ai, DeepSeek, and others.
|
|
61
|
+
|
|
62
|
+
### OpenAI
|
|
63
|
+
|
|
64
|
+
```typescript
|
|
65
|
+
import { init } from '@visibe.ai/node'
|
|
66
|
+
import OpenAI from 'openai'
|
|
67
|
+
|
|
68
|
+
init()
|
|
69
|
+
|
|
70
|
+
const client = new OpenAI()
|
|
71
|
+
const response = await client.chat.completions.create({
|
|
72
|
+
model: 'gpt-4o-mini',
|
|
73
|
+
messages: [{ role: 'user', content: 'Hello!' }],
|
|
74
|
+
})
|
|
75
|
+
// Automatically traced — cost, tokens, duration, and content captured.
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
Streaming is also supported:
|
|
79
|
+
|
|
80
|
+
```typescript
|
|
81
|
+
const stream = await client.chat.completions.create({
|
|
82
|
+
model: 'gpt-4o-mini',
|
|
83
|
+
messages: [{ role: 'user', content: 'Count to 5' }],
|
|
84
|
+
stream: true,
|
|
85
|
+
})
|
|
86
|
+
for await (const chunk of stream) {
|
|
87
|
+
process.stdout.write(chunk.choices[0]?.delta?.content ?? '')
|
|
88
|
+
}
|
|
89
|
+
// Token usage and cost captured when the stream is exhausted.
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
### Anthropic
|
|
93
|
+
|
|
94
|
+
```typescript
|
|
95
|
+
import { init } from '@visibe.ai/node'
|
|
96
|
+
import Anthropic from '@anthropic-ai/sdk'
|
|
97
|
+
|
|
98
|
+
init()
|
|
99
|
+
|
|
100
|
+
const client = new Anthropic()
|
|
101
|
+
const response = await client.messages.create({
|
|
102
|
+
model: 'claude-3-5-sonnet-20241022',
|
|
103
|
+
max_tokens: 100,
|
|
104
|
+
messages: [{ role: 'user', content: 'Hello!' }],
|
|
105
|
+
})
|
|
106
|
+
// Automatically traced.
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
### LangChain
|
|
110
|
+
|
|
111
|
+
```typescript
|
|
112
|
+
import { init } from '@visibe.ai/node'
|
|
113
|
+
|
|
114
|
+
init()
|
|
115
|
+
|
|
116
|
+
// require() AFTER init() so the instrumentation is already active
|
|
117
|
+
const { ChatOpenAI } = require('@langchain/openai')
|
|
118
|
+
const { PromptTemplate } = require('@langchain/core/prompts')
|
|
119
|
+
const { StringOutputParser } = require('@langchain/core/output_parsers')
|
|
120
|
+
const { RunnableSequence } = require('@langchain/core/runnables')
|
|
121
|
+
|
|
122
|
+
const chain = RunnableSequence.from([
|
|
123
|
+
PromptTemplate.fromTemplate('Summarize: {text}'),
|
|
124
|
+
new ChatOpenAI({ model: 'gpt-4o-mini' }),
|
|
125
|
+
new StringOutputParser(),
|
|
126
|
+
])
|
|
127
|
+
|
|
128
|
+
const result = await chain.invoke({ text: 'AI observability matters.' })
|
|
129
|
+
// Full chain traced — LLM calls, token counts, and duration captured.
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
You can also use the `LangChainCallback` directly for explicit control:
|
|
133
|
+
|
|
134
|
+
```typescript
|
|
135
|
+
import { LangChainCallback } from '@visibe.ai/node/integrations/langchain'
|
|
136
|
+
import { randomUUID } from 'node:crypto'
|
|
137
|
+
|
|
138
|
+
const traceId = randomUUID()
|
|
139
|
+
const callback = new LangChainCallback({ visibe, traceId, agentName: 'my-agent' })
|
|
140
|
+
|
|
141
|
+
const model = new ChatOpenAI({ model: 'gpt-4o-mini', callbacks: [callback] })
|
|
142
|
+
await model.invoke([new HumanMessage('Hello!')])
|
|
143
|
+
```
|
|
144
|
+
|
|
145
|
+
### LangGraph
|
|
146
|
+
|
|
147
|
+
```typescript
|
|
148
|
+
import { init } from '@visibe.ai/node'
|
|
149
|
+
|
|
150
|
+
init() // must come BEFORE graph compilation
|
|
151
|
+
|
|
152
|
+
const { StateGraph, END } = require('@langchain/langgraph')
|
|
153
|
+
const { ChatOpenAI } = require('@langchain/openai')
|
|
154
|
+
const { HumanMessage } = require('@langchain/core/messages')
|
|
155
|
+
|
|
156
|
+
const model = new ChatOpenAI({ model: 'gpt-4o-mini' })
|
|
157
|
+
|
|
158
|
+
const graph = new StateGraph({
|
|
159
|
+
channels: { messages: { value: (x, y) => x.concat(y), default: () => [] } },
|
|
160
|
+
})
|
|
161
|
+
.addNode('research', async (state) => ({
|
|
162
|
+
messages: [await model.invoke([new HumanMessage('Research this topic')])],
|
|
163
|
+
}))
|
|
164
|
+
.addNode('summarise', async (state) => ({
|
|
165
|
+
messages: [await model.invoke([new HumanMessage('Summarise the research')])],
|
|
166
|
+
}))
|
|
167
|
+
.addEdge('__start__', 'research')
|
|
168
|
+
.addEdge('research', 'summarise')
|
|
169
|
+
.addEdge('summarise', END)
|
|
170
|
+
.compile()
|
|
171
|
+
|
|
172
|
+
await graph.invoke({ messages: [] })
|
|
173
|
+
// Each node's LLM calls traced, total cost and token counts rolled up per graph run.
|
|
174
|
+
```
|
|
175
|
+
|
|
176
|
+
### Vercel AI
|
|
177
|
+
|
|
178
|
+
```typescript
|
|
179
|
+
import { init } from '@visibe.ai/node'
|
|
180
|
+
|
|
181
|
+
init() // must come BEFORE require('ai')
|
|
182
|
+
|
|
183
|
+
// require() AFTER init() so patchVercelAI() has replaced the exports
|
|
184
|
+
const { generateText } = require('ai')
|
|
185
|
+
const { openai } = require('@ai-sdk/openai')
|
|
186
|
+
|
|
187
|
+
const result = await generateText({
|
|
188
|
+
model: openai('gpt-4o-mini'),
|
|
189
|
+
prompt: 'Write a haiku about observability.',
|
|
190
|
+
})
|
|
191
|
+
// Automatically traced — provider, model, tokens, and cost captured.
|
|
192
|
+
```
|
|
193
|
+
|
|
194
|
+
`streamText` and `generateObject` are also automatically patched.
|
|
195
|
+
|
|
196
|
+
### AWS Bedrock
|
|
197
|
+
|
|
198
|
+
```typescript
|
|
199
|
+
import { init } from '@visibe.ai/node'
|
|
200
|
+
import { BedrockRuntimeClient, ConverseCommand } from '@aws-sdk/client-bedrock-runtime'
|
|
201
|
+
|
|
202
|
+
init()
|
|
203
|
+
|
|
204
|
+
const client = new BedrockRuntimeClient({ region: 'us-east-1' })
|
|
205
|
+
const response = await client.send(new ConverseCommand({
|
|
206
|
+
modelId: 'anthropic.claude-3-haiku-20240307-v1:0',
|
|
207
|
+
messages: [{ role: 'user', content: [{ text: 'Hello!' }] }],
|
|
208
|
+
}))
|
|
209
|
+
// Automatically traced. Works with all models available via Bedrock —
|
|
210
|
+
// Claude, Nova, Llama, Mistral, and more.
|
|
211
|
+
```
|
|
212
|
+
|
|
213
|
+
Supports `ConverseCommand`, `ConverseStreamCommand`, `InvokeModelCommand`, and `InvokeModelWithResponseStreamCommand`.
|
|
214
|
+
|
|
215
|
+
---
|
|
216
|
+
|
|
217
|
+
## ⚙️ Configuration
|
|
218
|
+
|
|
219
|
+
```typescript
|
|
220
|
+
import { init } from '@visibe.ai/node'
|
|
221
|
+
|
|
222
|
+
init({
|
|
223
|
+
apiKey: 'sk_live_abc123', // or set VISIBE_API_KEY env var
|
|
224
|
+
frameworks: ['openai', 'langgraph'], // limit to specific frameworks
|
|
225
|
+
contentLimit: 500, // max chars for LLM content in traces
|
|
226
|
+
debug: true, // enable debug logging
|
|
227
|
+
})
|
|
228
|
+
```
|
|
229
|
+
|
|
230
|
+
### Options
|
|
231
|
+
|
|
232
|
+
| Option | Type | Description | Default |
|
|
233
|
+
|--------|------|-------------|---------|
|
|
234
|
+
| `apiKey` | `string` | Your Visibe API key | `VISIBE_API_KEY` env var |
|
|
235
|
+
| `apiUrl` | `string` | Override API endpoint | `https://api.visibe.ai` |
|
|
236
|
+
| `frameworks` | `string[]` | Limit auto-instrumentation to specific frameworks | All detected |
|
|
237
|
+
| `contentLimit` | `number` | Max chars for LLM/tool content in spans | `1000` |
|
|
238
|
+
| `debug` | `boolean` | Enable debug logging | `false` |
|
|
239
|
+
| `sessionId` | `string` | Tag all traces with a session ID | — |
|
|
240
|
+
|
|
241
|
+
### Environment Variables
|
|
242
|
+
|
|
243
|
+
| Variable | Description | Default |
|
|
244
|
+
|----------|-------------|---------|
|
|
245
|
+
| `VISIBE_API_KEY` | Your API key (required) | — |
|
|
246
|
+
| `VISIBE_API_URL` | Override API endpoint | `https://api.visibe.ai` |
|
|
247
|
+
| `VISIBE_CONTENT_LIMIT` | Max chars for LLM/tool content in spans | `1000` |
|
|
248
|
+
| `VISIBE_DEBUG` | Enable debug logging (`1` to enable) | `0` |
|
|
249
|
+
|
|
250
|
+
---
|
|
251
|
+
|
|
252
|
+
## 📊 What Gets Tracked
|
|
253
|
+
|
|
254
|
+
| Metric | Description |
|
|
255
|
+
|--------|-------------|
|
|
256
|
+
| **Cost** | Total spend + per-call cost breakdown using current model pricing |
|
|
257
|
+
| **Tokens** | Input/output tokens per LLM call |
|
|
258
|
+
| **Duration** | Total time + time per step |
|
|
259
|
+
| **Tools** | Which tools were used, duration, success/failure |
|
|
260
|
+
| **Errors** | When and where things failed, with error type and message |
|
|
261
|
+
| **Spans** | Full execution timeline with LLM calls, tool calls, and agent events |
|
|
262
|
+
|
|
263
|
+
---
|
|
264
|
+
|
|
265
|
+
## 🔧 Manual Instrumentation
|
|
266
|
+
|
|
267
|
+
For cases where you need explicit control — instrumenting a specific client, grouping multiple calls into a named trace, or using Visibe without `init()`.
|
|
268
|
+
|
|
269
|
+
### Instrument a specific client
|
|
270
|
+
|
|
271
|
+
```typescript
|
|
272
|
+
import { Visibe } from '@visibe.ai/node'
|
|
273
|
+
import OpenAI from 'openai'
|
|
274
|
+
|
|
275
|
+
const visibe = new Visibe({ apiKey: 'sk_live_abc123' })
|
|
276
|
+
const client = new OpenAI()
|
|
277
|
+
|
|
278
|
+
visibe.instrument(client, { name: 'my-agent' })
|
|
279
|
+
|
|
280
|
+
await client.chat.completions.create({
|
|
281
|
+
model: 'gpt-4o-mini',
|
|
282
|
+
messages: [{ role: 'user', content: 'Hello!' }],
|
|
283
|
+
})
|
|
284
|
+
// Each call creates its own trace named 'my-agent'.
|
|
285
|
+
```
|
|
286
|
+
|
|
287
|
+
### Group multiple calls into one trace
|
|
288
|
+
|
|
289
|
+
```typescript
|
|
290
|
+
import { Visibe } from '@visibe.ai/node'
|
|
291
|
+
import OpenAI from 'openai'
|
|
292
|
+
|
|
293
|
+
const visibe = new Visibe()
|
|
294
|
+
const client = new OpenAI()
|
|
295
|
+
|
|
296
|
+
await visibe.track(client, 'my-conversation', async () => {
|
|
297
|
+
await client.chat.completions.create({ model: 'gpt-4o-mini', messages: [...] })
|
|
298
|
+
await client.chat.completions.create({ model: 'gpt-4o-mini', messages: [...] })
|
|
299
|
+
})
|
|
300
|
+
// Both calls sent as one grouped trace with combined cost and token totals.
|
|
301
|
+
```
|
|
302
|
+
|
|
303
|
+
### Remove instrumentation
|
|
304
|
+
|
|
305
|
+
```typescript
|
|
306
|
+
visibe.uninstrument(client)
|
|
307
|
+
```
|
|
308
|
+
|
|
309
|
+
### Graceful shutdown
|
|
310
|
+
|
|
311
|
+
The SDK registers `SIGTERM` and `SIGINT` handlers automatically. For long-running scripts where you want to ensure all spans are flushed before exit:
|
|
312
|
+
|
|
313
|
+
```typescript
|
|
314
|
+
import { shutdown } from '@visibe.ai/node'
|
|
315
|
+
|
|
316
|
+
await shutdown()
|
|
317
|
+
```
|
|
318
|
+
|
|
319
|
+
---
|
|
320
|
+
|
|
321
|
+
## 🔗 Resources
|
|
322
|
+
|
|
323
|
+
- [npm Package](https://www.npmjs.com/package/@visibe.ai/node) — Install the latest version
|
|
324
|
+
- [Visibe Dashboard](https://app.visibe.ai) — View your traces and analytics
|
|
325
|
+
|
|
326
|
+
---
|
|
327
|
+
|
|
328
|
+
## 📃 License
|
|
329
|
+
|
|
330
|
+
MIT — see [LICENSE](LICENSE) for details.
|
package/dist/cjs/api.js
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.SpanBatcher = exports.APIClient = void 0;
|
|
4
|
+
const DEFAULT_API_URL = 'https://api.visibe.ai';
const DEFAULT_TIMEOUT_MS = 10000;
/**
 * Thin HTTP transport for the Visibe ingestion API.
 *
 * All requests are fire-and-forget: failures (network errors, timeouts,
 * non-2xx responses) resolve to `false` and are never thrown, so tracing
 * can never crash user code.
 */
class APIClient {
    /**
     * @param {object} options
     * @param {string} [options.apiKey]  - API key; when absent the client is
     *   disabled and every request short-circuits to `false`.
     * @param {string} [options.apiUrl]  - base URL, defaults to the hosted API.
     * @param {number} [options.timeout] - per-request timeout in ms (default 10s).
     */
    constructor(options) {
        this.apiUrl = options.apiUrl ?? DEFAULT_API_URL;
        this.apiKey = options.apiKey;
        this.timeout = options.timeout ?? DEFAULT_TIMEOUT_MS;
        this._enabled = Boolean(options.apiKey);
        if (!this._enabled) {
            // Warn once at construction so a missing key is visible without
            // ever throwing. FIX: the warning type previously read
            // 'VisibleSDKWarning' — a typo for the package name "Visibe"
            // (the sibling code 'VISIBE_NO_API_KEY' shows the intended spelling).
            process.emitWarning('[Visibe] No API key provided — tracing is disabled. Set VISIBE_API_KEY or pass apiKey= to enable.', { type: 'VisibeSDKWarning', code: 'VISIBE_NO_API_KEY' });
        }
    }
    /**
     * Perform one JSON request against the API.
     * @returns {Promise<boolean>} true iff the response status was 2xx.
     */
    async _request(method, path, body) {
        if (!this._enabled)
            return false;
        try {
            const response = await fetch(`${this.apiUrl}${path}`, {
                method,
                headers: {
                    'Content-Type': 'application/json',
                    'Authorization': `Bearer ${this.apiKey}`,
                },
                body: body ? JSON.stringify(body) : undefined,
                // Aborts the request after `timeout` ms; surfaces as a
                // rejection, caught below.
                signal: AbortSignal.timeout(this.timeout),
            });
            return response.ok;
        }
        catch {
            return false; // fire-and-forget — never throw
        }
    }
    /** Create a new trace record. */
    async createTrace(data) {
        return this._request('POST', '/api/traces', data);
    }
    /** Append a single span to an existing trace. */
    async sendSpan(traceId, span) {
        return this._request('POST', `/api/traces/${traceId}/spans`, span);
    }
    /** Append a batch of spans to an existing trace in one request. */
    async sendSpansBatch(traceId, spans) {
        return this._request('POST', `/api/traces/${traceId}/spans/batch`, { spans });
    }
    /** Mark a trace finished and record its rolled-up totals. */
    async completeTrace(traceId, data) {
        return this._request('PATCH', `/api/traces/${traceId}`, data);
    }
}
|
|
48
|
+
exports.APIClient = APIClient;
|
|
49
|
+
// ---------------------------------------------------------------------------
|
|
50
|
+
/**
 * Buffers spans and ships them to the API in batches, grouped by trace.
 *
 * Flushes every 2 s (timer unref'd so it never keeps the process alive) or
 * as soon as 50 spans are buffered, whichever comes first.
 */
class SpanBatcher {
    /** @param {APIClient} api - transport; consulted via `_enabled` for the drop fast-path. */
    constructor(api) {
        this.api = api;
        this.buffer = [];
        this.batchSize = 50;
        this.flushInterval = 2000; // ms
        // In-flight batch sends, tracked so shutdown() can await them
        // instead of sleeping a fixed interval.
        this._inFlight = new Set();
        this.timer = setInterval(() => this.flush(), this.flushInterval);
        // CRITICAL: unref() so the timer does not prevent the process from exiting
        // once all user code has finished.
        this.timer.unref();
    }
    /** Queue one span for delivery; may trigger an immediate flush. */
    add(traceId, span) {
        // Fast-path: drop immediately when there is no API key — don't buffer.
        if (!this.api._enabled)
            return;
        this.buffer.push({ traceId, span });
        if (this.buffer.length >= this.batchSize)
            this.flush();
    }
    /** Drain the buffer and dispatch one batch request per distinct trace. */
    flush() {
        if (this.buffer.length === 0)
            return;
        // Drain atomically — safe because Node.js is single-threaded.
        const batch = this.buffer.splice(0);
        // Group spans by traceId so we make one batch request per active trace.
        const byTrace = new Map();
        for (const { traceId, span } of batch) {
            const spans = byTrace.get(traceId);
            if (spans) {
                spans.push(span);
            }
            else {
                byTrace.set(traceId, [span]);
            }
        }
        for (const [traceId, spans] of byTrace) {
            // Fire-and-forget, but keep the promise so shutdown() can wait on it.
            const send = this.api.sendSpansBatch(traceId, spans).catch(() => { });
            this._inFlight.add(send);
            send.finally(() => this._inFlight.delete(send));
        }
    }
    /**
     * Stop the timer, flush remaining spans, and wait for in-flight sends.
     *
     * FIX: previously this always slept a fixed 300 ms — even when nothing
     * was in flight — and still gave no guarantee the sends had finished.
     * Now it awaits the actual in-flight requests, capped at the same 300 ms
     * so shutdown can never hang on a stuck connection.
     */
    async shutdown() {
        clearInterval(this.timer);
        this.flush();
        if (this._inFlight.size === 0)
            return;
        const cap = new Promise(resolve => {
            const t = setTimeout(resolve, 300);
            t.unref?.();
        });
        await Promise.race([Promise.allSettled([...this._inFlight]), cap]);
    }
}
|
|
92
|
+
exports.SpanBatcher = SpanBatcher;
|
|
@@ -0,0 +1,242 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.Visibe = void 0;
|
|
4
|
+
const node_crypto_1 = require("node:crypto");
|
|
5
|
+
const api_1 = require("./api");
|
|
6
|
+
const group_context_1 = require("./integrations/group-context");
|
|
7
|
+
const utils_1 = require("./utils");
|
|
8
|
+
/**
 * Core SDK client. Owns the API transport and span batcher, patches
 * individual LLM clients via per-framework integrations, and groups
 * multiple calls into a single named trace with track().
 */
class Visibe {
    /**
     * Options fall back to environment variables:
     * apiKey → VISIBE_API_KEY, apiUrl → VISIBE_API_URL,
     * debug → VISIBE_DEBUG === '1', contentLimit → VISIBE_CONTENT_LIMIT
     * (then LLM_CONTENT_LIMIT).
     *
     * FIX: `options` now defaults to {} — the README documents `new Visibe()`
     * with no arguments, which previously threw a TypeError reading
     * `options.apiKey` of undefined.
     *
     * @param {object} [options]
     * @param {string} [options.apiKey]
     * @param {string} [options.apiUrl]
     * @param {boolean} [options.debug]
     * @param {number} [options.contentLimit] max chars kept for LLM content in spans
     * @param {string} [options.sessionId]    tag applied to traces created by track()
     */
    constructor(options = {}) {
        // Map of instrumented clients to their cleanup functions.
        this._instrumented = new Map();
        const apiKey = options.apiKey ?? process.env['VISIBE_API_KEY'];
        const apiUrl = options.apiUrl ?? process.env['VISIBE_API_URL'];
        const debug = options.debug ?? process.env['VISIBE_DEBUG'] === '1';
        // NOTE(review): Number() of a malformed VISIBE_CONTENT_LIMIT yields NaN,
        // which is not nullish and so wins over the default — confirm intended.
        const limit = options.contentLimit
            ?? (process.env['VISIBE_CONTENT_LIMIT'] ? Number(process.env['VISIBE_CONTENT_LIMIT']) : undefined)
            ?? utils_1.LLM_CONTENT_LIMIT;
        this.contentLimit = limit;
        this.sessionId = options.sessionId;
        this.debug = debug;
        this.apiClient = new api_1.APIClient({ apiKey, apiUrl });
        this.batcher = new api_1.SpanBatcher(this.apiClient);
    }
    // ---------------------------------------------------------------------------
    // instrument() — wrap a client so each call creates its own trace.
    // ---------------------------------------------------------------------------
    /**
     * Patch a client in place. Idempotent: a client already instrumented by
     * this Visibe instance is left untouched.
     * @param {object} client - OpenAI / Anthropic / BedrockRuntimeClient instance.
     * @param {{name?: string}} [options] - trace name, defaults to 'agent'.
     */
    instrument(client, options) {
        if (this._instrumented.has(client))
            return; // already patched
        const name = options?.name ?? 'agent';
        // Detect which kind of client this is and apply the right integration.
        // Integrations are resolved lazily here to avoid circular imports at module load.
        const restore = applyIntegration(client, name, this);
        if (restore) {
            this._instrumented.set(client, restore);
        }
    }
    /** Undo instrument(): run the stored restore function, if any. */
    uninstrument(client) {
        const restore = this._instrumented.get(client);
        if (restore) {
            restore();
            this._instrumented.delete(client);
        }
    }
    // ---------------------------------------------------------------------------
    // track() — group multiple LLM calls into a single named trace.
    //
    // Instruments the client if not already done, then runs fn() inside an
    // AsyncLocalStorage context so all integrations automatically route their
    // spans into the shared traceId and report token/cost totals back here.
    // ---------------------------------------------------------------------------
    /**
     * @param {object} client - client whose calls should be grouped.
     * @param {string} name   - trace name shown in the dashboard.
     * @param {Function} fn   - async body; its return value is passed through.
     * @returns {Promise<*>} fn()'s result; rethrows fn()'s error after
     *   marking the trace 'failed'.
     */
    async track(client, name, fn) {
        const traceId = (0, node_crypto_1.randomUUID)();
        const startedAt = new Date().toISOString();
        const startMs = Date.now();
        await this.apiClient.createTrace({
            trace_id: traceId,
            name,
            framework: detectFrameworkName(client),
            started_at: startedAt,
            ...(this.sessionId ? { session_id: this.sessionId } : {}),
        });
        // Accumulators updated by the group context callbacks (called from integrations).
        let llmCallCount = 0;
        let toolCallCount = 0;
        let totalInput = 0;
        let totalOutput = 0;
        let totalCost = 0;
        const groupCtx = {
            traceId,
            onLLMSpan: (inputTokens, outputTokens, cost) => {
                llmCallCount++;
                totalInput += inputTokens;
                totalOutput += outputTokens;
                totalCost += cost;
            },
            onToolSpan: () => { toolCallCount++; },
        };
        // Temporarily instrument client if not already done so that the integration
        // patch is in place for the duration of fn().
        const wasInstrumented = this._instrumented.has(client);
        if (!wasInstrumented) {
            this.instrument(client, { name });
        }
        // Run fn() inside the group ALS context. All instrumented integrations
        // check activeGroupTraceStorage.getStore() — when set they skip their own
        // createTrace/completeTrace calls and route spans into the shared traceId.
        return group_context_1.activeGroupTraceStorage.run(groupCtx, async () => {
            let status = 'completed';
            try {
                return await fn();
            }
            catch (err) {
                status = 'failed';
                throw err;
            }
            finally {
                // Only remove instrumentation we added ourselves.
                if (!wasInstrumented) {
                    this.uninstrument(client);
                }
                const durationMs = Date.now() - startMs;
                // CRITICAL ORDER: flush spans first, then complete the trace.
                // The backend computes all breakdowns from spans at read time.
                this.batcher.flush();
                const sent = await this.apiClient.completeTrace(traceId, {
                    status,
                    ended_at: new Date().toISOString(),
                    duration_ms: durationMs,
                    llm_call_count: llmCallCount,
                    total_cost: parseFloat(totalCost.toFixed(6)),
                    total_tokens: totalInput + totalOutput,
                    total_input_tokens: totalInput,
                    total_output_tokens: totalOutput,
                });
                printTraceSummary({
                    name,
                    llmCallCount,
                    toolCallCount,
                    totalTokens: totalInput + totalOutput,
                    totalCost,
                    durationMs,
                    status,
                }, sent);
            }
        });
    }
    // ---------------------------------------------------------------------------
    // flushSpans() — called by shutdown() in index.ts.
    // ---------------------------------------------------------------------------
    flushSpans() {
        this.batcher.flush();
    }
    // ---------------------------------------------------------------------------
    // Internal helpers used by integrations.
    // ---------------------------------------------------------------------------
    /**
     * Build an 'llm_call' span payload; cost is derived from the model's
     * pricing table and content is truncated to `contentLimit` chars.
     */
    buildLLMSpan(opts) {
        const cost = (0, utils_1.calculateCost)(opts.model, opts.inputTokens, opts.outputTokens);
        return {
            span_id: opts.spanId,
            ...(opts.parentSpanId ? { parent_span_id: opts.parentSpanId } : {}),
            type: 'llm_call',
            timestamp: new Date().toISOString(),
            agent_name: opts.agentName,
            model: opts.model,
            provider: (0, utils_1.detectProvider)(opts.model),
            status: opts.status,
            description: `LLM Call using ${opts.model}`,
            input_tokens: opts.inputTokens,
            output_tokens: opts.outputTokens,
            cost,
            duration_ms: opts.durationMs,
            input_text: (0, utils_1.truncate)(opts.inputText, this.contentLimit),
            output_text: (0, utils_1.truncate)(opts.outputText, this.contentLimit),
        };
    }
    /** Build a 'tool_call' span payload; tool content is capped at 500 chars. */
    buildToolSpan(opts) {
        return {
            span_id: opts.spanId,
            ...(opts.parentSpanId ? { parent_span_id: opts.parentSpanId } : {}),
            type: 'tool_call',
            timestamp: new Date().toISOString(),
            tool_name: opts.toolName,
            agent_name: opts.agentName,
            status: opts.status,
            duration_ms: opts.durationMs,
            input_text: (0, utils_1.truncate)(opts.inputText, 500),
            output_text: (0, utils_1.truncate)(opts.outputText, 500),
        };
    }
    /** Build an 'error' span payload; message is capped at 500 chars. */
    buildErrorSpan(opts) {
        return {
            span_id: opts.spanId,
            type: 'error',
            timestamp: new Date().toISOString(),
            description: `Error: ${opts.errorType}`,
            error_type: opts.errorType,
            error_message: (0, utils_1.truncate)(opts.errorMessage, 500),
            duration_ms: opts.durationMs ?? 10,
        };
    }
    /** Build an 'agent_start' marker span. */
    buildAgentStartSpan(opts) {
        return {
            span_id: opts.spanId,
            type: 'agent_start', // EXACT string — backend validates this
            timestamp: new Date().toISOString(),
            agent_name: opts.agentName,
            description: `Agent started: ${opts.agentName}`,
        };
    }
}
|
|
191
|
+
exports.Visibe = Visibe;
|
|
192
|
+
// ---------------------------------------------------------------------------
|
|
193
|
+
// Helpers (module-private)
|
|
194
|
+
// ---------------------------------------------------------------------------
|
|
195
|
+
/**
 * Print a one-line, pipe-separated summary of a finished trace to stdout.
 * `sent` reports whether the completeTrace call reached the API.
 */
function printTraceSummary(summary, sent) {
    const { name, llmCallCount, toolCallCount, totalTokens, totalCost, durationMs, status } = summary;
    const fields = [
        `[Visibe] Trace: ${name}`,
        `${llmCallCount} LLM calls`,
        `${totalTokens.toLocaleString()} tokens`,
        `$${totalCost.toFixed(6)}`,
        `${(durationMs / 1000).toFixed(1)}s`,
        `${toolCallCount} tool calls`,
        `status: ${status}`,
        `sent: ${sent ? 'OK' : 'FAILED'}`,
    ];
    console.log(fields.join(' | '));
}
|
|
202
|
+
// Detect a human-readable framework name from a client instance.
|
|
203
|
+
// Used in createTrace() payload. Integrations can refine this later.
|
|
204
|
+
// Detect a human-readable framework name from a client instance.
// Used in createTrace() payload. Integrations can refine this later.
// Matching is purely on the constructor name; anything unrecognised
// (including null/undefined clients) maps to 'unknown'.
function detectFrameworkName(client) {
    switch (client?.constructor?.name ?? '') {
        case 'OpenAI':
            return 'openai';
        case 'Anthropic':
            return 'anthropic';
        case 'BedrockRuntimeClient':
            return 'bedrock';
        default:
            return 'unknown';
    }
}
|
|
214
|
+
// Apply the correct integration for a given client instance.
|
|
215
|
+
// Returns a restore function, or null if the client type is unrecognised.
|
|
216
|
+
// Apply the correct integration for a given client instance.
// Returns a restore function, or null if the client type is unrecognised
// or the integration could not be applied.
function applyIntegration(client, name, visibe) {
    // Integrations are loaded lazily to avoid import errors when the peer
    // dependency is not installed. Each integration module exports a
    // `patchClient(client, name, visibe)` that returns a restore function.
    const constructorName = client?.constructor?.name ?? '';
    try {
        if (constructorName === 'OpenAI') {
            // eslint-disable-next-line @typescript-eslint/no-require-imports
            const { patchOpenAIClient } = require('./integrations/openai');
            return patchOpenAIClient(client, name, visibe);
        }
        if (constructorName === 'Anthropic') {
            // eslint-disable-next-line @typescript-eslint/no-require-imports
            const { patchAnthropicClient } = require('./integrations/anthropic');
            return patchAnthropicClient(client, name, visibe);
        }
        if (constructorName === 'BedrockRuntimeClient') {
            // eslint-disable-next-line @typescript-eslint/no-require-imports
            const { patchBedrockClient } = require('./integrations/bedrock');
            return patchBedrockClient(client, name, visibe);
        }
    }
    catch (err) {
        // Integration module not available or patch threw — never crash user
        // code. FIX: previously every failure was swallowed with no signal at
        // all; surface it when debug logging is enabled so a missing peer
        // dependency or broken patch is diagnosable.
        if (visibe?.debug) {
            console.warn(`[Visibe] Failed to instrument ${constructorName || 'client'}:`, err);
        }
    }
    return null;
}
|