@deeptracer/ai 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +488 -0
- package/dist/index.cjs +244 -0
- package/dist/index.d.cts +80 -0
- package/dist/index.d.ts +80 -0
- package/dist/index.js +215 -0
- package/package.json +32 -0
package/README.md
ADDED
@@ -0,0 +1,488 @@

# @deeptracer/ai

Automatic LLM usage tracking wrappers for the [DeepTracer JavaScript SDK](https://github.com/getdeeptracer/deeptracer-js). Wraps **Vercel AI SDK**, **OpenAI**, and **Anthropic** clients to automatically capture model, token counts, latency, and provider for every LLM call -- both streaming and non-streaming.

No code changes required in your AI calls. Wrap once, track everything.

## Table of Contents

- [Installation](#installation)
- [Quick Start](#quick-start)
- [API Reference](#api-reference)
  - [wrapVercelAI(logger, fns)](#wrapvercelailogger-fns)
  - [wrapOpenAI(logger, client)](#wrapopenailogger-client)
  - [wrapAnthropic(logger, client)](#wrapanthropiclogger-client)
- [What Gets Tracked](#what-gets-tracked)
- [Streaming Support](#streaming-support)
- [Full Examples](#full-examples)
  - [Vercel AI SDK with Multiple Providers](#vercel-ai-sdk-with-multiple-providers)
  - [OpenAI Direct Client](#openai-direct-client)
  - [Anthropic Direct Client](#anthropic-direct-client)
  - [Multiple Providers in One App](#multiple-providers-in-one-app)
- [Provider Detection](#provider-detection)
- [Monorepo](#monorepo)
- [License](#license)

## Installation

```bash
npm install @deeptracer/ai @deeptracer/node
```

`@deeptracer/ai` depends on `@deeptracer/core` (included automatically). You also need whichever AI SDK you are wrapping:

```bash
# For Vercel AI SDK
npm install ai @ai-sdk/openai  # or @ai-sdk/anthropic, @ai-sdk/google, etc.

# For OpenAI directly
npm install openai

# For Anthropic directly
npm install @anthropic-ai/sdk
```

## Quick Start

```ts
import { init } from "@deeptracer/node"
import { wrapVercelAI } from "@deeptracer/ai"
import { generateText, streamText } from "ai"
import { openai } from "@ai-sdk/openai"

const logger = init({
  product: "my-app",
  service: "api",
  environment: "production",
  endpoint: "https://your-deeptracer.example.com",
  apiKey: "dt_live_xxx",
})

// Wrap Vercel AI SDK functions
const ai = wrapVercelAI(logger, { generateText, streamText })

// Use exactly as before -- tracking is automatic
const { text } = await ai.generateText({
  model: openai("gpt-4o"),
  prompt: "Explain quantum computing in one sentence.",
})
// DeepTracer automatically records: model, provider, input/output tokens, latency
```

## API Reference

### wrapVercelAI(logger, fns)

Wrap Vercel AI SDK functions with automatic LLM usage tracking. Works with `generateText`, `streamText`, `generateObject`, and `streamObject`.

```ts
import { wrapVercelAI } from "@deeptracer/ai"
import { generateText, streamText, generateObject, streamObject } from "ai"

const ai = wrapVercelAI(logger, { generateText, streamText, generateObject, streamObject })
```

**Parameters:**
- `logger: Logger` -- A DeepTracer logger instance (from `@deeptracer/node` or `@deeptracer/core`).
- `fns: T` -- An object containing Vercel AI SDK functions to wrap. Only `generateText`, `streamText`, `generateObject`, and `streamObject` are instrumented; all other properties are passed through unchanged.

**Returns:** `T` -- The same object shape with wrapped functions. Use it as a drop-in replacement.

**Supported functions:**

| Function | Tracking Method |
|----------|----------------|
| `generateText` | Tracks after the response completes. Reads `result.usage.promptTokens` and `result.usage.completionTokens`. |
| `generateObject` | Same as `generateText`. |
| `streamText` | Awaits the `result.usage` promise (resolved when the stream finishes). |
| `streamObject` | Same as `streamText`. |

**Usage with `generateText`:**

```ts
const { text } = await ai.generateText({
  model: openai("gpt-4o"),
  prompt: "Hello!",
})
```

**Usage with `streamText`:**

```ts
const result = ai.streamText({
  model: openai("gpt-4o"),
  prompt: "Write a story.",
})

// Consume the stream normally -- tracking happens after the stream finishes
for await (const chunk of result.textStream) {
  process.stdout.write(chunk)
}
```

**Usage with `generateObject`:**

```ts
import { z } from "zod"

const { object } = await ai.generateObject({
  model: openai("gpt-4o"),
  schema: z.object({
    name: z.string(),
    age: z.number(),
  }),
  prompt: "Generate a fictional character.",
})
```

---

### wrapOpenAI(logger, client)

Wrap an OpenAI client instance with automatic LLM usage tracking. Intercepts `chat.completions.create()` for both streaming and non-streaming calls.

```ts
import { wrapOpenAI } from "@deeptracer/ai"
import OpenAI from "openai"

const openai = wrapOpenAI(logger, new OpenAI())
```

**Parameters:**
- `logger: Logger` -- A DeepTracer logger instance.
- `client: T` -- An OpenAI client instance (`new OpenAI()`). The client is mutated in-place and also returned.

**Returns:** `T` -- The same client instance, with `chat.completions.create` wrapped.

**Non-streaming usage:**

```ts
const response = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "Hello!" },
  ],
})
// Tracked: model, provider ("openai"), input/output tokens, latency
```

**Streaming usage:**

```ts
const stream = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Tell me a joke." }],
  stream: true,
})

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content || "")
}
// Usage is tracked after the stream is fully consumed
```

**What gets intercepted:**
- `client.chat.completions.create()` -- both `stream: false` (default) and `stream: true`.
- If the client does not have `chat.completions.create`, the client is returned unmodified.

**How streaming tracking works:**
The wrapper intercepts the async iterator on the stream. As chunks arrive, it watches for a `chunk.usage` field (sent by OpenAI in the final chunk when `stream_options: { include_usage: true }` is set). After the iterator is exhausted, usage is reported to DeepTracer. The operation name for streaming calls is `"chat.completions.create (stream)"`.
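
To make sure that final usage chunk is actually emitted, the streaming call can opt in via `stream_options`. A minimal sketch (the option is part of the OpenAI Chat Completions API, not something added by the wrapper):

```ts
const stream = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Hello!" }],
  stream: true,
  // Ask OpenAI to append a final chunk containing prompt/completion token counts,
  // which the wrapper reads from `chunk.usage` once the iterator is exhausted.
  stream_options: { include_usage: true },
})
```

Without `include_usage`, the stream still works and latency is still measured, but the token counts for that call are reported as zero.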

---

### wrapAnthropic(logger, client)

Wrap an Anthropic client instance with automatic LLM usage tracking. Intercepts both `messages.create()` and `messages.stream()`.

```ts
import { wrapAnthropic } from "@deeptracer/ai"
import Anthropic from "@anthropic-ai/sdk"

const anthropic = wrapAnthropic(logger, new Anthropic())
```

**Parameters:**
- `logger: Logger` -- A DeepTracer logger instance.
- `client: T` -- An Anthropic client instance (`new Anthropic()`). The client is mutated in-place and also returned.

**Returns:** `T` -- The same client instance, with `messages.create` and `messages.stream` wrapped.

**Non-streaming usage with `messages.create()`:**

```ts
const message = await anthropic.messages.create({
  model: "claude-sonnet-4-20250514",
  max_tokens: 1024,
  messages: [{ role: "user", content: "Hello!" }],
})
// Tracked: model, provider ("anthropic"), input/output tokens, latency
```

**Streaming with `messages.create({ stream: true })`:**

```ts
const stream = await anthropic.messages.create({
  model: "claude-sonnet-4-20250514",
  max_tokens: 1024,
  messages: [{ role: "user", content: "Write a haiku." }],
  stream: true,
})
// If the stream has a finalMessage() method, usage is tracked when that resolves.
// Otherwise, usage is reported immediately with zero tokens.
```
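
In practice this means that when `messages.create({ stream: true })` hands back a raw event stream without a `finalMessage()` helper, only latency is captured for that call; if complete token counts matter for a streaming call, `messages.stream()` (shown next) is the path that reliably exposes them.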

**Streaming with `messages.stream()`:**

```ts
const stream = anthropic.messages.stream({
  model: "claude-sonnet-4-20250514",
  max_tokens: 1024,
  messages: [{ role: "user", content: "Tell me a story." }],
})

stream.on("text", (text) => process.stdout.write(text))

const finalMessage = await stream.finalMessage()
// Usage is tracked when finalMessage() resolves or when the "finalMessage" event fires.
```

**What gets intercepted:**

| Method | Operation Name | Tracking |
|--------|---------------|----------|
| `messages.create()` (non-streaming) | `"messages.create"` | After response. Reads `result.usage.input_tokens` / `output_tokens`. |
| `messages.create({ stream: true })` | `"messages.create (stream)"` | Via `stream.finalMessage()` if available. |
| `messages.stream()` | `"messages.stream"` | Via `stream.finalMessage()` or `"finalMessage"` event. De-duplicated. |

## What Gets Tracked

Every wrapped LLM call sends the following data to DeepTracer via `logger.llmUsage()`:

| Field | Description | Source |
|-------|-------------|--------|
| `model` | Model identifier (e.g., `"gpt-4o"`, `"claude-sonnet-4-20250514"`) | Response or params |
| `provider` | Provider name (e.g., `"openai"`, `"anthropic"`, `"google"`) | Params or auto-detected from model ID |
| `operation` | Operation name (e.g., `"generateText"`, `"chat.completions.create"`) | Wrapper |
| `inputTokens` | Number of input/prompt tokens | Response usage |
| `outputTokens` | Number of output/completion tokens | Response usage |
| `latencyMs` | Wall-clock time in milliseconds | Measured by wrapper |

Each call also emits an `info`-level log entry for visibility:

```
LLM call: gpt-4o (generateText) { llm_usage: { model: "gpt-4o", provider: "openai", ... } }
```
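
Put together, each call reduces to a single record handed to `logger.llmUsage()`. A sketch of that payload, using only the fields listed in the table above (the interface name here is illustrative, not an exported type):

```ts
// Hypothetical shape for illustration -- field names match the table above
// and the values the wrappers pass to logger.llmUsage().
interface LlmUsageRecord {
  model: string        // e.g. "gpt-4o"
  provider: string     // e.g. "openai", or "unknown" if it cannot be determined
  operation: string    // e.g. "generateText", "chat.completions.create (stream)"
  inputTokens: number  // 0 when the response carries no usage data
  outputTokens: number
  latencyMs: number    // wall-clock time measured by the wrapper
}

logger.llmUsage({
  model: "gpt-4o",
  provider: "openai",
  operation: "generateText",
  inputTokens: 120,
  outputTokens: 48,
  latencyMs: 950,
})
```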

## Streaming Support

All three wrappers support streaming. The approach varies by SDK:

| SDK | Streaming Mechanism | When Usage Is Reported |
|-----|---------------------|----------------------|
| **Vercel AI SDK** (`streamText`/`streamObject`) | Awaits `result.usage` promise | After stream completes |
| **OpenAI** (`stream: true`) | Intercepts async iterator | After iterator is exhausted |
| **Anthropic** (`stream: true` / `.stream()`) | Hooks `finalMessage()` or `"finalMessage"` event | When final message is available |

Streaming wrappers are non-blocking -- they do not interfere with the stream's output or timing. Usage data is reported asynchronously after the stream finishes.
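
The same `result.usage` promise the wrapper awaits is also available to application code, so nothing stops you from reading the numbers yourself after consuming the stream. A minimal sketch (standard Vercel AI SDK behavior, independent of DeepTracer):

```ts
const result = ai.streamText({
  model: openai("gpt-4o"),
  prompt: "Write a story.",
})

for await (const chunk of result.textStream) {
  process.stdout.write(chunk)
}

// Resolves once the stream has finished -- the wrapper awaits this same promise internally.
const usage = await result.usage
console.log(usage.promptTokens, usage.completionTokens)
```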

## Full Examples

### Vercel AI SDK with Multiple Providers

```ts
import { init } from "@deeptracer/node"
import { wrapVercelAI } from "@deeptracer/ai"
import { generateText, streamText } from "ai"
import { openai } from "@ai-sdk/openai"
import { anthropic } from "@ai-sdk/anthropic"

const logger = init({
  product: "my-app",
  service: "ai-service",
  environment: "production",
  endpoint: process.env.DEEPTRACER_ENDPOINT!,
  apiKey: process.env.DEEPTRACER_API_KEY!,
})

const ai = wrapVercelAI(logger, { generateText, streamText })

// OpenAI via Vercel AI SDK
const { text: summary } = await ai.generateText({
  model: openai("gpt-4o"),
  prompt: "Summarize the history of computing.",
})

// Anthropic via Vercel AI SDK
const { text: analysis } = await ai.generateText({
  model: anthropic("claude-sonnet-4-20250514"),
  prompt: "Analyze current tech trends.",
})

// Streaming
const result = ai.streamText({
  model: openai("gpt-4o-mini"),
  prompt: "Write a short poem about APIs.",
})

for await (const chunk of result.textStream) {
  process.stdout.write(chunk)
}
```

### OpenAI Direct Client

```ts
import { init } from "@deeptracer/node"
import { wrapOpenAI } from "@deeptracer/ai"
import OpenAI from "openai"

const logger = init({
  product: "my-app",
  service: "chatbot",
  environment: "production",
  endpoint: process.env.DEEPTRACER_ENDPOINT!,
  apiKey: process.env.DEEPTRACER_API_KEY!,
})

const openai = wrapOpenAI(logger, new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
}))

// Non-streaming
const response = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "What is the capital of France?" },
  ],
})
console.log(response.choices[0].message.content)

// Streaming
const stream = await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Count to 10." }],
  stream: true,
  stream_options: { include_usage: true }, // recommended for usage tracking
})

for await (const chunk of stream) {
  const content = chunk.choices[0]?.delta?.content
  if (content) process.stdout.write(content)
}
```

### Anthropic Direct Client

```ts
import { init } from "@deeptracer/node"
import { wrapAnthropic } from "@deeptracer/ai"
import Anthropic from "@anthropic-ai/sdk"

const logger = init({
  product: "my-app",
  service: "chatbot",
  environment: "production",
  endpoint: process.env.DEEPTRACER_ENDPOINT!,
  apiKey: process.env.DEEPTRACER_API_KEY!,
})

const anthropic = wrapAnthropic(logger, new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
}))

// Non-streaming
const message = await anthropic.messages.create({
  model: "claude-sonnet-4-20250514",
  max_tokens: 1024,
  messages: [{ role: "user", content: "Explain recursion simply." }],
})
console.log(message.content[0].text)

// Streaming with messages.stream()
const stream = anthropic.messages.stream({
  model: "claude-sonnet-4-20250514",
  max_tokens: 1024,
  messages: [{ role: "user", content: "Write a limerick about TypeScript." }],
})

stream.on("text", (text) => process.stdout.write(text))

const finalMessage = await stream.finalMessage()
console.log("\n\nTokens used:", finalMessage.usage)
```

### Multiple Providers in One App

```ts
import { init } from "@deeptracer/node"
import { wrapVercelAI, wrapOpenAI, wrapAnthropic } from "@deeptracer/ai"
import { generateText } from "ai"
import { openai as aiSdkOpenai } from "@ai-sdk/openai"
import OpenAI from "openai"
import Anthropic from "@anthropic-ai/sdk"

const logger = init({
  product: "multi-llm-app",
  service: "orchestrator",
  environment: "production",
  endpoint: process.env.DEEPTRACER_ENDPOINT!,
  apiKey: process.env.DEEPTRACER_API_KEY!,
})

// Wrap all three SDKs
const ai = wrapVercelAI(logger, { generateText })
const openai = wrapOpenAI(logger, new OpenAI())
const anthropic = wrapAnthropic(logger, new Anthropic())

// All calls are automatically tracked in DeepTracer
// with model, provider, token counts, and latency

await ai.generateText({ model: aiSdkOpenai("gpt-4o-mini"), prompt: "Hello via Vercel AI" })

await openai.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Hello via OpenAI" }],
})

await anthropic.messages.create({
  model: "claude-sonnet-4-20250514",
  max_tokens: 256,
  messages: [{ role: "user", content: "Hello via Anthropic" }],
})
```

## Provider Detection

For `wrapVercelAI`, the provider name is determined in this order:

1. `params.model.provider` (set by the Vercel AI SDK provider packages)
2. Auto-detection from the model ID string:

| Model ID prefix | Detected provider |
|-----------------|-------------------|
| `gpt-`, `o1`, `o3`, `o4` | `"openai"` |
| `claude-` | `"anthropic"` |
| `gemini-` | `"google"` |
| `mistral`, `mixtral` | `"mistral"` |
| `llama` | `"meta"` |

For `wrapOpenAI` and `wrapAnthropic`, the provider is hardcoded to `"openai"` and `"anthropic"` respectively.
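
The auto-detection step is a simple prefix check. A sketch of the logic, mirroring the `extractProviderFromModelId` helper visible in the bundled `dist/index.js` below (the standalone function name here is illustrative; the helper is not exported):

```ts
// Mirrors the bundled helper: first match wins, unrecognized IDs yield undefined
// so the wrapper can fall back to reporting the provider as "unknown".
function detectProvider(modelId: string): string | undefined {
  if (!modelId || modelId === "unknown") return undefined
  if (modelId.startsWith("gpt-") || modelId.startsWith("o1") ||
      modelId.startsWith("o3") || modelId.startsWith("o4")) return "openai"
  if (modelId.startsWith("claude-")) return "anthropic"
  if (modelId.startsWith("gemini-")) return "google"
  if (modelId.startsWith("mistral") || modelId.startsWith("mixtral")) return "mistral"
  if (modelId.startsWith("llama")) return "meta"
  return undefined
}
```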

## Monorepo

This package is part of the [DeepTracer JavaScript SDK](https://github.com/getdeeptracer/deeptracer-js) monorepo:

| Package | Description |
|---------|-------------|
| [`@deeptracer/core`](https://github.com/getdeeptracer/deeptracer-js/tree/main/packages/core) | Zero-dependency shared core |
| [`@deeptracer/node`](https://github.com/getdeeptracer/deeptracer-js/tree/main/packages/node) | Node.js/Bun SDK -- global errors, console capture, Hono & Express middleware |
| **`@deeptracer/ai`** | AI SDK wrappers (this package) |
| [`@deeptracer/browser`](https://github.com/getdeeptracer/deeptracer-js/tree/main/packages/browser) | Browser SDK (preview) |
| [`@deeptracer/react`](https://github.com/getdeeptracer/deeptracer-js/tree/main/packages/react) | React integration (coming soon) |
| [`@deeptracer/nextjs`](https://github.com/getdeeptracer/deeptracer-js/tree/main/packages/nextjs) | Next.js integration (coming soon) |

## License

MIT
package/dist/index.cjs
ADDED
@@ -0,0 +1,244 @@

"use strict";
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/index.ts
var index_exports = {};
__export(index_exports, {
  wrapAnthropic: () => wrapAnthropic,
  wrapOpenAI: () => wrapOpenAI,
  wrapVercelAI: () => wrapVercelAI
});
module.exports = __toCommonJS(index_exports);

// src/vercel-ai.ts
function wrapVercelAI(logger, fns) {
  const wrapped = {};
  for (const [name, fn] of Object.entries(fns)) {
    if (typeof fn !== "function") {
      wrapped[name] = fn;
      continue;
    }
    if (name === "generateText" || name === "generateObject") {
      wrapped[name] = wrapVercelGenerate(logger, fn, name);
    } else if (name === "streamText" || name === "streamObject") {
      wrapped[name] = wrapVercelStream(logger, fn, name);
    } else {
      wrapped[name] = fn;
    }
  }
  return wrapped;
}
function wrapVercelGenerate(logger, fn, operation) {
  return async (params, ...rest) => {
    const startMs = Date.now();
    const result = await fn(params, ...rest);
    const latencyMs = Date.now() - startMs;
    const model = result?.response?.modelId || params?.model?.modelId || "unknown";
    const provider = params?.model?.provider || extractProviderFromModelId(model) || "unknown";
    logger.llmUsage({
      model,
      provider,
      operation,
      inputTokens: result?.usage?.promptTokens || 0,
      outputTokens: result?.usage?.completionTokens || 0,
      latencyMs
    });
    return result;
  };
}
function wrapVercelStream(logger, fn, operation) {
  return (params, ...rest) => {
    const startMs = Date.now();
    const result = fn(params, ...rest);
    const model = params?.model?.modelId || "unknown";
    const provider = params?.model?.provider || extractProviderFromModelId(model) || "unknown";
    if (result?.usage && typeof result.usage.then === "function") {
      result.usage.then((usage) => {
        const latencyMs = Date.now() - startMs;
        logger.llmUsage({
          model,
          provider,
          operation,
          inputTokens: usage?.promptTokens || 0,
          outputTokens: usage?.completionTokens || 0,
          latencyMs
        });
      }).catch(() => {
      });
    }
    return result;
  };
}
function extractProviderFromModelId(modelId) {
  if (!modelId || modelId === "unknown") return void 0;
  if (modelId.startsWith("gpt-") || modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4")) return "openai";
  if (modelId.startsWith("claude-")) return "anthropic";
  if (modelId.startsWith("gemini-")) return "google";
  if (modelId.startsWith("mistral") || modelId.startsWith("mixtral")) return "mistral";
  if (modelId.startsWith("llama")) return "meta";
  return void 0;
}

// src/openai.ts
function wrapOpenAI(logger, client) {
  const originalCreate = client.chat?.completions?.create;
  if (!originalCreate) return client;
  const boundCreate = originalCreate.bind(client.chat.completions);
  client.chat.completions.create = async (params, ...rest) => {
    const startMs = Date.now();
    if (!params.stream) {
      const result = await boundCreate(params, ...rest);
      const latencyMs = Date.now() - startMs;
      logger.llmUsage({
        model: result.model || params.model || "unknown",
        provider: "openai",
        operation: "chat.completions.create",
        inputTokens: result.usage?.prompt_tokens || 0,
        outputTokens: result.usage?.completion_tokens || 0,
        latencyMs
      });
      return result;
    }
    const stream = await boundCreate(params, ...rest);
    const originalIterator = stream[Symbol.asyncIterator]?.bind(stream);
    if (!originalIterator) return stream;
    let usageData = null;
    stream[Symbol.asyncIterator] = async function* () {
      for await (const chunk of originalIterator()) {
        if (chunk.usage) {
          usageData = chunk.usage;
        }
        yield chunk;
      }
      const latencyMs = Date.now() - startMs;
      logger.llmUsage({
        model: params.model || "unknown",
        provider: "openai",
        operation: "chat.completions.create (stream)",
        inputTokens: usageData?.prompt_tokens || 0,
        outputTokens: usageData?.completion_tokens || 0,
        latencyMs
      });
    };
    return stream;
  };
  return client;
}

// src/anthropic.ts
function wrapAnthropic(logger, client) {
  const originalCreate = client.messages?.create;
  if (originalCreate) {
    const boundCreate = originalCreate.bind(client.messages);
    client.messages.create = async (params, ...rest) => {
      const startMs = Date.now();
      if (!params.stream) {
        const result = await boundCreate(params, ...rest);
        const latencyMs2 = Date.now() - startMs;
        logger.llmUsage({
          model: result.model || params.model || "unknown",
          provider: "anthropic",
          operation: "messages.create",
          inputTokens: result.usage?.input_tokens || 0,
          outputTokens: result.usage?.output_tokens || 0,
          latencyMs: latencyMs2
        });
        return result;
      }
      const stream = await boundCreate(params, ...rest);
      const latencyMs = Date.now() - startMs;
      if (stream && typeof stream.finalMessage === "function") {
        stream.finalMessage().then((message) => {
          const totalLatencyMs = Date.now() - startMs;
          logger.llmUsage({
            model: message.model || params.model || "unknown",
            provider: "anthropic",
            operation: "messages.create (stream)",
            inputTokens: message.usage?.input_tokens || 0,
            outputTokens: message.usage?.output_tokens || 0,
            latencyMs: totalLatencyMs
          });
        }).catch(() => {
        });
      } else {
        logger.llmUsage({
          model: params.model || "unknown",
          provider: "anthropic",
          operation: "messages.create (stream)",
          inputTokens: 0,
          outputTokens: 0,
          latencyMs
        });
      }
      return stream;
    };
  }
  const originalStream = client.messages?.stream;
  if (originalStream) {
    const boundStream = originalStream.bind(client.messages);
    client.messages.stream = (params, ...rest) => {
      const startMs = Date.now();
      const stream = boundStream(params, ...rest);
      if (stream && typeof stream.finalMessage === "function") {
        const originalFinalMessage = stream.finalMessage.bind(stream);
        let tracked = false;
        stream.finalMessage = async () => {
          const message = await originalFinalMessage();
          if (!tracked) {
            tracked = true;
            const latencyMs = Date.now() - startMs;
            logger.llmUsage({
              model: message.model || params.model || "unknown",
              provider: "anthropic",
              operation: "messages.stream",
              inputTokens: message.usage?.input_tokens || 0,
              outputTokens: message.usage?.output_tokens || 0,
              latencyMs
            });
          }
          return message;
        };
        if (typeof stream.on === "function") {
          stream.on("finalMessage", (message) => {
            if (!tracked) {
              tracked = true;
              const latencyMs = Date.now() - startMs;
              logger.llmUsage({
                model: message.model || params.model || "unknown",
                provider: "anthropic",
                operation: "messages.stream",
                inputTokens: message.usage?.input_tokens || 0,
                outputTokens: message.usage?.output_tokens || 0,
                latencyMs
              });
            }
          });
        }
      }
      return stream;
    };
  }
  return client;
}
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  wrapAnthropic,
  wrapOpenAI,
  wrapVercelAI
});
package/dist/index.d.cts
ADDED
@@ -0,0 +1,80 @@

import { Logger } from '@deeptracer/core';

/**
 * Wrap Vercel AI SDK functions with automatic LLM usage tracking.
 * Works with generateText, streamText, generateObject, streamObject.
 *
 * @param logger - DeepTracer logger instance
 * @param fns - Object containing Vercel AI SDK functions to wrap
 * @returns The same functions, wrapped with automatic LLM tracking
 *
 * @example
 * ```ts
 * import { createLogger } from "@deeptracer/node"
 * import { wrapVercelAI } from "@deeptracer/ai"
 * import { generateText, streamText } from "ai"
 * import { openai } from "@ai-sdk/openai"
 *
 * const logger = createLogger({ ... })
 * const ai = wrapVercelAI(logger, { generateText, streamText })
 *
 * const { text } = await ai.generateText({
 *   model: openai("gpt-4o"),
 *   prompt: "Hello",
 * })
 * ```
 */
declare function wrapVercelAI<T extends Record<string, any>>(logger: Logger, fns: T): T;

/**
 * Wrap an OpenAI client instance with automatic LLM usage tracking.
 * Intercepts chat.completions.create() for both streaming and non-streaming.
 *
 * @param logger - DeepTracer logger instance
 * @param client - OpenAI client instance (new OpenAI())
 * @returns The same client, with tracking added
 *
 * @example
 * ```ts
 * import { createLogger } from "@deeptracer/node"
 * import { wrapOpenAI } from "@deeptracer/ai"
 * import OpenAI from "openai"
 *
 * const logger = createLogger({ ... })
 * const openai = wrapOpenAI(logger, new OpenAI())
 *
 * const response = await openai.chat.completions.create({
 *   model: "gpt-4o",
 *   messages: [{ role: "user", content: "Hello!" }],
 * })
 * ```
 */
declare function wrapOpenAI<T extends Record<string, any>>(logger: Logger, client: T): T;

/**
 * Wrap an Anthropic client instance with automatic LLM usage tracking.
 * Intercepts messages.create() and messages.stream() for both streaming and non-streaming.
 *
 * @param logger - DeepTracer logger instance
 * @param client - Anthropic client instance (new Anthropic())
 * @returns The same client, with tracking added
 *
 * @example
 * ```ts
 * import { createLogger } from "@deeptracer/node"
 * import { wrapAnthropic } from "@deeptracer/ai"
 * import Anthropic from "@anthropic-ai/sdk"
 *
 * const logger = createLogger({ ... })
 * const anthropic = wrapAnthropic(logger, new Anthropic())
 *
 * const message = await anthropic.messages.create({
 *   model: "claude-sonnet-4-20250514",
 *   max_tokens: 1024,
 *   messages: [{ role: "user", content: "Hello!" }],
 * })
 * ```
 */
declare function wrapAnthropic<T extends Record<string, any>>(logger: Logger, client: T): T;

export { wrapAnthropic, wrapOpenAI, wrapVercelAI };
package/dist/index.d.ts
ADDED
@@ -0,0 +1,80 @@

import { Logger } from '@deeptracer/core';

/**
 * Wrap Vercel AI SDK functions with automatic LLM usage tracking.
 * Works with generateText, streamText, generateObject, streamObject.
 *
 * @param logger - DeepTracer logger instance
 * @param fns - Object containing Vercel AI SDK functions to wrap
 * @returns The same functions, wrapped with automatic LLM tracking
 *
 * @example
 * ```ts
 * import { createLogger } from "@deeptracer/node"
 * import { wrapVercelAI } from "@deeptracer/ai"
 * import { generateText, streamText } from "ai"
 * import { openai } from "@ai-sdk/openai"
 *
 * const logger = createLogger({ ... })
 * const ai = wrapVercelAI(logger, { generateText, streamText })
 *
 * const { text } = await ai.generateText({
 *   model: openai("gpt-4o"),
 *   prompt: "Hello",
 * })
 * ```
 */
declare function wrapVercelAI<T extends Record<string, any>>(logger: Logger, fns: T): T;

/**
 * Wrap an OpenAI client instance with automatic LLM usage tracking.
 * Intercepts chat.completions.create() for both streaming and non-streaming.
 *
 * @param logger - DeepTracer logger instance
 * @param client - OpenAI client instance (new OpenAI())
 * @returns The same client, with tracking added
 *
 * @example
 * ```ts
 * import { createLogger } from "@deeptracer/node"
 * import { wrapOpenAI } from "@deeptracer/ai"
 * import OpenAI from "openai"
 *
 * const logger = createLogger({ ... })
 * const openai = wrapOpenAI(logger, new OpenAI())
 *
 * const response = await openai.chat.completions.create({
 *   model: "gpt-4o",
 *   messages: [{ role: "user", content: "Hello!" }],
 * })
 * ```
 */
declare function wrapOpenAI<T extends Record<string, any>>(logger: Logger, client: T): T;

/**
 * Wrap an Anthropic client instance with automatic LLM usage tracking.
 * Intercepts messages.create() and messages.stream() for both streaming and non-streaming.
 *
 * @param logger - DeepTracer logger instance
 * @param client - Anthropic client instance (new Anthropic())
 * @returns The same client, with tracking added
 *
 * @example
 * ```ts
 * import { createLogger } from "@deeptracer/node"
 * import { wrapAnthropic } from "@deeptracer/ai"
 * import Anthropic from "@anthropic-ai/sdk"
 *
 * const logger = createLogger({ ... })
 * const anthropic = wrapAnthropic(logger, new Anthropic())
 *
 * const message = await anthropic.messages.create({
 *   model: "claude-sonnet-4-20250514",
 *   max_tokens: 1024,
 *   messages: [{ role: "user", content: "Hello!" }],
 * })
 * ```
 */
declare function wrapAnthropic<T extends Record<string, any>>(logger: Logger, client: T): T;

export { wrapAnthropic, wrapOpenAI, wrapVercelAI };
package/dist/index.js
ADDED
@@ -0,0 +1,215 @@

// src/vercel-ai.ts
function wrapVercelAI(logger, fns) {
  const wrapped = {};
  for (const [name, fn] of Object.entries(fns)) {
    if (typeof fn !== "function") {
      wrapped[name] = fn;
      continue;
    }
    if (name === "generateText" || name === "generateObject") {
      wrapped[name] = wrapVercelGenerate(logger, fn, name);
    } else if (name === "streamText" || name === "streamObject") {
      wrapped[name] = wrapVercelStream(logger, fn, name);
    } else {
      wrapped[name] = fn;
    }
  }
  return wrapped;
}
function wrapVercelGenerate(logger, fn, operation) {
  return async (params, ...rest) => {
    const startMs = Date.now();
    const result = await fn(params, ...rest);
    const latencyMs = Date.now() - startMs;
    const model = result?.response?.modelId || params?.model?.modelId || "unknown";
    const provider = params?.model?.provider || extractProviderFromModelId(model) || "unknown";
    logger.llmUsage({
      model,
      provider,
      operation,
      inputTokens: result?.usage?.promptTokens || 0,
      outputTokens: result?.usage?.completionTokens || 0,
      latencyMs
    });
    return result;
  };
}
function wrapVercelStream(logger, fn, operation) {
  return (params, ...rest) => {
    const startMs = Date.now();
    const result = fn(params, ...rest);
    const model = params?.model?.modelId || "unknown";
    const provider = params?.model?.provider || extractProviderFromModelId(model) || "unknown";
    if (result?.usage && typeof result.usage.then === "function") {
      result.usage.then((usage) => {
        const latencyMs = Date.now() - startMs;
        logger.llmUsage({
          model,
          provider,
          operation,
          inputTokens: usage?.promptTokens || 0,
          outputTokens: usage?.completionTokens || 0,
          latencyMs
        });
      }).catch(() => {
      });
    }
    return result;
  };
}
function extractProviderFromModelId(modelId) {
  if (!modelId || modelId === "unknown") return void 0;
  if (modelId.startsWith("gpt-") || modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4")) return "openai";
  if (modelId.startsWith("claude-")) return "anthropic";
  if (modelId.startsWith("gemini-")) return "google";
  if (modelId.startsWith("mistral") || modelId.startsWith("mixtral")) return "mistral";
  if (modelId.startsWith("llama")) return "meta";
  return void 0;
}

// src/openai.ts
function wrapOpenAI(logger, client) {
  const originalCreate = client.chat?.completions?.create;
  if (!originalCreate) return client;
  const boundCreate = originalCreate.bind(client.chat.completions);
  client.chat.completions.create = async (params, ...rest) => {
    const startMs = Date.now();
    if (!params.stream) {
      const result = await boundCreate(params, ...rest);
      const latencyMs = Date.now() - startMs;
      logger.llmUsage({
        model: result.model || params.model || "unknown",
        provider: "openai",
        operation: "chat.completions.create",
        inputTokens: result.usage?.prompt_tokens || 0,
        outputTokens: result.usage?.completion_tokens || 0,
        latencyMs
      });
      return result;
    }
    const stream = await boundCreate(params, ...rest);
    const originalIterator = stream[Symbol.asyncIterator]?.bind(stream);
    if (!originalIterator) return stream;
    let usageData = null;
    stream[Symbol.asyncIterator] = async function* () {
      for await (const chunk of originalIterator()) {
        if (chunk.usage) {
          usageData = chunk.usage;
        }
        yield chunk;
      }
      const latencyMs = Date.now() - startMs;
      logger.llmUsage({
        model: params.model || "unknown",
        provider: "openai",
        operation: "chat.completions.create (stream)",
        inputTokens: usageData?.prompt_tokens || 0,
        outputTokens: usageData?.completion_tokens || 0,
        latencyMs
      });
    };
    return stream;
  };
  return client;
}

// src/anthropic.ts
function wrapAnthropic(logger, client) {
  const originalCreate = client.messages?.create;
  if (originalCreate) {
    const boundCreate = originalCreate.bind(client.messages);
    client.messages.create = async (params, ...rest) => {
      const startMs = Date.now();
      if (!params.stream) {
        const result = await boundCreate(params, ...rest);
        const latencyMs2 = Date.now() - startMs;
        logger.llmUsage({
          model: result.model || params.model || "unknown",
          provider: "anthropic",
          operation: "messages.create",
          inputTokens: result.usage?.input_tokens || 0,
          outputTokens: result.usage?.output_tokens || 0,
          latencyMs: latencyMs2
        });
        return result;
      }
      const stream = await boundCreate(params, ...rest);
      const latencyMs = Date.now() - startMs;
      if (stream && typeof stream.finalMessage === "function") {
        stream.finalMessage().then((message) => {
          const totalLatencyMs = Date.now() - startMs;
          logger.llmUsage({
            model: message.model || params.model || "unknown",
            provider: "anthropic",
            operation: "messages.create (stream)",
            inputTokens: message.usage?.input_tokens || 0,
            outputTokens: message.usage?.output_tokens || 0,
            latencyMs: totalLatencyMs
          });
        }).catch(() => {
        });
      } else {
        logger.llmUsage({
          model: params.model || "unknown",
          provider: "anthropic",
          operation: "messages.create (stream)",
          inputTokens: 0,
          outputTokens: 0,
          latencyMs
        });
      }
      return stream;
    };
  }
  const originalStream = client.messages?.stream;
  if (originalStream) {
    const boundStream = originalStream.bind(client.messages);
    client.messages.stream = (params, ...rest) => {
      const startMs = Date.now();
      const stream = boundStream(params, ...rest);
      if (stream && typeof stream.finalMessage === "function") {
        const originalFinalMessage = stream.finalMessage.bind(stream);
        let tracked = false;
        stream.finalMessage = async () => {
          const message = await originalFinalMessage();
          if (!tracked) {
            tracked = true;
            const latencyMs = Date.now() - startMs;
            logger.llmUsage({
              model: message.model || params.model || "unknown",
              provider: "anthropic",
              operation: "messages.stream",
              inputTokens: message.usage?.input_tokens || 0,
              outputTokens: message.usage?.output_tokens || 0,
              latencyMs
            });
          }
          return message;
        };
        if (typeof stream.on === "function") {
          stream.on("finalMessage", (message) => {
            if (!tracked) {
              tracked = true;
              const latencyMs = Date.now() - startMs;
              logger.llmUsage({
                model: message.model || params.model || "unknown",
                provider: "anthropic",
                operation: "messages.stream",
                inputTokens: message.usage?.input_tokens || 0,
                outputTokens: message.usage?.output_tokens || 0,
                latencyMs
              });
            }
          });
        }
      }
      return stream;
    };
  }
  return client;
}
export {
  wrapAnthropic,
  wrapOpenAI,
  wrapVercelAI
};
package/package.json
ADDED
@@ -0,0 +1,32 @@

{
  "name": "@deeptracer/ai",
  "version": "0.2.0",
  "description": "DeepTracer AI SDK wrappers — automatic LLM usage tracking for Vercel AI SDK, OpenAI, and Anthropic",
  "type": "module",
  "main": "dist/index.cjs",
  "module": "dist/index.js",
  "types": "dist/index.d.ts",
  "exports": {
    ".": {
      "types": "./dist/index.d.ts",
      "import": "./dist/index.js",
      "require": "./dist/index.cjs"
    }
  },
  "files": ["dist", "README.md"],
  "sideEffects": false,
  "keywords": ["deeptracer", "ai", "llm", "openai", "anthropic", "vercel-ai"],
  "repository": {
    "type": "git",
    "url": "https://github.com/getdeeptracer/deeptracer-js.git",
    "directory": "packages/ai"
  },
  "license": "MIT",
  "dependencies": {
    "@deeptracer/core": "0.2.0"
  },
  "scripts": {
    "build": "tsup",
    "dev": "tsup --watch"
  }
}