@fallom/trace 0.1.1 → 0.1.3
- package/README.md +92 -179
- package/dist/index.d.mts +76 -3
- package/dist/index.d.ts +76 -3
- package/dist/index.js +231 -40
- package/dist/index.mjs +231 -46
- package/package.json +6 -3
package/README.md CHANGED

````diff
@@ -6,162 +6,141 @@ Model A/B testing and tracing for LLM applications. Zero latency, production-rea
 
 ```bash
 npm install @fallom/trace
-
-# With auto-instrumentation for your LLM provider:
-npm install @fallom/trace @traceloop/node-server-sdk
 ```
 
 ## Quick Start
 
 ```typescript
-
-import
+import fallom from "@fallom/trace";
+import OpenAI from "openai";
 
-
+// Initialize Fallom
+await fallom.init({ apiKey: "your-api-key" });
 
-//
-const
+// Wrap your LLM client for automatic tracing
+const openai = fallom.trace.wrapOpenAI(new OpenAI());
 
-// Set
-fallom.trace.setSession(
+// Set session context
+fallom.trace.setSession("my-agent", sessionId);
 
 // All LLM calls are now automatically traced!
-const openai = new OpenAI();
 const response = await openai.chat.completions.create({
-  model:
-  messages: [{ role:
+  model: "gpt-4o",
+  messages: [{ role: "user", content: "Hello!" }],
 });
 ```
 
-> ⚠️ **Import Order Matters!** Auto-instrumentation hooks into libraries when they're imported. You must call `fallom.init()` BEFORE importing `openai`, `@anthropic-ai/sdk`, etc. Use dynamic imports (`await import('openai')`) to ensure correct order.
-
 ## Model A/B Testing
 
 Run A/B tests on models with zero latency. Same session always gets same model (sticky assignment).
 
 ```typescript
-import { models } from
+import { models } from "@fallom/trace";
 
 // Get assigned model for this session
-const model = await models.get(
+const model = await models.get("summarizer-config", sessionId);
 // Returns: "gpt-4o" or "claude-3-5-sonnet" based on your config weights
 
-const
-await agent.run(message);
-```
-
-### Version Pinning
-
-Pin to a specific config version, or use latest (default):
-
-```typescript
-// Use latest version (default)
-const model = await models.get('my-config', sessionId);
-
-// Pin to specific version
-const model = await models.get('my-config', sessionId, { version: 2 });
+const response = await openai.chat.completions.create({ model, ... });
 ```
 
 ### Fallback for Resilience
 
-Always provide a fallback so your app works even if Fallom is down:
-
 ```typescript
-const model = await models.get(
-  fallback:
+const model = await models.get("my-config", sessionId, {
+  fallback: "gpt-4o-mini", // Used if config not found or Fallom unreachable
 });
 ```
 
-**Resilience guarantees:**
-- Short timeouts (1-2 seconds max)
-- Background config sync (never blocks your requests)
-- Graceful degradation (returns fallback on any error)
-- Your app is never impacted by Fallom being down
-
 ## Tracing
 
-
-
-
->
-> ```typescript
-> import OpenAI from 'openai';
->
-> // OpenRouter, LiteLLM, vLLM, etc.
-> const client = new OpenAI({
->   baseURL: 'https://openrouter.ai/api/v1', // or your provider's URL
->   apiKey: 'your-provider-key',
-> });
->
-> // Now this call will be auto-traced!
-> const response = await client.chat.completions.create({
->   model: 'gpt-4o',
->   messages: [...],
-> });
-> ```
-
-### Automatic Tracing
+Wrap your LLM client once, all calls are automatically traced.
+
+### OpenAI (+ OpenRouter, Azure, LiteLLM, etc.)
 
 ```typescript
-
-import fallom from
-
+import OpenAI from "openai";
+import fallom from "@fallom/trace";
+
+await fallom.init({ apiKey: "your-api-key" });
 
-//
-const
-
+// Works with any OpenAI-compatible API
+const openai = fallom.trace.wrapOpenAI(
+  new OpenAI({
+    baseURL: "https://openrouter.ai/api/v1", // or Azure, LiteLLM, etc.
+    apiKey: "your-provider-key",
+  })
+);
 
-
-fallom.trace.setSession('my-agent', sessionId);
+fallom.trace.setSession("my-config", sessionId);
 
-//
-// - Model, tokens, latency
-// - Prompts and completions
-// - Your config_key and session_id
+// Automatically traced!
 const response = await openai.chat.completions.create({
-  model:
-  messages: [
+  model: "gpt-4o",
+  messages: [{ role: "user", content: "Hello!" }],
 });
 ```
 
-
-> ```bash
-> npm install @traceloop/node-server-sdk
-> ```
+### Anthropic (Claude)
 
-
+```typescript
+import Anthropic from "@anthropic-ai/sdk";
+import fallom from "@fallom/trace";
 
-
+await fallom.init({ apiKey: "your-api-key" });
 
-
-import { trace } from '@fallom/trace';
+const anthropic = fallom.trace.wrapAnthropic(new Anthropic());
 
-
-
-
-
+fallom.trace.setSession("my-config", sessionId);
+
+// Automatically traced!
+const response = await anthropic.messages.create({
+  model: "claude-3-5-sonnet-20241022",
+  messages: [{ role: "user", content: "Hello!" }],
 });
 ```
 
-###
+### Google AI (Gemini)
+
+```typescript
+import { GoogleGenerativeAI } from "@google/generative-ai";
+import fallom from "@fallom/trace";
+
+await fallom.init({ apiKey: "your-api-key" });
+
+const genAI = new GoogleGenerativeAI(apiKey);
+const model = fallom.trace.wrapGoogleAI(
+  genAI.getGenerativeModel({ model: "gemini-pro" })
+);
+
+fallom.trace.setSession("my-config", sessionId);
+
+// Automatically traced!
+const response = await model.generateContent("Hello!");
+```
+
+## What Gets Traced
+
+For each LLM call, Fallom automatically captures:
+- ✅ Model name
+- ✅ Duration (latency)
+- ✅ Token counts (prompt, completion, total)
+- ✅ Input/output content (can be disabled)
+- ✅ Errors
+- ✅ Config key + session ID (for A/B analysis)
 
-
+## Custom Metrics
+
+Record business metrics for your A/B tests:
 
 ```typescript
-import { trace } from
+import { trace } from "@fallom/trace";
 
-// Record custom metrics for this session
 trace.span({
   outlier_score: 0.8,
   user_satisfaction: 4,
   conversion: true,
 });
-
-// Or explicitly specify session (for batch jobs)
-trace.span(
-  { outlier_score: 0.8 },
-  { configKey: 'my-agent', sessionId: 'user123-convo456' }
-);
 ```
 
 ## Configuration
````
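Between the two README hunks it is worth seeing how the new pieces compose: the rewritten README shows wrapping, session context, and A/B assignment in separate snippets. Here is a minimal end-to-end sketch using only calls that appear in the diff (the combined default-plus-named import and the concrete `sessionId` value are illustrative assumptions):

```typescript
import fallom, { models } from "@fallom/trace";
import OpenAI from "openai";

await fallom.init({ apiKey: process.env.FALLOM_API_KEY });

// Wrap once; every chat.completions.create call is traced.
const openai = fallom.trace.wrapOpenAI(new OpenAI());

// Any stable per-conversation ID keeps model assignment sticky.
const sessionId = "user123-convo456";
fallom.trace.setSession("summarizer-config", sessionId);

// Sticky A/B assignment, with a fallback if Fallom is unreachable.
const model = await models.get("summarizer-config", sessionId, {
  fallback: "gpt-4o-mini",
});

const response = await openai.chat.completions.create({
  model,
  messages: [{ role: "user", content: "Hello!" }],
});
```

The second hunk of the README diff follows.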
````diff
@@ -170,120 +149,54 @@ trace.span(
 
 ```bash
 FALLOM_API_KEY=your-api-key
-FALLOM_BASE_URL=https://spans.fallom.com
+FALLOM_BASE_URL=https://spans.fallom.com
 FALLOM_CAPTURE_CONTENT=true # set to "false" for privacy mode
 ```
 
-### Initialization Options
-
-```typescript
-fallom.init({
-  apiKey: 'your-api-key', // Or use FALLOM_API_KEY env var
-  baseUrl: 'https://spans.fallom.com', // Or use FALLOM_BASE_URL env var
-  captureContent: true, // Set false for privacy mode
-});
-```
-
 ### Privacy Mode
 
-
+Disable prompt/completion capture:
 
 ```typescript
-// Via parameter
 fallom.init({ captureContent: false });
-
-// Or via environment variable
-// FALLOM_CAPTURE_CONTENT=false
 ```
 
-In privacy mode, Fallom still tracks:
-- ✅ Model used
-- ✅ Token counts
-- ✅ Latency
-- ✅ Session/config context
-- ❌ Prompt content (not captured)
-- ❌ Completion content (not captured)
-
 ## API Reference
 
 ### `fallom.init(options?)`
 
-Initialize the SDK.
+Initialize the SDK.
 
-
-|--------|------|---------|-------------|
-| `apiKey` | `string` | `FALLOM_API_KEY` env | Your Fallom API key |
-| `baseUrl` | `string` | `https://spans.fallom.com` | API base URL |
-| `captureContent` | `boolean` | `true` | Capture prompt/completion text |
+### `fallom.trace.wrapOpenAI(client)`
 
-
-
-Get model assignment for a session.
+Wrap OpenAI client for automatic tracing. Works with any OpenAI-compatible API.
 
-
-|-----------|------|-------------|
-| `configKey` | `string` | Your config name from the dashboard |
-| `sessionId` | `string` | Unique session/conversation ID (sticky assignment) |
-| `options.version` | `number` | Pin to specific version (default: latest) |
-| `options.fallback` | `string` | Model to return if anything fails |
-| `options.debug` | `boolean` | Enable debug logging |
+### `fallom.trace.wrapAnthropic(client)`
 
-
+Wrap Anthropic client for automatic tracing.
 
-### `fallom.trace.
+### `fallom.trace.wrapGoogleAI(model)`
 
-
+Wrap Google AI model for automatic tracing.
 
-### `fallom.trace.
+### `fallom.trace.setSession(configKey, sessionId)`
 
-
+Set session context for tracing.
 
-### `fallom.
+### `fallom.models.get(configKey, sessionId, options?)`
 
-
+Get model assignment for A/B testing. Returns `Promise<string>`.
 
-### `fallom.trace.span(data
+### `fallom.trace.span(data)`
 
 Record custom business metrics.
 
-| Parameter | Type | Description |
-|-----------|------|-------------|
-| `data` | `Record<string, unknown>` | Metrics to record |
-| `options.configKey` | `string` | Optional if `setSession()` was called |
-| `options.sessionId` | `string` | Optional if `setSession()` was called |
-
-### `fallom.trace.shutdown()`
-
-Gracefully shutdown the tracing SDK. Call this on process exit.
-
-## Supported LLM Providers
-
-Auto-instrumentation available for:
-- OpenAI (+ OpenAI-compatible APIs: OpenRouter, LiteLLM, vLLM, Ollama, etc.)
-- Anthropic
-- Cohere
-- AWS Bedrock
-- Google Generative AI
-- Azure OpenAI
-- LangChain
-- And more via Traceloop
-
-Install `@traceloop/node-server-sdk` for comprehensive LLM instrumentation.
-
-**Note:** You must use the official SDK for your provider. Raw HTTP requests (e.g., `fetch()`) will not be traced. For OpenAI-compatible APIs, use the OpenAI SDK with a custom `baseURL`.
-
-## Examples
-
-See the `../examples/` folder for complete examples:
-- `random-fact/` - Simple A/B testing with Hono server
-
 ## Requirements
 
 - Node.js >= 18.0.0
 
-
+Works with ESM and CommonJS. Works with tsx, ts-node, Bun, and compiled JavaScript.
 
 ## License
 
 MIT
-
````
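Note that the rewritten README drops the `trace.shutdown()` section and the two-argument `trace.span(data, options)` overload, yet both still appear in the type declarations below, so they remain part of the public surface. A sketch of flushing on exit, assuming the 0.1.1 guidance ("call this on process exit") still applies:

```typescript
import { trace } from "@fallom/trace";

// Record a business metric against the current session context
// (set earlier via trace.setSession).
trace.span({ conversion: true });

// When the job is done, flush pending spans before exiting.
await trace.shutdown();
```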
package/dist/index.d.mts CHANGED

````diff
@@ -36,7 +36,8 @@ declare function init$2(options?: {
     apiKey?: string;
     baseUrl?: string;
     captureContent?: boolean;
-
+    debug?: boolean;
+}): Promise<void>;
 /**
  * Set the current session context.
  *
@@ -111,6 +112,74 @@ declare function span(data: Record<string, unknown>, options?: {
  * Shutdown the tracing SDK gracefully.
  */
 declare function shutdown(): Promise<void>;
+/**
+ * Wrap an OpenAI client to automatically trace all chat completions.
+ * Works with OpenAI, OpenRouter, Azure OpenAI, LiteLLM, and any OpenAI-compatible API.
+ *
+ * @param client - The OpenAI client instance
+ * @returns The same client with tracing enabled
+ *
+ * @example
+ * ```typescript
+ * import OpenAI from "openai";
+ * import { trace } from "@fallom/trace";
+ *
+ * const openai = trace.wrapOpenAI(new OpenAI());
+ *
+ * trace.setSession("my-config", sessionId);
+ * const response = await openai.chat.completions.create({...}); // Automatically traced!
+ * ```
+ */
+declare function wrapOpenAI<T extends {
+    chat: {
+        completions: {
+            create: (...args: any[]) => Promise<any>;
+        };
+    };
+}>(client: T): T;
+/**
+ * Wrap an Anthropic client to automatically trace all message creations.
+ *
+ * @param client - The Anthropic client instance
+ * @returns The same client with tracing enabled
+ *
+ * @example
+ * ```typescript
+ * import Anthropic from "@anthropic-ai/sdk";
+ * import { trace } from "@fallom/trace";
+ *
+ * const anthropic = trace.wrapAnthropic(new Anthropic());
+ *
+ * trace.setSession("my-config", sessionId);
+ * const response = await anthropic.messages.create({...}); // Automatically traced!
+ * ```
+ */
+declare function wrapAnthropic<T extends {
+    messages: {
+        create: (...args: any[]) => Promise<any>;
+    };
+}>(client: T): T;
+/**
+ * Wrap a Google Generative AI client to automatically trace all content generations.
+ *
+ * @param client - The GoogleGenerativeAI client instance
+ * @returns The same client with tracing enabled
+ *
+ * @example
+ * ```typescript
+ * import { GoogleGenerativeAI } from "@google/generative-ai";
+ * import { trace } from "@fallom/trace";
+ *
+ * const genAI = new GoogleGenerativeAI(apiKey);
+ * const model = trace.wrapGoogleAI(genAI.getGenerativeModel({ model: "gemini-pro" }));
+ *
+ * trace.setSession("my-config", sessionId);
+ * const response = await model.generateContent("Hello!"); // Automatically traced!
+ * ```
+ */
+declare function wrapGoogleAI<T extends {
+    generateContent: (...args: any[]) => Promise<any>;
+}>(model: T): T;
 
 declare const trace_clearSession: typeof clearSession;
 declare const trace_getSession: typeof getSession;
@@ -118,8 +187,11 @@ declare const trace_runWithSession: typeof runWithSession;
 declare const trace_setSession: typeof setSession;
 declare const trace_shutdown: typeof shutdown;
 declare const trace_span: typeof span;
+declare const trace_wrapAnthropic: typeof wrapAnthropic;
+declare const trace_wrapGoogleAI: typeof wrapGoogleAI;
+declare const trace_wrapOpenAI: typeof wrapOpenAI;
 declare namespace trace {
-  export { trace_clearSession as clearSession, trace_getSession as getSession, init$2 as init, trace_runWithSession as runWithSession, trace_setSession as setSession, trace_shutdown as shutdown, trace_span as span };
+  export { trace_clearSession as clearSession, trace_getSession as getSession, init$2 as init, trace_runWithSession as runWithSession, trace_setSession as setSession, trace_shutdown as shutdown, trace_span as span, trace_wrapAnthropic as wrapAnthropic, trace_wrapGoogleAI as wrapGoogleAI, trace_wrapOpenAI as wrapOpenAI };
 }
 
 /**
@@ -182,6 +254,7 @@ interface InitOptions {
     apiKey?: string;
     baseUrl?: string;
     captureContent?: boolean;
+    debug?: boolean;
 }
 /**
  * Initialize both trace and models at once.
@@ -205,7 +278,7 @@ interface InitOptions {
  * fallom.init({ captureContent: false });
  * ```
  */
-declare function init(options?: InitOptions): void
+declare function init(options?: InitOptions): Promise<void>;
 
 /**
  * Fallom - Model A/B testing and tracing for LLM applications.
````
package/dist/index.d.ts CHANGED

````diff
@@ -36,7 +36,8 @@ declare function init$2(options?: {
     apiKey?: string;
     baseUrl?: string;
     captureContent?: boolean;
-
+    debug?: boolean;
+}): Promise<void>;
 /**
  * Set the current session context.
  *
@@ -111,6 +112,74 @@ declare function span(data: Record<string, unknown>, options?: {
  * Shutdown the tracing SDK gracefully.
  */
 declare function shutdown(): Promise<void>;
+/**
+ * Wrap an OpenAI client to automatically trace all chat completions.
+ * Works with OpenAI, OpenRouter, Azure OpenAI, LiteLLM, and any OpenAI-compatible API.
+ *
+ * @param client - The OpenAI client instance
+ * @returns The same client with tracing enabled
+ *
+ * @example
+ * ```typescript
+ * import OpenAI from "openai";
+ * import { trace } from "@fallom/trace";
+ *
+ * const openai = trace.wrapOpenAI(new OpenAI());
+ *
+ * trace.setSession("my-config", sessionId);
+ * const response = await openai.chat.completions.create({...}); // Automatically traced!
+ * ```
+ */
+declare function wrapOpenAI<T extends {
+    chat: {
+        completions: {
+            create: (...args: any[]) => Promise<any>;
+        };
+    };
+}>(client: T): T;
+/**
+ * Wrap an Anthropic client to automatically trace all message creations.
+ *
+ * @param client - The Anthropic client instance
+ * @returns The same client with tracing enabled
+ *
+ * @example
+ * ```typescript
+ * import Anthropic from "@anthropic-ai/sdk";
+ * import { trace } from "@fallom/trace";
+ *
+ * const anthropic = trace.wrapAnthropic(new Anthropic());
+ *
+ * trace.setSession("my-config", sessionId);
+ * const response = await anthropic.messages.create({...}); // Automatically traced!
+ * ```
+ */
+declare function wrapAnthropic<T extends {
+    messages: {
+        create: (...args: any[]) => Promise<any>;
+    };
+}>(client: T): T;
+/**
+ * Wrap a Google Generative AI client to automatically trace all content generations.
+ *
+ * @param client - The GoogleGenerativeAI client instance
+ * @returns The same client with tracing enabled
+ *
+ * @example
+ * ```typescript
+ * import { GoogleGenerativeAI } from "@google/generative-ai";
+ * import { trace } from "@fallom/trace";
+ *
+ * const genAI = new GoogleGenerativeAI(apiKey);
+ * const model = trace.wrapGoogleAI(genAI.getGenerativeModel({ model: "gemini-pro" }));
+ *
+ * trace.setSession("my-config", sessionId);
+ * const response = await model.generateContent("Hello!"); // Automatically traced!
+ * ```
+ */
+declare function wrapGoogleAI<T extends {
+    generateContent: (...args: any[]) => Promise<any>;
+}>(model: T): T;
 
 declare const trace_clearSession: typeof clearSession;
 declare const trace_getSession: typeof getSession;
@@ -118,8 +187,11 @@ declare const trace_runWithSession: typeof runWithSession;
 declare const trace_setSession: typeof setSession;
 declare const trace_shutdown: typeof shutdown;
 declare const trace_span: typeof span;
+declare const trace_wrapAnthropic: typeof wrapAnthropic;
+declare const trace_wrapGoogleAI: typeof wrapGoogleAI;
+declare const trace_wrapOpenAI: typeof wrapOpenAI;
 declare namespace trace {
-  export { trace_clearSession as clearSession, trace_getSession as getSession, init$2 as init, trace_runWithSession as runWithSession, trace_setSession as setSession, trace_shutdown as shutdown, trace_span as span };
+  export { trace_clearSession as clearSession, trace_getSession as getSession, init$2 as init, trace_runWithSession as runWithSession, trace_setSession as setSession, trace_shutdown as shutdown, trace_span as span, trace_wrapAnthropic as wrapAnthropic, trace_wrapGoogleAI as wrapGoogleAI, trace_wrapOpenAI as wrapOpenAI };
 }
 
 /**
@@ -182,6 +254,7 @@ interface InitOptions {
     apiKey?: string;
     baseUrl?: string;
     captureContent?: boolean;
+    debug?: boolean;
 }
 /**
  * Initialize both trace and models at once.
@@ -205,7 +278,7 @@ interface InitOptions {
  * fallom.init({ captureContent: false });
  * ```
  */
-declare function init(options?: InitOptions): void
+declare function init(options?: InitOptions): Promise<void>;
 
 /**
  * Fallom - Model A/B testing and tracing for LLM applications.
````