deadpipe 2.0.1 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +181 -103
- package/dist/index.d.mts +85 -33
- package/dist/index.d.ts +85 -33
- package/dist/index.js +457 -49
- package/dist/index.mjs +445 -49
- package/package.json +5 -1
package/README.md
CHANGED
@@ -2,6 +2,8 @@
 
 LLM observability that answers one question: **"Is this prompt behaving the same as when it was last safe?"**
 
+**Supports:** OpenAI, Anthropic, Google AI (Gemini), Mistral, Cohere
+
 ## Installation
 
 ```bash
@@ -14,40 +16,147 @@ pnpm add deadpipe
 
 ## Quick Start
 
-###
+### Universal Wrapper (Recommended)
+
+The `wrap()` function auto-detects your provider and wraps appropriately:
+
+```typescript
+import { wrap } from 'deadpipe';
+import OpenAI from 'openai';
+import Anthropic from '@anthropic-ai/sdk';
+
+// Works with any supported provider
+const openai = wrap(new OpenAI(), { promptId: 'checkout_agent' });
+const anthropic = wrap(new Anthropic(), { promptId: 'support_agent' });
+
+// All calls automatically tracked with full input/output context
+const response = await openai.chat.completions.create({
+  model: 'gpt-4',
+  messages: [{ role: 'user', content: 'Process refund for order 1938' }]
+});
+```
+
+### Provider-Specific Wrappers
+
+For explicit control, use provider-specific wrappers:
+
+```typescript
+import { wrapOpenAI, wrapAnthropic, wrapGoogleAI, wrapMistral, wrapCohere } from 'deadpipe';
+
+const openai = wrapOpenAI(new OpenAI(), { promptId: 'my_agent' });
+const anthropic = wrapAnthropic(new Anthropic(), { promptId: 'my_agent' });
+```
+
+### Manual Tracking
+
+For streaming, custom logic, or unsupported clients:
 
 ```typescript
 import { track } from 'deadpipe';
 import OpenAI from 'openai';
 
 const client = new OpenAI();
+const params = {
+  model: 'gpt-4',
+  messages: [{ role: 'user', content: 'Process refund for order 1938' }]
+};
 
 const response = await track('checkout_agent', async (t) => {
-  const response = await client.chat.completions.create({
-    model: 'gpt-4',
-    messages: [{ role: 'user', content: 'Process refund for order 1938' }]
-  });
-  t.record(response);
+  const response = await client.chat.completions.create(params);
+  t.record(response, undefined, params); // Pass params to capture input
   return response;
 });
 ```
 
-
+## Provider Examples
+
+### OpenAI
 
 ```typescript
-import {
+import { wrap } from 'deadpipe';
 import OpenAI from 'openai';
 
-const client =
+const client = wrap(new OpenAI(), { promptId: 'openai_agent' });
 
-// All calls automatically tracked
 const response = await client.chat.completions.create({
-  model: 'gpt-
-  messages: [{ role: 'user', content: '
+  model: 'gpt-4o',
+  messages: [{ role: 'user', content: 'Hello!' }]
+});
+```
+
+### Anthropic
+
+```typescript
+import { wrap } from 'deadpipe';
+import Anthropic from '@anthropic-ai/sdk';
+
+const client = wrap(new Anthropic(), { promptId: 'claude_agent' });
+
+const response = await client.messages.create({
+  model: 'claude-sonnet-4-20250514',
+  max_tokens: 1024,
+  messages: [{ role: 'user', content: 'Hello, Claude!' }]
 });
 ```
 
-###
+### Google AI (Gemini)
+
+```typescript
+import { wrap } from 'deadpipe';
+import { GoogleGenerativeAI } from '@google/generative-ai';
+
+const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY);
+const model = wrap(genAI, { promptId: 'gemini_agent' }).getGenerativeModel({ model: 'gemini-1.5-pro' });
+
+const result = await model.generateContent('Hello, Gemini!');
+```
+
+### Mistral
+
+```typescript
+import { wrap } from 'deadpipe';
+import { Mistral } from '@mistralai/mistralai';
+
+const client = wrap(new Mistral({ apiKey: process.env.MISTRAL_API_KEY }), { promptId: 'mistral_agent' });
+
+const response = await client.chat.complete({
+  model: 'mistral-large-latest',
+  messages: [{ role: 'user', content: 'Hello, Mistral!' }]
+});
+```
+
+### Cohere
+
+```typescript
+import { wrap } from 'deadpipe';
+import { CohereClient } from 'cohere-ai';
+
+const client = wrap(new CohereClient({ token: process.env.COHERE_API_KEY }), { promptId: 'cohere_agent' });
+
+const response = await client.chat({
+  model: 'command-r-plus',
+  message: 'Hello, Cohere!'
+});
+```
+
+## What Gets Tracked
+
+Every prompt execution captures:
+
+| Category | Metrics |
+|----------|---------|
+| **Identity** | prompt_id, model, provider, app_id, environment, version |
+| **Timing** | request_start, first_token_time, total_latency |
+| **Volume** | input_tokens, output_tokens, estimated_cost_usd |
+| **Reliability** | http_status, timeout, retry_count, error_message |
+| **Output Integrity** | output_length, empty_output, truncated, json_parse_success, schema_validation_pass |
+| **Behavioral Fingerprint** | output_hash, refusal_flag, tool_calls_count |
+| **Safety Proxies** | enum_out_of_range, numeric_out_of_bounds |
+| **Change Context** | prompt_hash, tool_schema_hash, system_prompt_hash |
+
+## Advanced Usage
+
+### Schema Validation (Zod)
 
 ```typescript
 import { track } from 'deadpipe';
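The "What Gets Tracked" table added in this hunk maps onto the `PromptTelemetry` interface declared in `dist/index.d.mts` below. As a sketch only, a single record might look like this — the field names come from the table, but the values and the millisecond unit are assumptions, not taken from the package:

```typescript
// Hypothetical PromptTelemetry record; field names from the README table,
// values and units are illustrative assumptions.
const sampleTelemetry = {
  prompt_id: 'checkout_agent',       // Identity
  model: 'gpt-4',
  provider: 'openai',
  environment: 'production',
  total_latency: 1240,               // Timing (assumed ms)
  input_tokens: 42,                  // Volume
  output_tokens: 118,
  estimated_cost_usd: 0.0083,
  http_status: 200,                  // Reliability
  retry_count: 0,
  json_parse_success: true,          // Output integrity
  output_hash: 'sha256:…',           // Behavioral fingerprint
  prompt_hash: 'sha256:…',           // Change context
};
```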
@@ -82,35 +191,19 @@ const result = await track('checkout_agent', async (t) => {
     }
   }
 });
-// result is typed as RefundResponse | null
 ```
 
-## What Gets Tracked
-
-Every prompt execution captures:
-
-| Category | Metrics |
-|----------|---------|
-| **Identity** | prompt_id, model, provider, app_id, environment, version |
-| **Timing** | request_start, first_token_time, total_latency |
-| **Volume** | input_tokens, output_tokens, estimated_cost_usd |
-| **Reliability** | http_status, timeout, retry_count, error_message |
-| **Output Integrity** | output_length, empty_output, truncated, json_parse_success, schema_validation_pass |
-| **Behavioral Fingerprint** | output_hash, refusal_flag, tool_calls_count |
-| **Safety Proxies** | enum_out_of_range, numeric_out_of_bounds |
-| **Change Context** | prompt_hash, tool_schema_hash, system_prompt_hash |
-
-## Advanced Usage
-
 ### Track Streaming Responses
 
 ```typescript
+const params = {
+  model: 'gpt-4',
+  messages: [{ role: 'user', content: 'Tell me a story' }],
+  stream: true,
+};
+
 const response = await track('streaming_agent', async (t) => {
-  const stream = await client.chat.completions.create({
-    model: 'gpt-4',
-    messages: [{ role: 'user', content: 'Tell me a story' }],
-    stream: true,
-  });
+  const stream = await client.chat.completions.create(params);
 
   let fullContent = '';
   for await (const chunk of stream) {
@@ -120,12 +213,11 @@ const response = await track('streaming_agent', async (t) => {
     }
   }
 
-  // Record manually for streams
   t.record({
     model: 'gpt-4',
     choices: [{ message: { content: fullContent } }],
     usage: { prompt_tokens: 10, completion_tokens: 100, total_tokens: 110 }
-  });
+  }, undefined, params);
 
   return fullContent;
 });
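The streaming example above never calls `markFirstToken()`, although the API reference below lists it for exactly this case. A minimal sketch of wiring it into the same loop, assuming the `client` and `params` from the hunk above:

```typescript
const response = await track('streaming_agent', async (t) => {
  const stream = await client.chat.completions.create(params);

  let fullContent = '';
  let seenFirst = false;
  for await (const chunk of stream) {
    if (!seenFirst) {
      t.markFirstToken(); // captures first_token_time in the telemetry record
      seenFirst = true;
    }
    fullContent += chunk.choices[0]?.delta?.content ?? '';
  }

  // Record manually, passing params so input context is captured
  t.record({
    model: 'gpt-4',
    choices: [{ message: { content: fullContent } }],
  }, undefined, params);

  return fullContent;
});
```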
@@ -148,25 +240,6 @@ const response = await track('checkout_agent', async (t) => {
 });
 ```
 
-### With Anthropic
-
-```typescript
-import { track } from 'deadpipe';
-import Anthropic from '@anthropic-ai/sdk';
-
-const client = new Anthropic();
-
-const response = await track('claude_agent', async (t) => {
-  const response = await client.messages.create({
-    model: 'claude-3-sonnet-20240229',
-    max_tokens: 1024,
-    messages: [{ role: 'user', content: 'Hello, Claude!' }]
-  });
-  t.record(response);
-  return response;
-}, { provider: 'anthropic' });
-```
-
 ### Environment-Based Configuration
 
 ```typescript
@@ -176,18 +249,18 @@ const response = await track('claude_agent', async (t) => {
 // DEADPIPE_ENVIRONMENT - e.g., 'production', 'staging'
 // DEADPIPE_VERSION or GIT_COMMIT - Version/commit hash
 
-import {
+import { wrap } from 'deadpipe';
 
 // API key auto-loaded from DEADPIPE_API_KEY
-
-// ...
-});
+const client = wrap(new OpenAI(), { promptId: 'my_agent' });
 ```
 
 ### Full Options
 
 ```typescript
-await track('checkout_agent', fn, {
+const client = wrap(new OpenAI(), {
+  promptId: 'checkout_agent',
+
   // Authentication
   apiKey: 'dp_...',
   baseUrl: 'https://www.deadpipe.com/api/v1',
@@ -197,62 +270,50 @@ await track('checkout_agent', fn, {
   appId: 'my-app',
   environment: 'production',
   version: '1.2.3',
-  provider: 'openai', // or 'anthropic'
 
   // Validation
   schema: { validate: (data) => ({ success: true, data }) },
   enumFields: { status: ['pending', 'approved', 'rejected'] },
   numericBounds: { amount: [0, 10000] },
-
-  // Context (for change detection)
-  messages: [...],
-  tools: [...],
-  systemPrompt: 'You are a helpful assistant...',
 });
 ```
 
-##
+## Framework Examples
+
+### Next.js API Routes
 
 ```typescript
-import {
+import { wrap } from 'deadpipe';
 import OpenAI from 'openai';
 
-const client = new OpenAI();
+const client = wrap(new OpenAI(), { promptId: 'api_handler' });
 
 export async function POST(request: Request) {
   const { prompt } = await request.json();
 
-  const response = await
-
-
-    messages: [{ role: 'user', content: prompt }]
-  });
-  t.record(completion);
-  return completion;
+  const response = await client.chat.completions.create({
+    model: 'gpt-4',
+    messages: [{ role: 'user', content: prompt }]
   });
 
   return Response.json({ result: response.choices[0].message.content });
 }
 ```
 
-
+### Express.js
 
 ```typescript
 import express from 'express';
-import {
+import { wrap } from 'deadpipe';
 import OpenAI from 'openai';
 
 const app = express();
-const client = new OpenAI();
+const client = wrap(new OpenAI(), { promptId: 'express_endpoint' });
 
 app.post('/generate', async (req, res) => {
-  const response = await
-
-
-    messages: req.body.messages
-  });
-  t.record(completion);
-  return completion;
+  const response = await client.chat.completions.create({
+    model: 'gpt-4',
+    messages: req.body.messages
   });
 
   res.json(response);
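Since the wrapped client keeps the provider's original API, SDK errors should propagate unchanged, and the Reliability fields in the telemetry table (http_status, error_message) suggest failures are still recorded. A sketch of the same Express route with explicit error handling; the 502 status choice is illustrative:

```typescript
app.post('/generate', async (req, res) => {
  try {
    const response = await client.chat.completions.create({
      model: 'gpt-4',
      messages: req.body.messages
    });
    res.json(response);
  } catch (err) {
    // Assumption: the wrapper records the failure in telemetry; the route
    // only needs to translate the error for its own callers.
    res.status(502).json({ error: 'Upstream LLM request failed' });
  }
});
```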
@@ -261,42 +322,59 @@ app.post('/generate', async (req, res) => {
 
 ## API Reference
 
+### `wrap(client, options)`
+
+Universal wrapper that auto-detects provider.
+
+- `client`: Any supported LLM client
+- `options.promptId`: Unique identifier for prompts
+
+Returns: Wrapped client with identical API
+
+### Provider-Specific Wrappers
+
+- `wrapOpenAI(client, options)` - OpenAI client
+- `wrapAnthropic(client, options)` - Anthropic client
+- `wrapGoogleAI(client, options)` - Google AI client
+- `wrapMistral(client, options)` - Mistral client
+- `wrapCohere(client, options)` - Cohere client
+
 ### `track(promptId, fn, options?)`
 
 Track a prompt execution with full telemetry.
 
 - `promptId`: Unique identifier for this prompt
 - `fn`: Async function that receives a `PromptTracker`
-- `options`: Configuration options
+- `options`: Configuration options
 
 Returns: `Promise<T>` (result of fn)
 
-### `wrapOpenAI(client, options)`
-
-Wrap an OpenAI client to auto-track all completions.
-
-- `client`: OpenAI client instance
-- `options.promptId`: Unique identifier for prompts
-
-Returns: Wrapped client with identical API
-
 ### `PromptTracker`
 
 The tracker object passed to your function:
 
-- `record(response)` - Record the LLM response
+- `record(response, parsedOutput?, input?)` - Record the LLM response
 - `markFirstToken()` - Mark when first token received (streaming)
 - `markRetry()` - Mark a retry attempt
 - `recordError(error)` - Record an error
 - `getTelemetry()` - Get the telemetry object
 
-###
+### Utility Functions
 
-Estimate USD cost
+- `estimateCost(model, inputTokens, outputTokens)` - Estimate USD cost
+- `detectRefusal(text)` - Detect if response is a refusal
+- `detectProvider(response)` - Detect provider from response
+- `detectClientProvider(client)` - Detect provider from client
 
-
+## Supported Models & Pricing
 
-
+| Provider | Models |
+|----------|--------|
+| **OpenAI** | gpt-4, gpt-4o, gpt-4o-mini, gpt-4-turbo, gpt-3.5-turbo, o1, o1-mini, o1-pro |
+| **Anthropic** | claude-3-opus, claude-3-sonnet, claude-3-haiku, claude-3.5-sonnet, claude-sonnet-4, claude-opus-4 |
+| **Google AI** | gemini-1.5-pro, gemini-1.5-flash, gemini-2.0-flash, gemini-2.0-pro |
+| **Mistral** | mistral-large, mistral-medium, mistral-small, mistral-nemo, codestral, pixtral |
+| **Cohere** | command-r-plus, command-r, command, command-light |
 
 ## Zero Dependencies
 
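Putting the `PromptTracker` methods and utility functions from this reference together, a hedged sketch of a manual retry loop — the three-attempt policy and backoff-free structure are illustrative, not part of the package:

```typescript
import { track, detectRefusal } from 'deadpipe';

const response = await track('checkout_agent', async (t) => {
  for (let attempt = 1; attempt <= 3; attempt++) {
    try {
      const response = await client.chat.completions.create(params);
      t.record(response, undefined, params); // pass params to capture input

      if (detectRefusal(response.choices[0].message.content ?? '')) {
        console.warn('model refused; refusal_flag should reflect this');
      }
      console.log('cost estimate:', t.getTelemetry().estimated_cost_usd);
      return response;
    } catch (err) {
      if (attempt === 3) {
        t.recordError(err as Error); // populates error_message
        throw err;
      }
      t.markRetry(); // increments retry_count
    }
  }
  throw new Error('unreachable');
});
```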
package/dist/index.d.mts
CHANGED
@@ -2,31 +2,35 @@
  * Deadpipe - LLM observability that answers one question:
  * "Is this prompt behaving the same as when it was last safe?"
  *
- *
- *
+ * Supports: OpenAI, Anthropic, Google AI (Gemini), Mistral, Cohere
+ *
+ * @example Recommended: Universal wrapper (auto-detects provider)
+ * import { wrap } from 'deadpipe';
  * import OpenAI from 'openai';
+ * import Anthropic from '@anthropic-ai/sdk';
  *
- *
+ * // Works with any supported provider
+ * const openai = wrap(new OpenAI(), { promptId: 'checkout_agent' });
+ * const anthropic = wrap(new Anthropic(), { promptId: 'support_agent' });
  *
- *
- *
- * model: 'gpt-4',
- * messages: [{ role: 'user', content: 'Process refund for order 1938' }]
- * });
- * t.record(response);
- * return response;
- * });
+ * @example Provider-specific wrappers
+ * import { wrapOpenAI, wrapAnthropic } from 'deadpipe';
 *
- *
- *
- *
+ * const openai = wrapOpenAI(new OpenAI(), { promptId: 'my_agent' });
+ * const anthropic = wrapAnthropic(new Anthropic(), { promptId: 'my_agent' });
+ *
+ * @example Advanced: Manual tracking (for streaming, custom logic, etc.)
+ * import { track } from 'deadpipe';
 *
- * const
- *
- *
+ * const response = await track('checkout_agent', async (t) => {
+ *   const response = await client.chat.completions.create(params);
+ *   t.record(response, undefined, params);
+ *   return response;
+ * });
 */
-declare const VERSION = "2.0.1";
+declare const VERSION = "3.0.0";
 type StatusType = 'success' | 'error' | 'timeout' | 'empty' | 'schema_violation' | 'refusal';
+type ProviderType = 'openai' | 'anthropic' | 'google' | 'mistral' | 'cohere' | 'unknown';
 interface PromptTelemetry {
     prompt_id: string;
     model?: string;
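`ProviderType` is a closed union, so consumers can switch over it exhaustively; a small caller-side sketch, where the default model names are illustrative and not taken from the package:

```typescript
import type { ProviderType } from 'deadpipe';

// Illustrative defaults only; not part of deadpipe's API.
function defaultModelFor(provider: ProviderType): string | null {
  switch (provider) {
    case 'openai':    return 'gpt-4o';
    case 'anthropic': return 'claude-sonnet-4-20250514';
    case 'google':    return 'gemini-1.5-pro';
    case 'mistral':   return 'mistral-large-latest';
    case 'cohere':    return 'command-r-plus';
    case 'unknown':   return null;
  }
}
```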
@@ -77,17 +81,9 @@ interface TrackOptions {
     appId?: string;
     environment?: string;
     version?: string;
-    provider?: 'openai' | 'anthropic' | string;
     schema?: SchemaValidator;
     enumFields?: Record<string, unknown[]>;
     numericBounds?: Record<string, [number | null, number | null]>;
-    messages?: Array<{
-        role: string;
-        content: string;
-        [key: string]: unknown;
-    }>;
-    tools?: Array<Record<string, unknown>>;
-    systemPrompt?: string;
 }
 interface SchemaValidator {
     validate: (data: unknown) => {
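The `SchemaValidator` shape (a `validate` function returning `success` with optional `data` and `errors`) lines up with Zod's `safeParse`, which is what the README's "Schema Validation (Zod)" section refers to. A sketch of the adapter, where the `RefundResponse` schema is purely illustrative:

```typescript
import { z } from 'zod';
import type { SchemaValidator } from 'deadpipe';

// Illustrative schema for a structured LLM output.
const RefundResponse = z.object({
  status: z.enum(['pending', 'approved', 'rejected']),
  amount: z.number(),
});

// Adapt Zod's safeParse result to deadpipe's SchemaValidator contract.
const schema: SchemaValidator = {
  validate: (data: unknown) => {
    const parsed = RefundResponse.safeParse(data);
    return parsed.success
      ? { success: true, data: parsed.data }
      : { success: false, errors: parsed.error.issues.map((i) => i.message) };
  },
};
```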
@@ -96,13 +92,24 @@ interface SchemaValidator {
         errors?: string[];
     };
 }
-interface WrapOpenAIOptions extends TrackOptions {
+interface WrapOptions extends TrackOptions {
     promptId: string;
 }
+type WrapOpenAIOptions = WrapOptions;
 declare function estimateCost(model: string, inputTokens: number, outputTokens: number): number | null;
 declare function detectRefusal(text: string): boolean;
 declare function validateEnumBounds(data: Record<string, unknown>, enumFields?: Record<string, unknown[]>): boolean;
 declare function validateNumericBounds(data: Record<string, unknown>, numericBounds?: Record<string, [number | null, number | null]>): boolean;
+/**
+ * Auto-detect provider from response object.
+ * Checks for provider-specific response structures.
+ */
+declare function detectProvider(response: any): ProviderType;
+/**
+ * Auto-detect provider from client object.
+ * Checks for provider-specific client structures.
+ */
+declare function detectClientProvider(client: any): ProviderType;
 interface ExtractedResponse {
     model: string;
     content: string;
@@ -118,6 +125,13 @@ interface ExtractedResponse {
 }
 declare function extractOpenAIResponse(response: any): ExtractedResponse;
 declare function extractAnthropicResponse(response: any): ExtractedResponse;
+declare function extractGoogleAIResponse(response: any): ExtractedResponse;
+declare function extractMistralResponse(response: any): ExtractedResponse;
+declare function extractCohereResponse(response: any): ExtractedResponse;
+/**
+ * Extract response data based on detected or specified provider.
+ */
+declare function extractResponse(response: any, provider?: ProviderType): ExtractedResponse;
 declare class PromptTracker {
     private promptId;
     private apiKey;
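A short sketch of how the new detection and extraction helpers compose when a response's origin is not known up front; the `response` value is assumed to come from one of the supported SDKs:

```typescript
import { detectProvider, extractResponse } from 'deadpipe';

const provider = detectProvider(response);            // e.g. 'anthropic'
const { model, content } = extractResponse(response, provider);
console.log(`[${provider}] ${model}:`, content);
```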
@@ -126,7 +140,6 @@ declare class PromptTracker {
     private appId;
     private environment;
     private versionStr;
-    private provider;
     private schema;
     private enumFields;
     private numericBounds;
@@ -145,23 +158,62 @@ declare class PromptTracker {
     start(): void;
     markFirstToken(): void;
     markRetry(): void;
-    record(response: any, parsedOutput?: unknown): unknown;
+    record(response: any, parsedOutput?: unknown, input?: any): unknown;
     recordError(error: Error): void;
     private send;
     isRecorded(): boolean;
     getTelemetry(): PromptTelemetry;
 }
 declare function track<T>(promptId: string, fn: (tracker: PromptTracker) => Promise<T>, options?: TrackOptions): Promise<T>;
-type
+type AnyClient = any;
 interface TrackedCompletions {
     create: (params: any) => Promise<any>;
 }
 interface TrackedChat {
     completions: TrackedCompletions;
 }
-interface TrackedOpenAIClient extends
+interface TrackedOpenAIClient extends AnyClient {
     chat: TrackedChat;
 }
-declare function wrapOpenAI(client:
+declare function wrapOpenAI(client: AnyClient, options: WrapOptions): TrackedOpenAIClient;
+interface TrackedMessages {
+    create: (params: any) => Promise<any>;
+}
+interface TrackedAnthropicClient extends AnyClient {
+    messages: TrackedMessages;
+}
+declare function wrapAnthropic(client: AnyClient, options: WrapOptions): TrackedAnthropicClient;
+interface TrackedGenerativeModel {
+    generateContent: (params: any) => Promise<any>;
+    startChat: (params?: any) => any;
+}
+interface TrackedGoogleAIClient extends AnyClient {
+    getGenerativeModel: (params: any) => TrackedGenerativeModel;
+}
+declare function wrapGoogleAI(client: AnyClient, options: WrapOptions): TrackedGoogleAIClient;
+interface TrackedMistralChat {
+    complete: (params: any) => Promise<any>;
+}
+interface TrackedMistralClient extends AnyClient {
+    chat: TrackedMistralChat;
+}
+declare function wrapMistral(client: AnyClient, options: WrapOptions): TrackedMistralClient;
+interface TrackedCohereClient extends AnyClient {
+    chat: (params: any) => Promise<any>;
+    generate?: (params: any) => Promise<any>;
+}
+declare function wrapCohere(client: AnyClient, options: WrapOptions): TrackedCohereClient;
+/**
+ * Universal wrapper that auto-detects the provider and wraps appropriately.
+ *
+ * @example
+ * import { wrap } from 'deadpipe';
+ * import OpenAI from 'openai';
+ * import Anthropic from '@anthropic-ai/sdk';
+ *
+ * const openai = wrap(new OpenAI(), { promptId: 'my_agent' });
+ * const anthropic = wrap(new Anthropic(), { promptId: 'my_agent' });
+ */
+declare function wrap(client: AnyClient, options: WrapOptions): AnyClient;
 
-export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
+export { type PromptTelemetry, PromptTracker, type ProviderType, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, type WrapOptions, wrap as default, detectClientProvider, detectProvider, detectRefusal, estimateCost, extractAnthropicResponse, extractCohereResponse, extractGoogleAIResponse, extractMistralResponse, extractOpenAIResponse, extractResponse, track, validateEnumBounds, validateNumericBounds, wrap, wrapAnthropic, wrap as wrapClient, wrapCohere, wrapGoogleAI, wrapMistral, wrapOpenAI };
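Read together, the declaration changes amount to a small breaking surface for 2.x callers: `TrackOptions.provider` and the `messages`/`tools`/`systemPrompt` context fields are gone, and `record()` gained a third `input` parameter. A hedged before/after sketch, with the 2.x shape reconstructed from the removed declarations:

```typescript
// 2.x (no longer compiles against 3.0.0 types):
await track('checkout_agent', fn, {
  provider: 'anthropic',            // TrackOptions.provider removed
  systemPrompt: 'You are ...',      // messages/tools/systemPrompt removed
});

// 3.0.0: provider is auto-detected from the client/response, and input
// context flows through record() instead of the options object.
await track('checkout_agent', async (t) => {
  const response = await client.messages.create(params);
  t.record(response, undefined, params);
  return response;
});
```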