deadpipe 1.0.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +29 -0
- package/README.md +221 -89
- package/dist/index.d.mts +144 -87
- package/dist/index.d.ts +144 -87
- package/dist/index.js +471 -114
- package/dist/index.mjs +461 -111
- package/package.json +14 -11
package/LICENSE
ADDED
@@ -0,0 +1,29 @@
+Deadpipe SDK License
+
+Copyright 2024 Deadpipe
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to use
+the Software solely for the purpose of integrating with the Deadpipe monitoring
+service (https://deadpipe.com), subject to the following conditions:
+
+1. The Software may only be used to send data to and receive data from the
+official Deadpipe service.
+
+2. The Software may not be modified, distributed, sublicensed, or used in any
+commercial product or service without explicit written permission from
+Deadpipe.
+
+3. The Software may not be used to create competing monitoring services or
+products.
+
+4. The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
package/README.md
CHANGED
@@ -1,6 +1,6 @@
 # Deadpipe Node.js SDK
 
-
+LLM observability that answers one question: **"Is this prompt behaving the same as when it was last safe?"**
 
 ## Installation
 
@@ -14,160 +14,293 @@ pnpm add deadpipe
 
 ## Quick Start
 
-### Option 1:
+### Option 1: Context Manager (Recommended)
 
 ```typescript
-import {
+import { track } from 'deadpipe';
+import OpenAI from 'openai';
 
-const
+const client = new OpenAI();
 
-
-await
-
-
+const response = await track('checkout_agent', async (t) => {
+  const response = await client.chat.completions.create({
+    model: 'gpt-4',
+    messages: [{ role: 'user', content: 'Process refund for order 1938' }]
+  });
+  t.record(response);
+  return response;
 });
-
-// Deadpipe automatically sends success/failed heartbeat when done
 ```
 
-### Option 2:
+### Option 2: Auto-Wrapping (Zero Code Changes)
 
 ```typescript
-import {
+import { wrapOpenAI } from 'deadpipe';
+import OpenAI from 'openai';
 
-const
+const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
 
-//
-const
-
+// All calls automatically tracked
+const response = await client.chat.completions.create({
+  model: 'gpt-4',
+  messages: [{ role: 'user', content: 'Process refund for order 1938' }]
 });
-
-// Call it later
-await myPipeline();
 ```
 
-### Option 3:
+### Option 3: With Schema Validation (Zod)
 
 ```typescript
-import {
+import { track } from 'deadpipe';
+import { z } from 'zod';
+import OpenAI from 'openai';
+
+const RefundResponse = z.object({
+  order_id: z.string(),
+  amount: z.number(),
+  status: z.string(),
+});
 
-const
+const client = new OpenAI();
 
-
-await
-
-
-
-
+const result = await track('checkout_agent', async (t) => {
+  const response = await client.chat.completions.create({
+    model: 'gpt-4',
+    messages: [{ role: 'user', content: 'Process refund for order 1938' }],
+    response_format: { type: 'json_object' }
+  });
+
+  return t.record(response);
+}, {
+  schema: {
+    validate: (data) => {
+      const result = RefundResponse.safeParse(data);
+      return {
+        success: result.success,
+        data: result.success ? result.data : undefined,
+        errors: result.success ? undefined : result.error.errors.map(e => e.message)
+      };
+    }
+  }
+});
+// result is typed as RefundResponse | null
 ```
 
-
+## What Gets Tracked
+
+Every prompt execution captures:
+
+| Category | Metrics |
+|----------|---------|
+| **Identity** | prompt_id, model, provider, app_id, environment, version |
+| **Timing** | request_start, first_token_time, total_latency |
+| **Volume** | input_tokens, output_tokens, estimated_cost_usd |
+| **Reliability** | http_status, timeout, retry_count, error_message |
+| **Output Integrity** | output_length, empty_output, truncated, json_parse_success, schema_validation_pass |
+| **Behavioral Fingerprint** | output_hash, refusal_flag, tool_calls_count |
+| **Safety Proxies** | enum_out_of_range, numeric_out_of_bounds |
+| **Change Context** | prompt_hash, tool_schema_hash, system_prompt_hash |
+
+## Advanced Usage
 
-
+### Track Streaming Responses
 
 ```typescript
-
+const response = await track('streaming_agent', async (t) => {
+  const stream = await client.chat.completions.create({
+    model: 'gpt-4',
+    messages: [{ role: 'user', content: 'Tell me a story' }],
+    stream: true,
+  });
+
+  let fullContent = '';
+  for await (const chunk of stream) {
+    if (chunk.choices[0]?.delta?.content) {
+      t.markFirstToken(); // Call once on first token
+      fullContent += chunk.choices[0].delta.content;
+    }
+  }
+
+  // Record manually for streams
+  t.record({
+    model: 'gpt-4',
+    choices: [{ message: { content: fullContent } }],
+    usage: { prompt_tokens: 10, completion_tokens: 100, total_tokens: 110 }
+  });
 
-
-await run('my-pipeline', async () => {
-  await doWork();
+  return fullContent;
 });
+```
+
+### Track Retries
+
+```typescript
+const response = await track('checkout_agent', async (t) => {
+  for (let attempt = 0; attempt < 3; attempt++) {
+    try {
+      const response = await client.chat.completions.create({...});
+      t.record(response);
+      return response;
+    } catch (error) {
+      t.markRetry();
+      if (attempt === 2) throw error;
+    }
+  }
+});
+```
 
-
-
+### With Anthropic
+
+```typescript
+import { track } from 'deadpipe';
+import Anthropic from '@anthropic-ai/sdk';
+
+const client = new Anthropic();
+
+const response = await track('claude_agent', async (t) => {
+  const response = await client.messages.create({
+    model: 'claude-3-sonnet-20240229',
+    max_tokens: 1024,
+    messages: [{ role: 'user', content: 'Hello, Claude!' }]
+  });
+  t.record(response);
+  return response;
+}, { provider: 'anthropic' });
 ```
 
-
+### Environment-Based Configuration
 
 ```typescript
-
+// Uses these environment variables:
+// DEADPIPE_API_KEY - Your API key
+// DEADPIPE_APP_ID - Application identifier
+// DEADPIPE_ENVIRONMENT - e.g., 'production', 'staging'
+// DEADPIPE_VERSION or GIT_COMMIT - Version/commit hash
 
-
+import { track } from 'deadpipe';
 
-//
-
-
-
-
+// API key auto-loaded from DEADPIPE_API_KEY
+await track('my_prompt', async (t) => {
+  // ...
+});
+```
+
+### Full Options
+
+```typescript
+await track('checkout_agent', fn, {
+  // Authentication
+  apiKey: 'dp_...',
+  baseUrl: 'https://www.deadpipe.com/api/v1',
+  timeout: 10000,
+
+  // Identity
+  appId: 'my-app',
+  environment: 'production',
+  version: '1.2.3',
+  provider: 'openai', // or 'anthropic'
+
+  // Validation
+  schema: { validate: (data) => ({ success: true, data }) },
+  enumFields: { status: ['pending', 'approved', 'rejected'] },
+  numericBounds: { amount: [0, 10000] },
+
+  // Context (for change detection)
+  messages: [...],
+  tools: [...],
+  systemPrompt: 'You are a helpful assistant...',
 });
 ```
 
 ## Next.js API Routes
 
 ```typescript
-import {
+import { track } from 'deadpipe';
+import OpenAI from 'openai';
 
-const
+const client = new OpenAI();
 
 export async function POST(request: Request) {
-
-
-
-
+  const { prompt } = await request.json();
+
+  const response = await track('api_handler', async (t) => {
+    const completion = await client.chat.completions.create({
+      model: 'gpt-4',
+      messages: [{ role: 'user', content: prompt }]
+    });
+    t.record(completion);
+    return completion;
   });
+
+  return Response.json({ result: response.choices[0].message.content });
 }
 ```
 
-##
-
-```yaml
-- name: Run ETL
-  env:
-    DEADPIPE_API_KEY: ${{ secrets.DEADPIPE_API_KEY }}
-  run: node scripts/etl.js
-```
+## Express.js
 
-```
-
-import {
+```typescript
+import express from 'express';
+import { track } from 'deadpipe';
+import OpenAI from 'openai';
+
+const app = express();
+const client = new OpenAI();
+
+app.post('/generate', async (req, res) => {
+  const response = await track('express_endpoint', async (t) => {
+    const completion = await client.chat.completions.create({
+      model: 'gpt-4',
+      messages: req.body.messages
+    });
+    t.record(completion);
+    return completion;
+  });
 
-
-// Your ETL code
+  res.json(response);
 });
 ```
 
 ## API Reference
 
-### `
+### `track(promptId, fn, options?)`
 
-
+Track a prompt execution with full telemetry.
 
-- `
-- `
-- `options
+- `promptId`: Unique identifier for this prompt
+- `fn`: Async function that receives a `PromptTracker`
+- `options`: Configuration options (see above)
 
-
+Returns: `Promise<T>` (result of fn)
 
-
+### `wrapOpenAI(client, options)`
 
-
-- `options.status`: `'success'` or `'failed'` (default: `'success'`)
-- `options.durationMs`: How long the run took
-- `options.recordsProcessed`: Number of records
-- `options.appName`: Group pipelines under an app
+Wrap an OpenAI client to auto-track all completions.
 
-
+- `client`: OpenAI client instance
+- `options.promptId`: Unique identifier for prompts
 
-
+Returns: Wrapped client with identical API
 
-
+### `PromptTracker`
 
-
-- `fn`: Async function to run
-- `options.appName`: Group pipelines under an app
+The tracker object passed to your function:
 
-
+- `record(response)` - Record the LLM response
+- `markFirstToken()` - Mark when first token received (streaming)
+- `markRetry()` - Mark a retry attempt
+- `recordError(error)` - Record an error
+- `getTelemetry()` - Get the telemetry object
 
-### `
+### `estimateCost(model, inputTokens, outputTokens)`
 
-
+Estimate USD cost for a completion.
 
-
+### `detectRefusal(text)`
+
+Detect if response is a refusal/decline.
 
 ## Zero Dependencies
 
-This SDK has zero runtime dependencies.
+This SDK has zero runtime dependencies. Uses native `fetch` (Node 18+).
 
 ## TypeScript
 
@@ -175,5 +308,4 @@ Full TypeScript support with type definitions included.
 
 ## License
 
-
-
+Deadpipe SDK License - see [LICENSE](LICENSE) file.
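Taken together, the removed hunks above show the breaking change in this major release: the 1.0.0 pipeline-heartbeat API (`run`, `wrap`, `await myPipeline()`) is replaced by the prompt-level `track` API. A minimal before/after sketch, using only the signatures visible in this diff; `doWork` is a placeholder for your own pipeline body:

```typescript
import { track } from 'deadpipe';

// Placeholder for your own LLM-calling pipeline body.
declare function doWork(): Promise<unknown>;

// 1.0.0 (removed): heartbeat around an arbitrary pipeline run
// await run('my-pipeline', async () => {
//   await doWork();
// });

// 2.0.0: telemetry around a specific prompt execution
await track('my-pipeline', async (t) => {
  const response = await doWork(); // your provider call
  t.record(response);              // record the raw provider response
  return response;
});
```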
package/dist/index.d.mts
CHANGED
@@ -1,105 +1,162 @@
 /**
- * Deadpipe -
+ * Deadpipe - LLM observability that answers one question:
+ * "Is this prompt behaving the same as when it was last safe?"
  *
  * @example
- * import {
+ * import { track } from 'deadpipe';
+ * import OpenAI from 'openai';
  *
- * const
+ * const client = new OpenAI();
  *
- *
- * await
- *
+ * const { response, tracker } = await track('checkout_agent', async (t) => {
+ *   const response = await client.chat.completions.create({
+ *     model: 'gpt-4',
+ *     messages: [{ role: 'user', content: 'Process refund for order 1938' }]
+ *   });
+ *   t.record(response);
+ *   return response;
 * });
 *
- *
- *
+ * @example Auto-wrapping (zero code changes):
+ * import { wrapOpenAI } from 'deadpipe';
+ * import OpenAI from 'openai';
+ *
+ * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
+ * // All calls automatically tracked
+ * const response = await client.chat.completions.create(...);
 */
-
-
-
-
-
-
+declare const VERSION = "2.0.0";
+type StatusType = 'success' | 'error' | 'timeout' | 'empty' | 'schema_violation' | 'refusal';
+interface PromptTelemetry {
+    prompt_id: string;
+    model?: string;
+    provider?: string;
+    app_id?: string;
+    environment?: string;
+    version?: string;
+    request_start?: string;
+    first_token_time?: number;
+    end_time?: string;
+    total_latency?: number;
+    input_tokens?: number;
+    output_tokens?: number;
+    total_tokens?: number;
+    estimated_cost_usd?: number;
+    http_status?: number;
+    timeout?: boolean;
+    retry_count?: number;
+    provider_error_code?: string;
+    error_message?: string;
+    output_length?: number;
+    empty_output?: boolean;
+    truncated?: boolean;
+    json_parse_success?: boolean;
+    schema_validation_pass?: boolean;
+    missing_required_fields?: string;
+    output_hash?: string;
+    output_embedding?: string;
+    top_logprob_mean?: number;
+    refusal_flag?: boolean;
+    tool_call_flag?: boolean;
+    tool_calls_count?: number;
+    enum_out_of_range?: boolean;
+    numeric_out_of_bounds?: boolean;
+    hallucination_flags?: string;
+    prompt_hash?: string;
+    tool_schema_hash?: string;
+    system_prompt_hash?: string;
+    status?: StatusType;
 }
-interface
+interface TrackOptions {
     apiKey?: string;
     baseUrl?: string;
     timeout?: number;
+    appId?: string;
+    environment?: string;
+    version?: string;
+    provider?: 'openai' | 'anthropic' | string;
+    schema?: SchemaValidator;
+    enumFields?: Record<string, unknown[]>;
+    numericBounds?: Record<string, [number | null, number | null]>;
+    messages?: Array<{
+        role: string;
+        content: string;
+        [key: string]: unknown;
+    }>;
+    tools?: Array<Record<string, unknown>>;
+    systemPrompt?: string;
+}
+interface SchemaValidator {
+    validate: (data: unknown) => {
+        success: boolean;
+        data?: unknown;
+        errors?: string[];
+    };
+}
+interface WrapOpenAIOptions extends Omit<TrackOptions, 'messages' | 'tools' | 'systemPrompt'> {
+    promptId: string;
 }
-
-
-
-
-
-
-
-
+declare function estimateCost(model: string, inputTokens: number, outputTokens: number): number | null;
+declare function detectRefusal(text: string): boolean;
+declare function validateEnumBounds(data: Record<string, unknown>, enumFields?: Record<string, unknown[]>): boolean;
+declare function validateNumericBounds(data: Record<string, unknown>, numericBounds?: Record<string, [number | null, number | null]>): boolean;
+interface ExtractedResponse {
+    model: string;
+    content: string;
+    inputTokens: number | null;
+    outputTokens: number | null;
+    totalTokens: number | null;
+    finishReason: string | null;
+    toolCalls: Array<{
+        name: string;
+        arguments: string;
+    }>;
+    logprobs: unknown;
 }
-declare
+declare function extractOpenAIResponse(response: any): ExtractedResponse;
+declare function extractAnthropicResponse(response: any): ExtractedResponse;
+declare class PromptTracker {
+    private promptId;
     private apiKey;
     private baseUrl;
-    private
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-     * const records = await processData();
-     * return { recordsProcessed: records.length };
-     * });
-     */
-    run<T>(pipelineId: string, fn: () => T | Promise<T>, options?: {
-        appName?: string;
-    }): Promise<T>;
-    /**
-     * Create a wrapper function that auto-sends heartbeats.
-     *
-     * @param pipelineId - Unique identifier for this pipeline.
-     * @param fn - The function to wrap.
-     * @param options - Additional options.
-     * @returns A wrapped function.
-     *
-     * @example
-     * const myPipeline = dp.wrap('daily-etl', async () => {
-     *   await processData();
-     * });
-     *
-     * // Later...
-     * await myPipeline();
-     */
-    wrap<T extends (...args: unknown[]) => unknown>(pipelineId: string, fn: T, options?: {
-        appName?: string;
-    }): (...args: Parameters<T>) => Promise<Awaited<ReturnType<T>>>;
+    private timeoutMs;
+    private appId;
+    private environment;
+    private versionStr;
+    private provider;
+    private schema;
+    private enumFields;
+    private numericBounds;
+    private promptHash;
+    private toolSchemaHash;
+    private systemPromptHash;
+    private startTime;
+    private firstTokenTime;
+    private endTime;
+    private telemetry;
+    private recorded;
+    private retryCount;
+    constructor(promptId: string, options?: TrackOptions);
+    start(): void;
+    markFirstToken(): void;
+    markRetry(): void;
+    record(response: any, parsedOutput?: unknown): unknown;
+    recordError(error: Error): void;
+    private send;
+    isRecorded(): boolean;
+    getTelemetry(): PromptTelemetry;
 }
-
-
-
-
-
-
-
-
-
-
+declare function track<T>(promptId: string, fn: (tracker: PromptTracker) => Promise<T>, options?: TrackOptions): Promise<T>;
+type OpenAIClient = any;
+interface TrackedCompletions {
+    create: (params: any) => Promise<any>;
+}
+interface TrackedChat {
+    completions: TrackedCompletions;
+}
+interface TrackedOpenAIClient extends OpenAIClient {
+    chat: TrackedChat;
+}
+declare function wrapOpenAI(client: OpenAIClient, options: WrapOpenAIOptions): TrackedOpenAIClient;
 
-export {
+export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
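One corner of the exported surface above gets no worked example in the README: the safety-proxy helpers `validateEnumBounds` and `validateNumericBounds`, which back the `enumFields`/`numericBounds` options and the `enum_out_of_range`/`numeric_out_of_bounds` telemetry fields. A minimal sketch against the declared signatures; the sample data is illustrative, and the diff does not document the boolean's polarity, so verify against your installed version:

```typescript
import { track, validateEnumBounds, validateNumericBounds } from 'deadpipe';

// Illustrative parsed LLM output.
const data: Record<string, unknown> = { status: 'approved', amount: 125.5 };

// Standalone checks, matching the declared signatures (boolean results).
const enumResult = validateEnumBounds(data, {
  status: ['pending', 'approved', 'rejected'],
});
const boundsResult = validateNumericBounds(data, { amount: [0, 10000] });
console.log({ enumResult, boundsResult });

// The same maps can be passed to track() via options, which is what
// populates enum_out_of_range / numeric_out_of_bounds in telemetry.
await track('checkout_agent', async (t) => {
  // ... provider call, then t.record(response)
}, {
  enumFields: { status: ['pending', 'approved', 'rejected'] },
  numericBounds: { amount: [0, 10000] },
});
```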