deadpipe 1.0.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,29 @@
1
+ Deadpipe SDK License
2
+
3
+ Copyright 2024 Deadpipe
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to use
7
+ the Software solely for the purpose of integrating with the Deadpipe monitoring
8
+ service (https://deadpipe.com), subject to the following conditions:
9
+
10
+ 1. The Software may only be used to send data to and receive data from the
11
+ official Deadpipe service.
12
+
13
+ 2. The Software may not be modified, distributed, sublicensed, or used in any
14
+ commercial product or service without explicit written permission from
15
+ Deadpipe.
16
+
17
+ 3. The Software may not be used to create competing monitoring services or
18
+ products.
19
+
20
+ 4. The above copyright notice and this permission notice shall be included in
21
+ all copies or substantial portions of the Software.
22
+
23
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29
+ SOFTWARE.
package/README.md CHANGED
@@ -1,6 +1,6 @@
1
1
  # Deadpipe Node.js SDK
2
2
 
3
- Dead simple pipeline monitoring. Know when your pipelines die.
3
+ LLM observability that answers one question: **"Is this prompt behaving the same as when it was last safe?"**
4
4
 
5
5
  ## Installation
6
6
 
@@ -14,160 +14,293 @@ pnpm add deadpipe
14
14
 
15
15
  ## Quick Start
16
16
 
17
- ### Option 1: Wrapper (Recommended)
17
+ ### Option 1: Context Manager (Recommended)
18
18
 
19
19
  ```typescript
20
- import { Deadpipe } from 'deadpipe';
20
+ import { track } from 'deadpipe';
21
+ import OpenAI from 'openai';
21
22
 
22
- const dp = new Deadpipe('your-api-key');
23
+ const client = new OpenAI();
23
24
 
24
- // Wrap any async function
25
- await dp.run('daily-sales-etl', async () => {
26
- await processData();
27
- return { recordsProcessed: 1500 }; // Optional: track records
25
+ const response = await track('checkout_agent', async (t) => {
26
+ const response = await client.chat.completions.create({
27
+ model: 'gpt-4',
28
+ messages: [{ role: 'user', content: 'Process refund for order 1938' }]
29
+ });
30
+ t.record(response);
31
+ return response;
28
32
  });
29
-
30
- // Deadpipe automatically sends success/failed heartbeat when done
31
33
  ```
32
34
 
33
- ### Option 2: Create a Wrapped Function
35
+ ### Option 2: Auto-Wrapping (Zero Code Changes)
34
36
 
35
37
  ```typescript
36
- import { Deadpipe } from 'deadpipe';
38
+ import { wrapOpenAI } from 'deadpipe';
39
+ import OpenAI from 'openai';
37
40
 
38
- const dp = new Deadpipe('your-api-key');
41
+ const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
39
42
 
40
- // Create a wrapped version of your function
41
- const myPipeline = dp.wrap('hourly-sync', async () => {
42
- await syncData();
43
+ // All calls automatically tracked
44
+ const response = await client.chat.completions.create({
45
+ model: 'gpt-4',
46
+ messages: [{ role: 'user', content: 'Process refund for order 1938' }]
43
47
  });
44
-
45
- // Call it later
46
- await myPipeline();
47
48
  ```
48
49
 
49
- ### Option 3: Manual Ping
50
+ ### Option 3: With Schema Validation (Zod)
50
51
 
51
52
  ```typescript
52
- import { Deadpipe } from 'deadpipe';
53
+ import { track } from 'deadpipe';
54
+ import { z } from 'zod';
55
+ import OpenAI from 'openai';
56
+
57
+ const RefundResponse = z.object({
58
+ order_id: z.string(),
59
+ amount: z.number(),
60
+ status: z.string(),
61
+ });
53
62
 
54
- const dp = new Deadpipe('your-api-key');
63
+ const client = new OpenAI();
55
64
 
56
- try {
57
- await runMyJob();
58
- await dp.ping('my-job', { status: 'success', recordsProcessed: 1000 });
59
- } catch (error) {
60
- await dp.ping('my-job', { status: 'failed' });
61
- throw error;
62
- }
65
+ const result = await track('checkout_agent', async (t) => {
66
+ const response = await client.chat.completions.create({
67
+ model: 'gpt-4',
68
+ messages: [{ role: 'user', content: 'Process refund for order 1938' }],
69
+ response_format: { type: 'json_object' }
70
+ });
71
+
72
+ return t.record(response);
73
+ }, {
74
+ schema: {
75
+ validate: (data) => {
76
+ const result = RefundResponse.safeParse(data);
77
+ return {
78
+ success: result.success,
79
+ data: result.success ? result.data : undefined,
80
+ errors: result.success ? undefined : result.error.errors.map(e => e.message)
81
+ };
82
+ }
83
+ }
84
+ });
85
+ // result is typed as RefundResponse | null
63
86
  ```
64
87
 
65
- ### Option 4: Environment Variable
88
+ ## What Gets Tracked
89
+
90
+ Every prompt execution captures:
91
+
92
+ | Category | Metrics |
93
+ |----------|---------|
94
+ | **Identity** | prompt_id, model, provider, app_id, environment, version |
95
+ | **Timing** | request_start, first_token_time, total_latency |
96
+ | **Volume** | input_tokens, output_tokens, estimated_cost_usd |
97
+ | **Reliability** | http_status, timeout, retry_count, error_message |
98
+ | **Output Integrity** | output_length, empty_output, truncated, json_parse_success, schema_validation_pass |
99
+ | **Behavioral Fingerprint** | output_hash, refusal_flag, tool_calls_count |
100
+ | **Safety Proxies** | enum_out_of_range, numeric_out_of_bounds |
101
+ | **Change Context** | prompt_hash, tool_schema_hash, system_prompt_hash |
102
+
103
+ ## Advanced Usage
66
104
 
67
- Set `DEADPIPE_API_KEY` and use module-level functions:
105
+ ### Track Streaming Responses
68
106
 
69
107
  ```typescript
70
- import { run, ping } from 'deadpipe';
108
+ const response = await track('streaming_agent', async (t) => {
109
+ const stream = await client.chat.completions.create({
110
+ model: 'gpt-4',
111
+ messages: [{ role: 'user', content: 'Tell me a story' }],
112
+ stream: true,
113
+ });
114
+
115
+ let fullContent = '';
116
+ for await (const chunk of stream) {
117
+ if (chunk.choices[0]?.delta?.content) {
118
+ t.markFirstToken(); // Call once on first token
119
+ fullContent += chunk.choices[0].delta.content;
120
+ }
121
+ }
122
+
123
+ // Record manually for streams
124
+ t.record({
125
+ model: 'gpt-4',
126
+ choices: [{ message: { content: fullContent } }],
127
+ usage: { prompt_tokens: 10, completion_tokens: 100, total_tokens: 110 }
128
+ });
71
129
 
72
- // Uses DEADPIPE_API_KEY from environment
73
- await run('my-pipeline', async () => {
74
- await doWork();
130
+ return fullContent;
75
131
  });
132
+ ```
133
+
134
+ ### Track Retries
135
+
136
+ ```typescript
137
+ const response = await track('checkout_agent', async (t) => {
138
+ for (let attempt = 0; attempt < 3; attempt++) {
139
+ try {
140
+ const response = await client.chat.completions.create({...});
141
+ t.record(response);
142
+ return response;
143
+ } catch (error) {
144
+ t.markRetry();
145
+ if (attempt === 2) throw error;
146
+ }
147
+ }
148
+ });
149
+ ```
76
150
 
77
- // Or manual ping
78
- await ping('my-pipeline', { status: 'success' });
151
+ ### With Anthropic
152
+
153
+ ```typescript
154
+ import { track } from 'deadpipe';
155
+ import Anthropic from '@anthropic-ai/sdk';
156
+
157
+ const client = new Anthropic();
158
+
159
+ const response = await track('claude_agent', async (t) => {
160
+ const response = await client.messages.create({
161
+ model: 'claude-3-sonnet-20240229',
162
+ max_tokens: 1024,
163
+ messages: [{ role: 'user', content: 'Hello, Claude!' }]
164
+ });
165
+ t.record(response);
166
+ return response;
167
+ }, { provider: 'anthropic' });
79
168
  ```
80
169
 
81
- ## Express.js / API Routes
170
+ ### Environment-Based Configuration
82
171
 
83
172
  ```typescript
84
- import { Deadpipe } from 'deadpipe';
173
+ // Uses these environment variables:
174
+ // DEADPIPE_API_KEY - Your API key
175
+ // DEADPIPE_APP_ID - Application identifier
176
+ // DEADPIPE_ENVIRONMENT - e.g., 'production', 'staging'
177
+ // DEADPIPE_VERSION or GIT_COMMIT - Version/commit hash
85
178
 
86
- const dp = new Deadpipe(process.env.DEADPIPE_API_KEY);
179
+ import { track } from 'deadpipe';
87
180
 
88
- // Wrap a cron job handler
89
- export const handler = dp.wrap('daily-report', async () => {
90
- const report = await generateReport();
91
- await sendReport(report);
92
- return { recordsProcessed: report.rows.length };
181
+ // API key auto-loaded from DEADPIPE_API_KEY
182
+ await track('my_prompt', async (t) => {
183
+ // ...
184
+ });
185
+ ```
186
+
187
+ ### Full Options
188
+
189
+ ```typescript
190
+ await track('checkout_agent', fn, {
191
+ // Authentication
192
+ apiKey: 'dp_...',
193
+ baseUrl: 'https://www.deadpipe.com/api/v1',
194
+ timeout: 10000,
195
+
196
+ // Identity
197
+ appId: 'my-app',
198
+ environment: 'production',
199
+ version: '1.2.3',
200
+ provider: 'openai', // or 'anthropic'
201
+
202
+ // Validation
203
+ schema: { validate: (data) => ({ success: true, data }) },
204
+ enumFields: { status: ['pending', 'approved', 'rejected'] },
205
+ numericBounds: { amount: [0, 10000] },
206
+
207
+ // Context (for change detection)
208
+ messages: [...],
209
+ tools: [...],
210
+ systemPrompt: 'You are a helpful assistant...',
93
211
  });
94
212
  ```
95
213
 
96
214
  ## Next.js API Routes
97
215
 
98
216
  ```typescript
99
- import { Deadpipe } from 'deadpipe';
217
+ import { track } from 'deadpipe';
218
+ import OpenAI from 'openai';
100
219
 
101
- const dp = new Deadpipe(process.env.DEADPIPE_API_KEY);
220
+ const client = new OpenAI();
102
221
 
103
222
  export async function POST(request: Request) {
104
- return dp.run('webhook-processor', async () => {
105
- const data = await request.json();
106
- await processWebhook(data);
107
- return Response.json({ success: true });
223
+ const { prompt } = await request.json();
224
+
225
+ const response = await track('api_handler', async (t) => {
226
+ const completion = await client.chat.completions.create({
227
+ model: 'gpt-4',
228
+ messages: [{ role: 'user', content: prompt }]
229
+ });
230
+ t.record(completion);
231
+ return completion;
108
232
  });
233
+
234
+ return Response.json({ result: response.choices[0].message.content });
109
235
  }
110
236
  ```
111
237
 
112
- ## GitHub Actions
113
-
114
- ```yaml
115
- - name: Run ETL
116
- env:
117
- DEADPIPE_API_KEY: ${{ secrets.DEADPIPE_API_KEY }}
118
- run: node scripts/etl.js
119
- ```
238
+ ## Express.js
120
239
 
121
- ```javascript
122
- // scripts/etl.js
123
- import { run } from 'deadpipe';
240
+ ```typescript
241
+ import express from 'express';
242
+ import { track } from 'deadpipe';
243
+ import OpenAI from 'openai';
244
+
245
+ const app = express();
246
+ const client = new OpenAI();
247
+
248
+ app.post('/generate', async (req, res) => {
249
+ const response = await track('express_endpoint', async (t) => {
250
+ const completion = await client.chat.completions.create({
251
+ model: 'gpt-4',
252
+ messages: req.body.messages
253
+ });
254
+ t.record(completion);
255
+ return completion;
256
+ });
124
257
 
125
- await run('github-actions-etl', async () => {
126
- // Your ETL code
258
+ res.json(response);
127
259
  });
128
260
  ```
129
261
 
130
262
  ## API Reference
131
263
 
132
- ### `new Deadpipe(apiKey, options)`
264
+ ### `track(promptId, fn, options?)`
133
265
 
134
- Create a client instance.
266
+ Track a prompt execution with full telemetry.
135
267
 
136
- - `apiKey`: Your API key (or set `DEADPIPE_API_KEY` env var)
137
- - `options.baseUrl`: Override for self-hosted (default: `https://www.deadpipe.com/api/v1`)
138
- - `options.timeout`: Request timeout in ms (default: 10000)
268
+ - `promptId`: Unique identifier for this prompt
269
+ - `fn`: Async function that receives a `PromptTracker`
270
+ - `options`: Configuration options (see above)
139
271
 
140
- ### `dp.ping(pipelineId, options)`
272
+ Returns: `Promise<T>` (result of fn)
141
273
 
142
- Send a heartbeat.
274
+ ### `wrapOpenAI(client, options)`
143
275
 
144
- - `pipelineId`: Unique identifier for this pipeline
145
- - `options.status`: `'success'` or `'failed'` (default: `'success'`)
146
- - `options.durationMs`: How long the run took
147
- - `options.recordsProcessed`: Number of records
148
- - `options.appName`: Group pipelines under an app
276
+ Wrap an OpenAI client to auto-track all completions.
149
277
 
150
- Returns: `Promise<HeartbeatResponse | null>`
278
+ - `client`: OpenAI client instance
279
+ - `options.promptId`: Unique identifier for prompts
151
280
 
152
- ### `dp.run(pipelineId, fn, options)`
281
+ Returns: Wrapped client with identical API
153
282
 
154
- Run a function with automatic heartbeat.
283
+ ### `PromptTracker`
155
284
 
156
- - `pipelineId`: Unique identifier for this pipeline
157
- - `fn`: Async function to run
158
- - `options.appName`: Group pipelines under an app
285
+ The tracker object passed to your function:
159
286
 
160
- Returns: `Promise<T>` (result of fn)
287
+ - `record(response)` - Record the LLM response
288
+ - `markFirstToken()` - Mark when first token received (streaming)
289
+ - `markRetry()` - Mark a retry attempt
290
+ - `recordError(error)` - Record an error
291
+ - `getTelemetry()` - Get the telemetry object
161
292
 
162
- ### `dp.wrap(pipelineId, fn, options)`
293
+ ### `estimateCost(model, inputTokens, outputTokens)`
163
294
 
164
- Create a wrapped version of a function.
295
+ Estimate USD cost for a completion.
165
296
 
166
- Returns: A new function that auto-sends heartbeats.
297
+ ### `detectRefusal(text)`
298
+
299
+ Detect if response is a refusal/decline.
167
300
 
168
301
  ## Zero Dependencies
169
302
 
170
- This SDK has zero runtime dependencies. It uses native `fetch` (Node 18+).
303
+ This SDK has zero runtime dependencies. Uses native `fetch` (Node 18+).
171
304
 
172
305
  ## TypeScript
173
306
 
@@ -175,5 +308,4 @@ Full TypeScript support with type definitions included.
175
308
 
176
309
  ## License
177
310
 
178
- MIT
179
-
311
+ Deadpipe SDK License - see [LICENSE](LICENSE) file.
package/dist/index.d.mts CHANGED
@@ -1,105 +1,162 @@
1
1
  /**
2
- * Deadpipe - Dead simple pipeline monitoring.
2
+ * Deadpipe - LLM observability that answers one question:
3
+ * "Is this prompt behaving the same as when it was last safe?"
3
4
  *
4
5
  * @example
5
- * import { Deadpipe } from 'deadpipe';
6
+ * import { track } from 'deadpipe';
7
+ * import OpenAI from 'openai';
6
8
  *
7
- * const dp = new Deadpipe('your-api-key');
9
+ * const client = new OpenAI();
8
10
  *
9
- * // Option 1: Wrapper
10
- * await dp.run('my-pipeline', async () => {
11
- * // your code here
11
+ * const response = await track('checkout_agent', async (t) => {
12
+ * const response = await client.chat.completions.create({
13
+ * model: 'gpt-4',
14
+ * messages: [{ role: 'user', content: 'Process refund for order 1938' }]
15
+ * });
16
+ * t.record(response);
17
+ * return response;
12
18
  * });
13
19
  *
14
- * // Option 2: Manual
15
- * await dp.ping('my-pipeline', { status: 'success' });
20
+ * @example Auto-wrapping (zero code changes):
21
+ * import { wrapOpenAI } from 'deadpipe';
22
+ * import OpenAI from 'openai';
23
+ *
24
+ * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
25
+ * // All calls automatically tracked
26
+ * const response = await client.chat.completions.create(...);
16
27
  */
17
- type Status = 'success' | 'failed';
18
- interface PingOptions {
19
- status?: Status;
20
- durationMs?: number;
21
- recordsProcessed?: number;
22
- appName?: string;
28
+ declare const VERSION = "2.0.0";
29
+ type StatusType = 'success' | 'error' | 'timeout' | 'empty' | 'schema_violation' | 'refusal';
30
+ interface PromptTelemetry {
31
+ prompt_id: string;
32
+ model?: string;
33
+ provider?: string;
34
+ app_id?: string;
35
+ environment?: string;
36
+ version?: string;
37
+ request_start?: string;
38
+ first_token_time?: number;
39
+ end_time?: string;
40
+ total_latency?: number;
41
+ input_tokens?: number;
42
+ output_tokens?: number;
43
+ total_tokens?: number;
44
+ estimated_cost_usd?: number;
45
+ http_status?: number;
46
+ timeout?: boolean;
47
+ retry_count?: number;
48
+ provider_error_code?: string;
49
+ error_message?: string;
50
+ output_length?: number;
51
+ empty_output?: boolean;
52
+ truncated?: boolean;
53
+ json_parse_success?: boolean;
54
+ schema_validation_pass?: boolean;
55
+ missing_required_fields?: string;
56
+ output_hash?: string;
57
+ output_embedding?: string;
58
+ top_logprob_mean?: number;
59
+ refusal_flag?: boolean;
60
+ tool_call_flag?: boolean;
61
+ tool_calls_count?: number;
62
+ enum_out_of_range?: boolean;
63
+ numeric_out_of_bounds?: boolean;
64
+ hallucination_flags?: string;
65
+ prompt_hash?: string;
66
+ tool_schema_hash?: string;
67
+ system_prompt_hash?: string;
68
+ status?: StatusType;
23
69
  }
24
- interface DeadpipeOptions {
70
+ interface TrackOptions {
25
71
  apiKey?: string;
26
72
  baseUrl?: string;
27
73
  timeout?: number;
74
+ appId?: string;
75
+ environment?: string;
76
+ version?: string;
77
+ provider?: 'openai' | 'anthropic' | string;
78
+ schema?: SchemaValidator;
79
+ enumFields?: Record<string, unknown[]>;
80
+ numericBounds?: Record<string, [number | null, number | null]>;
81
+ messages?: Array<{
82
+ role: string;
83
+ content: string;
84
+ [key: string]: unknown;
85
+ }>;
86
+ tools?: Array<Record<string, unknown>>;
87
+ systemPrompt?: string;
88
+ }
89
+ interface SchemaValidator {
90
+ validate: (data: unknown) => {
91
+ success: boolean;
92
+ data?: unknown;
93
+ errors?: string[];
94
+ };
95
+ }
96
+ interface WrapOpenAIOptions extends Omit<TrackOptions, 'messages' | 'tools' | 'systemPrompt'> {
97
+ promptId: string;
28
98
  }
29
- interface HeartbeatResponse {
30
- received: boolean;
31
- pipeline_id: string;
32
- timestamp: string;
33
- freshness: 'fresh' | 'stale' | 'critical';
34
- next_expected: string;
35
- created?: boolean;
36
- message?: string;
99
+ declare function estimateCost(model: string, inputTokens: number, outputTokens: number): number | null;
100
+ declare function detectRefusal(text: string): boolean;
101
+ declare function validateEnumBounds(data: Record<string, unknown>, enumFields?: Record<string, unknown[]>): boolean;
102
+ declare function validateNumericBounds(data: Record<string, unknown>, numericBounds?: Record<string, [number | null, number | null]>): boolean;
103
+ interface ExtractedResponse {
104
+ model: string;
105
+ content: string;
106
+ inputTokens: number | null;
107
+ outputTokens: number | null;
108
+ totalTokens: number | null;
109
+ finishReason: string | null;
110
+ toolCalls: Array<{
111
+ name: string;
112
+ arguments: string;
113
+ }>;
114
+ logprobs: unknown;
37
115
  }
38
- declare class Deadpipe {
116
+ declare function extractOpenAIResponse(response: any): ExtractedResponse;
117
+ declare function extractAnthropicResponse(response: any): ExtractedResponse;
118
+ declare class PromptTracker {
119
+ private promptId;
39
120
  private apiKey;
40
121
  private baseUrl;
41
- private timeout;
42
- /**
43
- * Create a Deadpipe client.
44
- *
45
- * @param apiKey - Your Deadpipe API key. Falls back to DEADPIPE_API_KEY env var.
46
- * @param options - Configuration options.
47
- */
48
- constructor(apiKey?: string, options?: Omit<DeadpipeOptions, 'apiKey'>);
49
- /**
50
- * Send a heartbeat ping for a pipeline.
51
- *
52
- * @param pipelineId - Unique identifier for this pipeline.
53
- * @param options - Ping options.
54
- * @returns The heartbeat response, or null if the request failed.
55
- */
56
- ping(pipelineId: string, options?: PingOptions): Promise<HeartbeatResponse | null>;
57
- /**
58
- * Run a function with automatic heartbeat on completion.
59
- *
60
- * @param pipelineId - Unique identifier for this pipeline.
61
- * @param fn - The function to run.
62
- * @param options - Additional options.
63
- * @returns The result of the function.
64
- *
65
- * @example
66
- * const result = await dp.run('daily-etl', async () => {
67
- * const records = await processData();
68
- * return { recordsProcessed: records.length };
69
- * });
70
- */
71
- run<T>(pipelineId: string, fn: () => T | Promise<T>, options?: {
72
- appName?: string;
73
- }): Promise<T>;
74
- /**
75
- * Create a wrapper function that auto-sends heartbeats.
76
- *
77
- * @param pipelineId - Unique identifier for this pipeline.
78
- * @param fn - The function to wrap.
79
- * @param options - Additional options.
80
- * @returns A wrapped function.
81
- *
82
- * @example
83
- * const myPipeline = dp.wrap('daily-etl', async () => {
84
- * await processData();
85
- * });
86
- *
87
- * // Later...
88
- * await myPipeline();
89
- */
90
- wrap<T extends (...args: unknown[]) => unknown>(pipelineId: string, fn: T, options?: {
91
- appName?: string;
92
- }): (...args: Parameters<T>) => Promise<Awaited<ReturnType<T>>>;
122
+ private timeoutMs;
123
+ private appId;
124
+ private environment;
125
+ private versionStr;
126
+ private provider;
127
+ private schema;
128
+ private enumFields;
129
+ private numericBounds;
130
+ private promptHash;
131
+ private toolSchemaHash;
132
+ private systemPromptHash;
133
+ private startTime;
134
+ private firstTokenTime;
135
+ private endTime;
136
+ private telemetry;
137
+ private recorded;
138
+ private retryCount;
139
+ constructor(promptId: string, options?: TrackOptions);
140
+ start(): void;
141
+ markFirstToken(): void;
142
+ markRetry(): void;
143
+ record(response: any, parsedOutput?: unknown): unknown;
144
+ recordError(error: Error): void;
145
+ private send;
146
+ isRecorded(): boolean;
147
+ getTelemetry(): PromptTelemetry;
93
148
  }
94
- /**
95
- * Send a heartbeat using DEADPIPE_API_KEY from environment.
96
- */
97
- declare function ping(pipelineId: string, options?: PingOptions): Promise<HeartbeatResponse | null>;
98
- /**
99
- * Run a function with automatic heartbeat using DEADPIPE_API_KEY from environment.
100
- */
101
- declare function run<T>(pipelineId: string, fn: () => T | Promise<T>, options?: {
102
- appName?: string;
103
- }): Promise<T>;
149
+ declare function track<T>(promptId: string, fn: (tracker: PromptTracker) => Promise<T>, options?: TrackOptions): Promise<T>;
150
+ type OpenAIClient = any;
151
+ interface TrackedCompletions {
152
+ create: (params: any) => Promise<any>;
153
+ }
154
+ interface TrackedChat {
155
+ completions: TrackedCompletions;
156
+ }
157
+ interface TrackedOpenAIClient extends OpenAIClient {
158
+ chat: TrackedChat;
159
+ }
160
+ declare function wrapOpenAI(client: OpenAIClient, options: WrapOpenAIOptions): TrackedOpenAIClient;
104
161
 
105
- export { Deadpipe, type DeadpipeOptions, type HeartbeatResponse, type PingOptions, type Status, Deadpipe as default, ping, run };
162
+ export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };