deadpipe 2.0.2 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,6 +2,8 @@
2
2
 
3
3
  LLM observability that answers one question: **"Is this prompt behaving the same as when it was last safe?"**
4
4
 
5
+ **Supports:** OpenAI, Anthropic, Google AI (Gemini), Mistral, Cohere
6
+
5
7
  ## Installation
6
8
 
7
9
  ```bash
@@ -14,22 +16,40 @@ pnpm add deadpipe
14
16
 
15
17
  ## Quick Start
16
18
 
17
- **Recommended: Wrap your client (zero code changes, automatic context capture)**
19
+ ### Universal Wrapper (Recommended)
20
+
21
+ The `wrap()` function auto-detects your provider and wraps appropriately:
18
22
 
19
23
  ```typescript
20
- import { wrapOpenAI } from 'deadpipe';
24
+ import { wrap } from 'deadpipe';
21
25
  import OpenAI from 'openai';
26
+ import Anthropic from '@anthropic-ai/sdk';
22
27
 
23
- const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
28
+ // Works with any supported provider
29
+ const openai = wrap(new OpenAI(), { promptId: 'checkout_agent' });
30
+ const anthropic = wrap(new Anthropic(), { promptId: 'support_agent' });
24
31
 
25
32
  // All calls automatically tracked with full input/output context
26
- const response = await client.chat.completions.create({
33
+ const response = await openai.chat.completions.create({
27
34
  model: 'gpt-4',
28
35
  messages: [{ role: 'user', content: 'Process refund for order 1938' }]
29
36
  });
30
37
  ```
31
38
 
32
- **Advanced: Manual tracking (for streaming, custom logic, etc.)**
39
+ ### Provider-Specific Wrappers
40
+
41
+ For explicit control, use provider-specific wrappers:
42
+
43
+ ```typescript
44
+ import { wrapOpenAI, wrapAnthropic, wrapGoogleAI, wrapMistral, wrapCohere } from 'deadpipe';
45
+
46
+ const openai = wrapOpenAI(new OpenAI(), { promptId: 'my_agent' });
47
+ const anthropic = wrapAnthropic(new Anthropic(), { promptId: 'my_agent' });
48
+ ```
49
+
50
+ ### Manual Tracking
51
+
52
+ For streaming, custom logic, or unsupported clients:
33
53
 
34
54
  ```typescript
35
55
  import { track } from 'deadpipe';
@@ -48,7 +68,95 @@ const response = await track('checkout_agent', async (t) => {
48
68
  });
49
69
  ```
50
70
 
51
- ### Option 3: With Schema Validation (Zod)
71
+ ## Provider Examples
72
+
73
+ ### OpenAI
74
+
75
+ ```typescript
76
+ import { wrap } from 'deadpipe';
77
+ import OpenAI from 'openai';
78
+
79
+ const client = wrap(new OpenAI(), { promptId: 'openai_agent' });
80
+
81
+ const response = await client.chat.completions.create({
82
+ model: 'gpt-4o',
83
+ messages: [{ role: 'user', content: 'Hello!' }]
84
+ });
85
+ ```
86
+
87
+ ### Anthropic
88
+
89
+ ```typescript
90
+ import { wrap } from 'deadpipe';
91
+ import Anthropic from '@anthropic-ai/sdk';
92
+
93
+ const client = wrap(new Anthropic(), { promptId: 'claude_agent' });
94
+
95
+ const response = await client.messages.create({
96
+ model: 'claude-sonnet-4-20250514',
97
+ max_tokens: 1024,
98
+ messages: [{ role: 'user', content: 'Hello, Claude!' }]
99
+ });
100
+ ```
101
+
102
+ ### Google AI (Gemini)
103
+
104
+ ```typescript
105
+ import { wrap } from 'deadpipe';
106
+ import { GoogleGenerativeAI } from '@google/generative-ai';
107
+
108
+ const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY);
109
+ const model = wrap(genAI, { promptId: 'gemini_agent' }).getGenerativeModel({ model: 'gemini-1.5-pro' });
110
+
111
+ const result = await model.generateContent('Hello, Gemini!');
112
+ ```
113
+
114
+ ### Mistral
115
+
116
+ ```typescript
117
+ import { wrap } from 'deadpipe';
118
+ import { Mistral } from '@mistralai/mistralai';
119
+
120
+ const client = wrap(new Mistral({ apiKey: process.env.MISTRAL_API_KEY }), { promptId: 'mistral_agent' });
121
+
122
+ const response = await client.chat.complete({
123
+ model: 'mistral-large-latest',
124
+ messages: [{ role: 'user', content: 'Hello, Mistral!' }]
125
+ });
126
+ ```
127
+
128
+ ### Cohere
129
+
130
+ ```typescript
131
+ import { wrap } from 'deadpipe';
132
+ import { CohereClient } from 'cohere-ai';
133
+
134
+ const client = wrap(new CohereClient({ token: process.env.COHERE_API_KEY }), { promptId: 'cohere_agent' });
135
+
136
+ const response = await client.chat({
137
+ model: 'command-r-plus',
138
+ message: 'Hello, Cohere!'
139
+ });
140
+ ```
141
+
142
+ ## What Gets Tracked
143
+
144
+ Every prompt execution captures:
145
+
146
+ | Category | Metrics |
147
+ |----------|---------|
148
+ | **Identity** | prompt_id, model, provider, app_id, environment, version |
149
+ | **Timing** | request_start, first_token_time, total_latency |
150
+ | **Volume** | input_tokens, output_tokens, estimated_cost_usd |
151
+ | **Reliability** | http_status, timeout, retry_count, error_message |
152
+ | **Output Integrity** | output_length, empty_output, truncated, json_parse_success, schema_validation_pass |
153
+ | **Behavioral Fingerprint** | output_hash, refusal_flag, tool_calls_count |
154
+ | **Safety Proxies** | enum_out_of_range, numeric_out_of_bounds |
155
+ | **Change Context** | prompt_hash, tool_schema_hash, system_prompt_hash |
156
+
157
+ ## Advanced Usage
158
+
159
+ ### Schema Validation (Zod)
52
160
 
53
161
  ```typescript
54
162
  import { track } from 'deadpipe';
@@ -83,26 +191,8 @@ const result = await track('checkout_agent', async (t) => {
83
191
  }
84
192
  }
85
193
  });
86
- // result is typed as RefundResponse | null
87
194
  ```
88
195
 
89
- ## What Gets Tracked
90
-
91
- Every prompt execution captures:
92
-
93
- | Category | Metrics |
94
- |----------|---------|
95
- | **Identity** | prompt_id, model, provider, app_id, environment, version |
96
- | **Timing** | request_start, first_token_time, total_latency |
97
- | **Volume** | input_tokens, output_tokens, estimated_cost_usd |
98
- | **Reliability** | http_status, timeout, retry_count, error_message |
99
- | **Output Integrity** | output_length, empty_output, truncated, json_parse_success, schema_validation_pass |
100
- | **Behavioral Fingerprint** | output_hash, refusal_flag, tool_calls_count |
101
- | **Safety Proxies** | enum_out_of_range, numeric_out_of_bounds |
102
- | **Change Context** | prompt_hash, tool_schema_hash, system_prompt_hash |
103
-
104
- ## Advanced Usage
105
-
106
196
  ### Track Streaming Responses
107
197
 
108
198
  ```typescript
@@ -123,7 +213,6 @@ const response = await track('streaming_agent', async (t) => {
123
213
  }
124
214
  }
125
215
 
126
- // Record manually for streams - pass input params to capture context
127
216
  t.record({
128
217
  model: 'gpt-4',
129
218
  choices: [{ message: { content: fullContent } }],
@@ -151,25 +240,6 @@ const response = await track('checkout_agent', async (t) => {
151
240
  });
152
241
  ```
153
242
 
154
- ### With Anthropic
155
-
156
- ```typescript
157
- import { track } from 'deadpipe';
158
- import Anthropic from '@anthropic-ai/sdk';
159
-
160
- const client = new Anthropic();
161
-
162
- const response = await track('claude_agent', async (t) => {
163
- const response = await client.messages.create({
164
- model: 'claude-3-sonnet-20240229',
165
- max_tokens: 1024,
166
- messages: [{ role: 'user', content: 'Hello, Claude!' }]
167
- });
168
- t.record(response); // Provider auto-detected from response
169
- return response;
170
- });
171
- ```
172
-
173
243
  ### Environment-Based Configuration
174
244
 
175
245
  ```typescript
@@ -179,18 +249,18 @@ const response = await track('claude_agent', async (t) => {
179
249
  // DEADPIPE_ENVIRONMENT - e.g., 'production', 'staging'
180
250
  // DEADPIPE_VERSION or GIT_COMMIT - Version/commit hash
181
251
 
182
- import { track } from 'deadpipe';
252
+ import { wrap } from 'deadpipe';
183
253
 
184
254
  // API key auto-loaded from DEADPIPE_API_KEY
185
- await track('my_prompt', async (t) => {
186
- // ...
187
- });
255
+ const client = wrap(new OpenAI(), { promptId: 'my_agent' });
188
256
  ```
189
257
 
190
258
  ### Full Options
191
259
 
192
260
  ```typescript
193
- await track('checkout_agent', fn, {
261
+ const client = wrap(new OpenAI(), {
262
+ promptId: 'checkout_agent',
263
+
194
264
  // Authentication
195
265
  apiKey: 'dp_...',
196
266
  baseUrl: 'https://www.deadpipe.com/api/v1',
@@ -208,48 +278,42 @@ await track('checkout_agent', fn, {
208
278
  });
209
279
  ```
210
280
 
211
- ## Next.js API Routes
281
+ ## Framework Examples
282
+
283
+ ### Next.js API Routes
212
284
 
213
285
  ```typescript
214
- import { track } from 'deadpipe';
286
+ import { wrap } from 'deadpipe';
215
287
  import OpenAI from 'openai';
216
288
 
217
- const client = new OpenAI();
289
+ const client = wrap(new OpenAI(), { promptId: 'api_handler' });
218
290
 
219
291
  export async function POST(request: Request) {
220
292
  const { prompt } = await request.json();
221
293
 
222
- const response = await track('api_handler', async (t) => {
223
- const completion = await client.chat.completions.create({
224
- model: 'gpt-4',
225
- messages: [{ role: 'user', content: prompt }]
226
- });
227
- t.record(completion);
228
- return completion;
294
+ const response = await client.chat.completions.create({
295
+ model: 'gpt-4',
296
+ messages: [{ role: 'user', content: prompt }]
229
297
  });
230
298
 
231
299
  return Response.json({ result: response.choices[0].message.content });
232
300
  }
233
301
  ```
234
302
 
235
- ## Express.js
303
+ ### Express.js
236
304
 
237
305
  ```typescript
238
306
  import express from 'express';
239
- import { track } from 'deadpipe';
307
+ import { wrap } from 'deadpipe';
240
308
  import OpenAI from 'openai';
241
309
 
242
310
  const app = express();
243
- const client = new OpenAI();
311
+ const client = wrap(new OpenAI(), { promptId: 'express_endpoint' });
244
312
 
245
313
  app.post('/generate', async (req, res) => {
246
- const response = await track('express_endpoint', async (t) => {
247
- const completion = await client.chat.completions.create({
248
- model: 'gpt-4',
249
- messages: req.body.messages
250
- });
251
- t.record(completion);
252
- return completion;
314
+ const response = await client.chat.completions.create({
315
+ model: 'gpt-4',
316
+ messages: req.body.messages
253
317
  });
254
318
 
255
319
  res.json(response);
@@ -258,52 +322,59 @@ app.post('/generate', async (req, res) => {
258
322
 
259
323
  ## API Reference
260
324
 
325
+ ### `wrap(client, options)`
326
+
327
+ Universal wrapper that auto-detects the provider and wraps it appropriately.
328
+
329
+ - `client`: Any supported LLM client
330
+ - `options.promptId`: Unique identifier for prompts
331
+
332
+ Returns: Wrapped client with identical API
333
+
334
+ ### Provider-Specific Wrappers
335
+
336
+ - `wrapOpenAI(client, options)` - OpenAI client
337
+ - `wrapAnthropic(client, options)` - Anthropic client
338
+ - `wrapGoogleAI(client, options)` - Google AI client
339
+ - `wrapMistral(client, options)` - Mistral client
340
+ - `wrapCohere(client, options)` - Cohere client
341
+
261
342
  ### `track(promptId, fn, options?)`
262
343
 
263
344
  Track a prompt execution with full telemetry.
264
345
 
265
346
  - `promptId`: Unique identifier for this prompt
266
347
  - `fn`: Async function that receives a `PromptTracker`
267
- - `options`: Configuration options (see above)
348
+ - `options`: Configuration options
268
349
 
269
350
  Returns: `Promise<T>` (result of fn)
270
351
 
271
- ### `wrapOpenAI(client, options)`
272
-
273
- Wrap an OpenAI client to auto-track all completions.
274
-
275
- - `client`: OpenAI client instance
276
- - `options.promptId`: Unique identifier for prompts
277
-
278
- Returns: Wrapped client with identical API
279
-
280
352
  ### `PromptTracker`
281
353
 
282
354
  The tracker object passed to your function:
283
355
 
284
356
  - `record(response, parsedOutput?, input?)` - Record the LLM response
285
- - `response` - The LLM response object
286
- - `parsedOutput` - Optional pre-parsed output (if you already parsed JSON)
287
- - `input` - Optional input parameters (messages, tools, etc.) to capture context
288
357
  - `markFirstToken()` - Mark when first token received (streaming)
289
358
  - `markRetry()` - Mark a retry attempt
290
359
  - `recordError(error)` - Record an error
291
360
  - `getTelemetry()` - Get the telemetry object
292
361
 
293
- **Tip:** Always pass the input parameters to `record()` to capture full context:
294
- ```typescript
295
- const params = { model: 'gpt-4', messages: [...] };
296
- const response = await client.chat.completions.create(params);
297
- t.record(response, undefined, params); // Pass params to capture input context
298
- ```
299
-
300
- ### `estimateCost(model, inputTokens, outputTokens)`
362
+ ### Utility Functions
301
363
 
302
- Estimate USD cost for a completion.
364
+ - `estimateCost(model, inputTokens, outputTokens)` - Estimate USD cost
365
+ - `detectRefusal(text)` - Detect if response is a refusal
366
+ - `detectProvider(response)` - Detect provider from response
367
+ - `detectClientProvider(client)` - Detect provider from client
303
368
 
304
- ### `detectRefusal(text)`
369
+ ## Supported Models & Pricing
305
370
 
306
- Detect if response is a refusal/decline.
371
+ | Provider | Models |
372
+ |----------|--------|
373
+ | **OpenAI** | gpt-4, gpt-4o, gpt-4o-mini, gpt-4-turbo, gpt-3.5-turbo, o1, o1-mini, o1-pro |
374
+ | **Anthropic** | claude-3-opus, claude-3-sonnet, claude-3-haiku, claude-3.5-sonnet, claude-sonnet-4, claude-opus-4 |
375
+ | **Google AI** | gemini-1.5-pro, gemini-1.5-flash, gemini-2.0-flash, gemini-2.0-pro |
376
+ | **Mistral** | mistral-large, mistral-medium, mistral-small, mistral-nemo, codestral, pixtral |
377
+ | **Cohere** | command-r-plus, command-r, command, command-light |
307
378
 
308
379
  ## Zero Dependencies
309
380
 
package/dist/index.d.mts CHANGED
@@ -2,32 +2,35 @@
2
2
  * Deadpipe - LLM observability that answers one question:
3
3
  * "Is this prompt behaving the same as when it was last safe?"
4
4
  *
5
- * @example Recommended: Wrap your client (zero code changes)
6
- * import { wrapOpenAI } from 'deadpipe';
5
+ * Supports: OpenAI, Anthropic, Google AI (Gemini), Mistral, Cohere
6
+ *
7
+ * @example Recommended: Universal wrapper (auto-detects provider)
8
+ * import { wrap } from 'deadpipe';
7
9
  * import OpenAI from 'openai';
10
+ * import Anthropic from '@anthropic-ai/sdk';
8
11
  *
9
- * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
10
- * // All calls automatically tracked with full context
11
- * const response = await client.chat.completions.create({
12
- * model: 'gpt-4',
13
- * messages: [{ role: 'user', content: 'Process refund for order 1938' }]
14
- * });
12
+ * // Works with any supported provider
13
+ * const openai = wrap(new OpenAI(), { promptId: 'checkout_agent' });
14
+ * const anthropic = wrap(new Anthropic(), { promptId: 'support_agent' });
15
+ *
16
+ * @example Provider-specific wrappers
17
+ * import { wrapOpenAI, wrapAnthropic } from 'deadpipe';
18
+ *
19
+ * const openai = wrapOpenAI(new OpenAI(), { promptId: 'my_agent' });
20
+ * const anthropic = wrapAnthropic(new Anthropic(), { promptId: 'my_agent' });
15
21
  *
16
22
  * @example Advanced: Manual tracking (for streaming, custom logic, etc.)
17
23
  * import { track } from 'deadpipe';
18
- * import OpenAI from 'openai';
19
- *
20
- * const client = new OpenAI();
21
- * const params = { model: 'gpt-4', messages: [...] };
22
24
  *
23
25
  * const response = await track('checkout_agent', async (t) => {
24
26
  * const response = await client.chat.completions.create(params);
25
- * t.record(response, undefined, params); // Pass params to capture input
27
+ * t.record(response, undefined, params);
26
28
  * return response;
27
29
  * });
28
30
  */
29
- declare const VERSION = "2.0.0";
31
+ declare const VERSION = "3.0.0";
30
32
  type StatusType = 'success' | 'error' | 'timeout' | 'empty' | 'schema_violation' | 'refusal';
33
+ type ProviderType = 'openai' | 'anthropic' | 'google' | 'mistral' | 'cohere' | 'unknown';
31
34
  interface PromptTelemetry {
32
35
  prompt_id: string;
33
36
  model?: string;
@@ -89,9 +92,10 @@ interface SchemaValidator {
89
92
  errors?: string[];
90
93
  };
91
94
  }
92
- interface WrapOpenAIOptions extends TrackOptions {
95
+ interface WrapOptions extends TrackOptions {
93
96
  promptId: string;
94
97
  }
98
+ type WrapOpenAIOptions = WrapOptions;
95
99
  declare function estimateCost(model: string, inputTokens: number, outputTokens: number): number | null;
96
100
  declare function detectRefusal(text: string): boolean;
97
101
  declare function validateEnumBounds(data: Record<string, unknown>, enumFields?: Record<string, unknown[]>): boolean;
@@ -100,7 +104,12 @@ declare function validateNumericBounds(data: Record<string, unknown>, numericBou
100
104
  * Auto-detect provider from response object.
101
105
  * Checks for provider-specific response structures.
102
106
  */
103
- declare function detectProvider(response: any): 'openai' | 'anthropic' | 'unknown';
107
+ declare function detectProvider(response: any): ProviderType;
108
+ /**
109
+ * Auto-detect provider from client object.
110
+ * Checks for provider-specific client structures.
111
+ */
112
+ declare function detectClientProvider(client: any): ProviderType;
104
113
  interface ExtractedResponse {
105
114
  model: string;
106
115
  content: string;
@@ -116,6 +125,13 @@ interface ExtractedResponse {
116
125
  }
117
126
  declare function extractOpenAIResponse(response: any): ExtractedResponse;
118
127
  declare function extractAnthropicResponse(response: any): ExtractedResponse;
128
+ declare function extractGoogleAIResponse(response: any): ExtractedResponse;
129
+ declare function extractMistralResponse(response: any): ExtractedResponse;
130
+ declare function extractCohereResponse(response: any): ExtractedResponse;
131
+ /**
132
+ * Extract response data based on detected or specified provider.
133
+ */
134
+ declare function extractResponse(response: any, provider?: ProviderType): ExtractedResponse;
119
135
  declare class PromptTracker {
120
136
  private promptId;
121
137
  private apiKey;
@@ -149,16 +165,55 @@ declare class PromptTracker {
149
165
  getTelemetry(): PromptTelemetry;
150
166
  }
151
167
  declare function track<T>(promptId: string, fn: (tracker: PromptTracker) => Promise<T>, options?: TrackOptions): Promise<T>;
152
- type OpenAIClient = any;
168
+ type AnyClient = any;
153
169
  interface TrackedCompletions {
154
170
  create: (params: any) => Promise<any>;
155
171
  }
156
172
  interface TrackedChat {
157
173
  completions: TrackedCompletions;
158
174
  }
159
- interface TrackedOpenAIClient extends OpenAIClient {
175
+ interface TrackedOpenAIClient extends AnyClient {
160
176
  chat: TrackedChat;
161
177
  }
162
- declare function wrapOpenAI(client: OpenAIClient, options: WrapOpenAIOptions): TrackedOpenAIClient;
178
+ declare function wrapOpenAI(client: AnyClient, options: WrapOptions): TrackedOpenAIClient;
179
+ interface TrackedMessages {
180
+ create: (params: any) => Promise<any>;
181
+ }
182
+ interface TrackedAnthropicClient extends AnyClient {
183
+ messages: TrackedMessages;
184
+ }
185
+ declare function wrapAnthropic(client: AnyClient, options: WrapOptions): TrackedAnthropicClient;
186
+ interface TrackedGenerativeModel {
187
+ generateContent: (params: any) => Promise<any>;
188
+ startChat: (params?: any) => any;
189
+ }
190
+ interface TrackedGoogleAIClient extends AnyClient {
191
+ getGenerativeModel: (params: any) => TrackedGenerativeModel;
192
+ }
193
+ declare function wrapGoogleAI(client: AnyClient, options: WrapOptions): TrackedGoogleAIClient;
194
+ interface TrackedMistralChat {
195
+ complete: (params: any) => Promise<any>;
196
+ }
197
+ interface TrackedMistralClient extends AnyClient {
198
+ chat: TrackedMistralChat;
199
+ }
200
+ declare function wrapMistral(client: AnyClient, options: WrapOptions): TrackedMistralClient;
201
+ interface TrackedCohereClient extends AnyClient {
202
+ chat: (params: any) => Promise<any>;
203
+ generate?: (params: any) => Promise<any>;
204
+ }
205
+ declare function wrapCohere(client: AnyClient, options: WrapOptions): TrackedCohereClient;
206
+ /**
207
+ * Universal wrapper that auto-detects the provider and wraps appropriately.
208
+ *
209
+ * @example
210
+ * import { wrap } from 'deadpipe';
211
+ * import OpenAI from 'openai';
212
+ * import Anthropic from '@anthropic-ai/sdk';
213
+ *
214
+ * const openai = wrap(new OpenAI(), { promptId: 'my_agent' });
215
+ * const anthropic = wrap(new Anthropic(), { promptId: 'my_agent' });
216
+ */
217
+ declare function wrap(client: AnyClient, options: WrapOptions): AnyClient;
163
218
 
164
- export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, wrapOpenAI as default, detectProvider, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
219
+ export { type PromptTelemetry, PromptTracker, type ProviderType, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, type WrapOptions, wrap as default, detectClientProvider, detectProvider, detectRefusal, estimateCost, extractAnthropicResponse, extractCohereResponse, extractGoogleAIResponse, extractMistralResponse, extractOpenAIResponse, extractResponse, track, validateEnumBounds, validateNumericBounds, wrap, wrapAnthropic, wrap as wrapClient, wrapCohere, wrapGoogleAI, wrapMistral, wrapOpenAI };
package/dist/index.d.ts CHANGED
@@ -2,32 +2,35 @@
2
2
  * Deadpipe - LLM observability that answers one question:
3
3
  * "Is this prompt behaving the same as when it was last safe?"
4
4
  *
5
- * @example Recommended: Wrap your client (zero code changes)
6
- * import { wrapOpenAI } from 'deadpipe';
5
+ * Supports: OpenAI, Anthropic, Google AI (Gemini), Mistral, Cohere
6
+ *
7
+ * @example Recommended: Universal wrapper (auto-detects provider)
8
+ * import { wrap } from 'deadpipe';
7
9
  * import OpenAI from 'openai';
10
+ * import Anthropic from '@anthropic-ai/sdk';
8
11
  *
9
- * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
10
- * // All calls automatically tracked with full context
11
- * const response = await client.chat.completions.create({
12
- * model: 'gpt-4',
13
- * messages: [{ role: 'user', content: 'Process refund for order 1938' }]
14
- * });
12
+ * // Works with any supported provider
13
+ * const openai = wrap(new OpenAI(), { promptId: 'checkout_agent' });
14
+ * const anthropic = wrap(new Anthropic(), { promptId: 'support_agent' });
15
+ *
16
+ * @example Provider-specific wrappers
17
+ * import { wrapOpenAI, wrapAnthropic } from 'deadpipe';
18
+ *
19
+ * const openai = wrapOpenAI(new OpenAI(), { promptId: 'my_agent' });
20
+ * const anthropic = wrapAnthropic(new Anthropic(), { promptId: 'my_agent' });
15
21
  *
16
22
  * @example Advanced: Manual tracking (for streaming, custom logic, etc.)
17
23
  * import { track } from 'deadpipe';
18
- * import OpenAI from 'openai';
19
- *
20
- * const client = new OpenAI();
21
- * const params = { model: 'gpt-4', messages: [...] };
22
24
  *
23
25
  * const response = await track('checkout_agent', async (t) => {
24
26
  * const response = await client.chat.completions.create(params);
25
- * t.record(response, undefined, params); // Pass params to capture input
27
+ * t.record(response, undefined, params);
26
28
  * return response;
27
29
  * });
28
30
  */
29
- declare const VERSION = "2.0.0";
31
+ declare const VERSION = "3.0.0";
30
32
  type StatusType = 'success' | 'error' | 'timeout' | 'empty' | 'schema_violation' | 'refusal';
33
+ type ProviderType = 'openai' | 'anthropic' | 'google' | 'mistral' | 'cohere' | 'unknown';
31
34
  interface PromptTelemetry {
32
35
  prompt_id: string;
33
36
  model?: string;
@@ -89,9 +92,10 @@ interface SchemaValidator {
89
92
  errors?: string[];
90
93
  };
91
94
  }
92
- interface WrapOpenAIOptions extends TrackOptions {
95
+ interface WrapOptions extends TrackOptions {
93
96
  promptId: string;
94
97
  }
98
+ type WrapOpenAIOptions = WrapOptions;
95
99
  declare function estimateCost(model: string, inputTokens: number, outputTokens: number): number | null;
96
100
  declare function detectRefusal(text: string): boolean;
97
101
  declare function validateEnumBounds(data: Record<string, unknown>, enumFields?: Record<string, unknown[]>): boolean;
@@ -100,7 +104,12 @@ declare function validateNumericBounds(data: Record<string, unknown>, numericBou
100
104
  * Auto-detect provider from response object.
101
105
  * Checks for provider-specific response structures.
102
106
  */
103
- declare function detectProvider(response: any): 'openai' | 'anthropic' | 'unknown';
107
+ declare function detectProvider(response: any): ProviderType;
108
+ /**
109
+ * Auto-detect provider from client object.
110
+ * Checks for provider-specific client structures.
111
+ */
112
+ declare function detectClientProvider(client: any): ProviderType;
104
113
  interface ExtractedResponse {
105
114
  model: string;
106
115
  content: string;
@@ -116,6 +125,13 @@ interface ExtractedResponse {
116
125
  }
117
126
  declare function extractOpenAIResponse(response: any): ExtractedResponse;
118
127
  declare function extractAnthropicResponse(response: any): ExtractedResponse;
128
+ declare function extractGoogleAIResponse(response: any): ExtractedResponse;
129
+ declare function extractMistralResponse(response: any): ExtractedResponse;
130
+ declare function extractCohereResponse(response: any): ExtractedResponse;
131
+ /**
132
+ * Extract response data based on detected or specified provider.
133
+ */
134
+ declare function extractResponse(response: any, provider?: ProviderType): ExtractedResponse;
119
135
  declare class PromptTracker {
120
136
  private promptId;
121
137
  private apiKey;
@@ -149,16 +165,55 @@ declare class PromptTracker {
149
165
  getTelemetry(): PromptTelemetry;
150
166
  }
151
167
  declare function track<T>(promptId: string, fn: (tracker: PromptTracker) => Promise<T>, options?: TrackOptions): Promise<T>;
152
- type OpenAIClient = any;
168
+ type AnyClient = any;
153
169
  interface TrackedCompletions {
154
170
  create: (params: any) => Promise<any>;
155
171
  }
156
172
  interface TrackedChat {
157
173
  completions: TrackedCompletions;
158
174
  }
159
- interface TrackedOpenAIClient extends OpenAIClient {
175
+ interface TrackedOpenAIClient extends AnyClient {
160
176
  chat: TrackedChat;
161
177
  }
162
- declare function wrapOpenAI(client: OpenAIClient, options: WrapOpenAIOptions): TrackedOpenAIClient;
178
+ declare function wrapOpenAI(client: AnyClient, options: WrapOptions): TrackedOpenAIClient;
179
+ interface TrackedMessages {
180
+ create: (params: any) => Promise<any>;
181
+ }
182
+ interface TrackedAnthropicClient extends AnyClient {
183
+ messages: TrackedMessages;
184
+ }
185
+ declare function wrapAnthropic(client: AnyClient, options: WrapOptions): TrackedAnthropicClient;
186
+ interface TrackedGenerativeModel {
187
+ generateContent: (params: any) => Promise<any>;
188
+ startChat: (params?: any) => any;
189
+ }
190
+ interface TrackedGoogleAIClient extends AnyClient {
191
+ getGenerativeModel: (params: any) => TrackedGenerativeModel;
192
+ }
193
+ declare function wrapGoogleAI(client: AnyClient, options: WrapOptions): TrackedGoogleAIClient;
194
+ interface TrackedMistralChat {
195
+ complete: (params: any) => Promise<any>;
196
+ }
197
+ interface TrackedMistralClient extends AnyClient {
198
+ chat: TrackedMistralChat;
199
+ }
200
+ declare function wrapMistral(client: AnyClient, options: WrapOptions): TrackedMistralClient;
201
+ interface TrackedCohereClient extends AnyClient {
202
+ chat: (params: any) => Promise<any>;
203
+ generate?: (params: any) => Promise<any>;
204
+ }
205
+ declare function wrapCohere(client: AnyClient, options: WrapOptions): TrackedCohereClient;
206
+ /**
207
+ * Universal wrapper that auto-detects the provider and wraps appropriately.
208
+ *
209
+ * @example
210
+ * import { wrap } from 'deadpipe';
211
+ * import OpenAI from 'openai';
212
+ * import Anthropic from '@anthropic-ai/sdk';
213
+ *
214
+ * const openai = wrap(new OpenAI(), { promptId: 'my_agent' });
215
+ * const anthropic = wrap(new Anthropic(), { promptId: 'my_agent' });
216
+ */
217
+ declare function wrap(client: AnyClient, options: WrapOptions): AnyClient;
163
218
 
164
- export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, wrapOpenAI as default, detectProvider, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
219
+ export { type PromptTelemetry, PromptTracker, type ProviderType, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, type WrapOptions, wrap as default, detectClientProvider, detectProvider, detectRefusal, estimateCost, extractAnthropicResponse, extractCohereResponse, extractGoogleAIResponse, extractMistralResponse, extractOpenAIResponse, extractResponse, track, validateEnumBounds, validateNumericBounds, wrap, wrapAnthropic, wrap as wrapClient, wrapCohere, wrapGoogleAI, wrapMistral, wrapOpenAI };