deadpipe 2.0.0 → 2.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,36 +14,37 @@ pnpm add deadpipe
 
 ## Quick Start
 
-### Option 1: Context Manager (Recommended)
+**Recommended: Wrap your client (zero code changes, automatic context capture)**
 
 ```typescript
-import { track } from 'deadpipe';
+import { wrapOpenAI } from 'deadpipe';
 import OpenAI from 'openai';
 
-const client = new OpenAI();
+const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
 
-const response = await track('checkout_agent', async (t) => {
-  const response = await client.chat.completions.create({
-    model: 'gpt-4',
-    messages: [{ role: 'user', content: 'Process refund for order 1938' }]
-  });
-  t.record(response);
-  return response;
+// All calls automatically tracked with full input/output context
+const response = await client.chat.completions.create({
+  model: 'gpt-4',
+  messages: [{ role: 'user', content: 'Process refund for order 1938' }]
 });
 ```
 
-### Option 2: Auto-Wrapping (Zero Code Changes)
+**Advanced: Manual tracking (for streaming, custom logic, etc.)**
 
 ```typescript
-import { wrapOpenAI } from 'deadpipe';
+import { track } from 'deadpipe';
 import OpenAI from 'openai';
 
-const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
-
-// All calls automatically tracked
-const response = await client.chat.completions.create({
+const client = new OpenAI();
+const params = {
   model: 'gpt-4',
   messages: [{ role: 'user', content: 'Process refund for order 1938' }]
+};
+
+const response = await track('checkout_agent', async (t) => {
+  const response = await client.chat.completions.create(params);
+  t.record(response, undefined, params); // Pass params to capture input
+  return response;
 });
 ```
 
@@ -105,12 +106,14 @@ Every prompt execution captures:
 ### Track Streaming Responses
 
 ```typescript
+const params = {
+  model: 'gpt-4',
+  messages: [{ role: 'user', content: 'Tell me a story' }],
+  stream: true,
+};
+
 const response = await track('streaming_agent', async (t) => {
-  const stream = await client.chat.completions.create({
-    model: 'gpt-4',
-    messages: [{ role: 'user', content: 'Tell me a story' }],
-    stream: true,
-  });
+  const stream = await client.chat.completions.create(params);
 
   let fullContent = '';
   for await (const chunk of stream) {
@@ -120,12 +123,12 @@ const response = await track('streaming_agent', async (t) => {
     }
   }
 
-  // Record manually for streams
+  // Record manually for streams - pass input params to capture context
   t.record({
     model: 'gpt-4',
    choices: [{ message: { content: fullContent } }],
    usage: { prompt_tokens: 10, completion_tokens: 100, total_tokens: 110 }
-  });
+  }, undefined, params);
 
   return fullContent;
 });
@@ -162,9 +165,9 @@ const response = await track('claude_agent', async (t) => {
     max_tokens: 1024,
     messages: [{ role: 'user', content: 'Hello, Claude!' }]
   });
-  t.record(response);
+  t.record(response); // Provider auto-detected from response
   return response;
-}, { provider: 'anthropic' });
+});
 ```
 
 ### Environment-Based Configuration
@@ -197,17 +200,11 @@ await track('checkout_agent', fn, {
   appId: 'my-app',
   environment: 'production',
   version: '1.2.3',
-  provider: 'openai', // or 'anthropic'
 
   // Validation
   schema: { validate: (data) => ({ success: true, data }) },
   enumFields: { status: ['pending', 'approved', 'rejected'] },
   numericBounds: { amount: [0, 10000] },
-
-  // Context (for change detection)
-  messages: [...],
-  tools: [...],
-  systemPrompt: 'You are a helpful assistant...',
 });
 ```
 
@@ -284,12 +281,22 @@ Returns: Wrapped client with identical API
 
 The tracker object passed to your function:
 
-- `record(response)` - Record the LLM response
+- `record(response, parsedOutput?, input?)` - Record the LLM response
+  - `response` - The LLM response object
+  - `parsedOutput` - Optional pre-parsed output (if you already parsed JSON)
+  - `input` - Optional input parameters (messages, tools, etc.) to capture context
 - `markFirstToken()` - Mark when first token received (streaming)
 - `markRetry()` - Mark a retry attempt
 - `recordError(error)` - Record an error
 - `getTelemetry()` - Get the telemetry object
 
+**Tip:** Always pass the input parameters to `record()` to capture full context:
+```typescript
+const params = { model: 'gpt-4', messages: [...] };
+const response = await client.chat.completions.create(params);
+t.record(response, undefined, params); // Pass params to capture input context
+```
+
 ### `estimateCost(model, inputTokens, outputTokens)`
 
 Estimate USD cost for a completion.
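Note that the manual-tracking pattern above also composes with `markFirstToken()` for time-to-first-token measurement on streams. A minimal sketch, assuming the OpenAI SDK's async-iterable streaming API and the `track`/`record` signatures shown in this diff:

```typescript
import { track } from 'deadpipe';
import OpenAI from 'openai';

const client = new OpenAI();
const params = {
  model: 'gpt-4',
  messages: [{ role: 'user' as const, content: 'Tell me a story' }],
  stream: true as const,
};

const story = await track('streaming_agent', async (t) => {
  const stream = await client.chat.completions.create(params);

  let fullContent = '';
  let first = true;
  for await (const chunk of stream) {
    if (first) {
      t.markFirstToken(); // time-to-first-token telemetry
      first = false;
    }
    fullContent += chunk.choices[0]?.delta?.content ?? '';
  }

  // Streams never yield a final response object, so record a synthetic one
  // and pass the input params so hashes and previews are still captured.
  t.record(
    { model: params.model, choices: [{ message: { content: fullContent } }] },
    undefined,
    params
  );
  return fullContent;
});
```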
package/dist/index.d.mts CHANGED
@@ -2,28 +2,29 @@
  * Deadpipe - LLM observability that answers one question:
  * "Is this prompt behaving the same as when it was last safe?"
  *
- * @example
+ * @example Recommended: Wrap your client (zero code changes)
+ * import { wrapOpenAI } from 'deadpipe';
+ * import OpenAI from 'openai';
+ *
+ * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
+ * // All calls automatically tracked with full context
+ * const response = await client.chat.completions.create({
+ *   model: 'gpt-4',
+ *   messages: [{ role: 'user', content: 'Process refund for order 1938' }]
+ * });
+ *
+ * @example Advanced: Manual tracking (for streaming, custom logic, etc.)
  * import { track } from 'deadpipe';
  * import OpenAI from 'openai';
  *
  * const client = new OpenAI();
+ * const params = { model: 'gpt-4', messages: [...] };
  *
- * const { response, tracker } = await track('checkout_agent', async (t) => {
- *   const response = await client.chat.completions.create({
- *     model: 'gpt-4',
- *     messages: [{ role: 'user', content: 'Process refund for order 1938' }]
- *   });
- *   t.record(response);
+ * const response = await track('checkout_agent', async (t) => {
+ *   const response = await client.chat.completions.create(params);
+ *   t.record(response, undefined, params); // Pass params to capture input
  *   return response;
  * });
- *
- * @example Auto-wrapping (zero code changes):
- * import { wrapOpenAI } from 'deadpipe';
- * import OpenAI from 'openai';
- *
- * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
- * // All calls automatically tracked
- * const response = await client.chat.completions.create(...);
  */
 declare const VERSION = "2.0.0";
 type StatusType = 'success' | 'error' | 'timeout' | 'empty' | 'schema_violation' | 'refusal';
@@ -65,6 +66,9 @@ interface PromptTelemetry {
     prompt_hash?: string;
     tool_schema_hash?: string;
     system_prompt_hash?: string;
+    input_preview?: string;
+    output_preview?: string;
+    system_prompt_preview?: string;
     status?: StatusType;
 }
 interface TrackOptions {
@@ -74,17 +78,9 @@ interface TrackOptions {
     appId?: string;
     environment?: string;
     version?: string;
-    provider?: 'openai' | 'anthropic' | string;
     schema?: SchemaValidator;
     enumFields?: Record<string, unknown[]>;
     numericBounds?: Record<string, [number | null, number | null]>;
-    messages?: Array<{
-        role: string;
-        content: string;
-        [key: string]: unknown;
-    }>;
-    tools?: Array<Record<string, unknown>>;
-    systemPrompt?: string;
 }
 interface SchemaValidator {
     validate: (data: unknown) => {
@@ -93,13 +89,18 @@ interface SchemaValidator {
         errors?: string[];
     };
 }
-interface WrapOpenAIOptions extends Omit<TrackOptions, 'messages' | 'tools' | 'systemPrompt'> {
+interface WrapOpenAIOptions extends TrackOptions {
     promptId: string;
 }
 declare function estimateCost(model: string, inputTokens: number, outputTokens: number): number | null;
 declare function detectRefusal(text: string): boolean;
 declare function validateEnumBounds(data: Record<string, unknown>, enumFields?: Record<string, unknown[]>): boolean;
 declare function validateNumericBounds(data: Record<string, unknown>, numericBounds?: Record<string, [number | null, number | null]>): boolean;
+/**
+ * Auto-detect provider from response object.
+ * Checks for provider-specific response structures.
+ */
+declare function detectProvider(response: any): 'openai' | 'anthropic' | 'unknown';
 interface ExtractedResponse {
     model: string;
     content: string;
@@ -123,13 +124,14 @@ declare class PromptTracker {
     private appId;
     private environment;
     private versionStr;
-    private provider;
     private schema;
     private enumFields;
    private numericBounds;
     private promptHash;
     private toolSchemaHash;
     private systemPromptHash;
+    private messages;
+    private systemPrompt;
     private startTime;
     private firstTokenTime;
     private endTime;
@@ -140,7 +142,7 @@ declare class PromptTracker {
     start(): void;
     markFirstToken(): void;
     markRetry(): void;
-    record(response: any, parsedOutput?: unknown): unknown;
+    record(response: any, parsedOutput?: unknown, input?: any): unknown;
     recordError(error: Error): void;
     private send;
     isRecorded(): boolean;
@@ -159,4 +161,4 @@ interface TrackedOpenAIClient extends OpenAIClient {
 }
 declare function wrapOpenAI(client: OpenAIClient, options: WrapOpenAIOptions): TrackedOpenAIClient;
 
-export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
+export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, wrapOpenAI as default, detectProvider, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
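The newly exported `detectProvider` replaces the removed `provider` option: the provider is now inferred from the response shape (Anthropic's `content` array / `stop_reason`, OpenAI's `choices` / `output`), falling back to the model name. A quick sketch of what the heuristics in this diff return:

```typescript
import { detectProvider } from 'deadpipe';

// Anthropic-shaped response: content array plus stop_reason
detectProvider({ content: [{ type: 'text', text: 'hi' }], stop_reason: 'end_turn' }); // 'anthropic'

// OpenAI-shaped response: choices array
detectProvider({ choices: [{ message: { content: 'hi' } }] }); // 'openai'

// Structure ambiguous: fall back to the model name
detectProvider({ model: 'claude-3-opus' }); // 'anthropic'
detectProvider({ model: 'gpt-4o' });        // 'openai'
detectProvider(null);                       // 'unknown'
```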
package/dist/index.d.ts CHANGED
@@ -2,28 +2,29 @@
  * Deadpipe - LLM observability that answers one question:
  * "Is this prompt behaving the same as when it was last safe?"
  *
- * @example
+ * @example Recommended: Wrap your client (zero code changes)
+ * import { wrapOpenAI } from 'deadpipe';
+ * import OpenAI from 'openai';
+ *
+ * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
+ * // All calls automatically tracked with full context
+ * const response = await client.chat.completions.create({
+ *   model: 'gpt-4',
+ *   messages: [{ role: 'user', content: 'Process refund for order 1938' }]
+ * });
+ *
+ * @example Advanced: Manual tracking (for streaming, custom logic, etc.)
  * import { track } from 'deadpipe';
  * import OpenAI from 'openai';
  *
  * const client = new OpenAI();
+ * const params = { model: 'gpt-4', messages: [...] };
  *
- * const { response, tracker } = await track('checkout_agent', async (t) => {
- *   const response = await client.chat.completions.create({
- *     model: 'gpt-4',
- *     messages: [{ role: 'user', content: 'Process refund for order 1938' }]
- *   });
- *   t.record(response);
+ * const response = await track('checkout_agent', async (t) => {
+ *   const response = await client.chat.completions.create(params);
+ *   t.record(response, undefined, params); // Pass params to capture input
  *   return response;
  * });
- *
- * @example Auto-wrapping (zero code changes):
- * import { wrapOpenAI } from 'deadpipe';
- * import OpenAI from 'openai';
- *
- * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
- * // All calls automatically tracked
- * const response = await client.chat.completions.create(...);
  */
 declare const VERSION = "2.0.0";
 type StatusType = 'success' | 'error' | 'timeout' | 'empty' | 'schema_violation' | 'refusal';
@@ -65,6 +66,9 @@ interface PromptTelemetry {
     prompt_hash?: string;
     tool_schema_hash?: string;
     system_prompt_hash?: string;
+    input_preview?: string;
+    output_preview?: string;
+    system_prompt_preview?: string;
     status?: StatusType;
 }
 interface TrackOptions {
@@ -74,17 +78,9 @@ interface TrackOptions {
     appId?: string;
     environment?: string;
     version?: string;
-    provider?: 'openai' | 'anthropic' | string;
     schema?: SchemaValidator;
     enumFields?: Record<string, unknown[]>;
     numericBounds?: Record<string, [number | null, number | null]>;
-    messages?: Array<{
-        role: string;
-        content: string;
-        [key: string]: unknown;
-    }>;
-    tools?: Array<Record<string, unknown>>;
-    systemPrompt?: string;
 }
 interface SchemaValidator {
     validate: (data: unknown) => {
@@ -93,13 +89,18 @@ interface SchemaValidator {
         errors?: string[];
     };
 }
-interface WrapOpenAIOptions extends Omit<TrackOptions, 'messages' | 'tools' | 'systemPrompt'> {
+interface WrapOpenAIOptions extends TrackOptions {
     promptId: string;
 }
 declare function estimateCost(model: string, inputTokens: number, outputTokens: number): number | null;
 declare function detectRefusal(text: string): boolean;
 declare function validateEnumBounds(data: Record<string, unknown>, enumFields?: Record<string, unknown[]>): boolean;
 declare function validateNumericBounds(data: Record<string, unknown>, numericBounds?: Record<string, [number | null, number | null]>): boolean;
+/**
+ * Auto-detect provider from response object.
+ * Checks for provider-specific response structures.
+ */
+declare function detectProvider(response: any): 'openai' | 'anthropic' | 'unknown';
 interface ExtractedResponse {
     model: string;
     content: string;
@@ -123,13 +124,14 @@ declare class PromptTracker {
     private appId;
     private environment;
     private versionStr;
-    private provider;
     private schema;
     private enumFields;
     private numericBounds;
     private promptHash;
     private toolSchemaHash;
     private systemPromptHash;
+    private messages;
+    private systemPrompt;
     private startTime;
     private firstTokenTime;
     private endTime;
@@ -140,7 +142,7 @@ declare class PromptTracker {
     start(): void;
     markFirstToken(): void;
     markRetry(): void;
-    record(response: any, parsedOutput?: unknown): unknown;
+    record(response: any, parsedOutput?: unknown, input?: any): unknown;
     recordError(error: Error): void;
     private send;
     isRecorded(): boolean;
@@ -159,4 +161,4 @@ interface TrackedOpenAIClient extends OpenAIClient {
 }
 declare function wrapOpenAI(client: OpenAIClient, options: WrapOpenAIOptions): TrackedOpenAIClient;
 
-export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
+export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, wrapOpenAI as default, detectProvider, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
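Both declaration files also add `wrapOpenAI as default`, so the default and named imports now resolve to the same function:

```typescript
import OpenAI from 'openai';

// Named import, as in 2.0.0
import { wrapOpenAI } from 'deadpipe';

// Default import, new in this release (same function as wrapOpenAI)
import deadpipe from 'deadpipe';

const client = deadpipe(new OpenAI(), { promptId: 'checkout_agent' });
```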
package/dist/index.js CHANGED
@@ -22,6 +22,8 @@ var index_exports = {};
 __export(index_exports, {
   PromptTracker: () => PromptTracker,
   VERSION: () => VERSION,
+  default: () => index_default,
+  detectProvider: () => detectProvider,
   detectRefusal: () => detectRefusal,
   estimateCost: () => estimateCost,
   extractAnthropicResponse: () => extractAnthropicResponse,
@@ -132,6 +134,25 @@ function validateNumericBounds(data, numericBounds) {
   }
   return true;
 }
+function detectProvider(response) {
+  if (!response) return "unknown";
+  if (Array.isArray(response.content) || response.stop_reason !== void 0) {
+    return "anthropic";
+  }
+  if (response.choices !== void 0 || response.output !== void 0) {
+    return "openai";
+  }
+  if (response.model) {
+    const modelLower = String(response.model).toLowerCase();
+    if (modelLower.includes("claude")) {
+      return "anthropic";
+    }
+    if (modelLower.includes("gpt") || modelLower.includes("o1")) {
+      return "openai";
+    }
+  }
+  return "unknown";
+}
 function extractOpenAIResponse(response) {
   const result = {
     model: "",
@@ -231,15 +252,17 @@ var PromptTracker = class {
   appId;
   environment;
   versionStr;
-  provider;
   // Validation
   schema;
   enumFields;
   numericBounds;
-  // Context hashes
+  // Context hashes (auto-extracted from response)
   promptHash;
   toolSchemaHash;
   systemPromptHash;
+  // Context for previews (auto-extracted from response)
+  messages;
+  systemPrompt;
   // Timing
   startTime = null;
   firstTokenTime = null;
@@ -256,22 +279,19 @@
     this.appId = options.appId || process.env.DEADPIPE_APP_ID;
     this.environment = options.environment || process.env.DEADPIPE_ENVIRONMENT;
     this.versionStr = options.version || process.env.DEADPIPE_VERSION || process.env.GIT_COMMIT;
-    this.provider = options.provider || "openai";
     this.schema = options.schema;
     this.enumFields = options.enumFields;
     this.numericBounds = options.numericBounds;
-    this.promptHash = options.messages ? hashMessages(options.messages) : void 0;
-    this.toolSchemaHash = hashTools(options.tools);
-    this.systemPromptHash = options.systemPrompt ? hashContentSync(options.systemPrompt) : void 0;
+    this.messages = void 0;
+    this.systemPrompt = void 0;
+    this.promptHash = void 0;
+    this.toolSchemaHash = void 0;
+    this.systemPromptHash = void 0;
     this.telemetry = {
       prompt_id: this.promptId,
-      provider: this.provider,
       app_id: this.appId,
       environment: this.environment,
       version: this.versionStr,
-      prompt_hash: this.promptHash,
-      tool_schema_hash: this.toolSchemaHash,
-      system_prompt_hash: this.systemPromptHash,
       status: "success"
     };
   }
@@ -290,11 +310,44 @@
     this.telemetry.retry_count = this.retryCount;
   }
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  record(response, parsedOutput) {
+  record(response, parsedOutput, input) {
     this.endTime = Date.now();
     this.telemetry.end_time = new Date(this.endTime).toISOString();
     this.telemetry.total_latency = this.startTime ? this.endTime - this.startTime : 0;
-    const extracted = this.provider === "anthropic" ? extractAnthropicResponse(response) : extractOpenAIResponse(response);
+    const detectedProvider = detectProvider(response);
+    this.telemetry.provider = detectedProvider !== "unknown" ? detectedProvider : "openai";
+    const extracted = detectedProvider === "anthropic" ? extractAnthropicResponse(response) : extractOpenAIResponse(response);
+    if (input) {
+      const messages = input.messages || [];
+      const tools = input.tools;
+      let systemPrompt;
+      for (const msg of messages) {
+        if (msg.role === "system") {
+          systemPrompt = msg.content || "";
+          break;
+        }
+      }
+      if (messages.length > 0) {
+        this.promptHash = hashMessages(messages);
+        this.messages = messages;
+      }
+      if (tools) {
+        this.toolSchemaHash = hashTools(tools);
+      }
+      if (systemPrompt) {
+        this.systemPromptHash = hashContentSync(systemPrompt);
+        this.systemPrompt = systemPrompt;
+      }
+    }
+    if (this.promptHash) {
+      this.telemetry.prompt_hash = this.promptHash;
+    }
+    if (this.toolSchemaHash) {
+      this.telemetry.tool_schema_hash = this.toolSchemaHash;
+    }
+    if (this.systemPromptHash) {
+      this.telemetry.system_prompt_hash = this.systemPromptHash;
+    }
     this.telemetry.model = extracted.model;
     this.telemetry.input_tokens = extracted.inputTokens ?? void 0;
     this.telemetry.output_tokens = extracted.outputTokens ?? void 0;
@@ -304,6 +357,20 @@
     this.telemetry.output_length = content?.length ?? 0;
     this.telemetry.empty_output = !content || content.trim().length === 0;
     this.telemetry.truncated = extracted.finishReason === "length";
+    const MAX_PREVIEW_LENGTH = 2e3;
+    if (content) {
+      this.telemetry.output_preview = content.length > MAX_PREVIEW_LENGTH ? content.substring(0, MAX_PREVIEW_LENGTH) + "..." : content;
+    }
+    if (this.messages && this.messages.length > 0) {
+      const userMessages = this.messages.filter((m) => m.role === "user");
+      if (userMessages.length > 0) {
+        const lastUserMsg = userMessages[userMessages.length - 1].content;
+        this.telemetry.input_preview = lastUserMsg.length > MAX_PREVIEW_LENGTH ? lastUserMsg.substring(0, MAX_PREVIEW_LENGTH) + "..." : lastUserMsg;
+      }
+    }
+    if (this.systemPrompt) {
+      this.telemetry.system_prompt_preview = this.systemPrompt.length > MAX_PREVIEW_LENGTH ? this.systemPrompt.substring(0, MAX_PREVIEW_LENGTH) + "..." : this.systemPrompt;
+    }
     this.telemetry.tool_call_flag = extracted.toolCalls.length > 0;
     this.telemetry.tool_calls_count = extracted.toolCalls.length;
     if (content) {
@@ -399,17 +466,25 @@
     this.recorded = true;
   }
   async send() {
-    if (!this.apiKey) return;
+    if (!this.apiKey) {
+      if (process.env.NODE_ENV === "development" || process.env.DEADPIPE_DEBUG === "1") {
+        console.warn("[Deadpipe] DEADPIPE_API_KEY not set. Telemetry will not be sent.");
+      }
+      return;
+    }
     try {
       const controller = new AbortController();
       const timeoutId = setTimeout(() => controller.abort(), this.timeoutMs);
       const payload = {};
       for (const [key, value] of Object.entries(this.telemetry)) {
-        if (value !== void 0) {
-          payload[key] = value;
-        }
+        if (value === void 0 || value === null) continue;
+        if (typeof value === "string" && value === "") continue;
+        if (typeof value === "boolean" && value === false) continue;
+        if (Array.isArray(value) && value.length === 0) continue;
+        if (typeof value === "object" && Object.keys(value).length === 0) continue;
+        payload[key] = value;
       }
-      await fetch(`${this.baseUrl}/prompt`, {
+      fetch(`${this.baseUrl}/prompt`, {
         method: "POST",
         headers: {
           "Content-Type": "application/json",
@@ -417,8 +492,10 @@
         },
         body: JSON.stringify(payload),
         signal: controller.signal
+      }).catch(() => {
+      }).finally(() => {
+        clearTimeout(timeoutId);
       });
-      clearTimeout(timeoutId);
     } catch {
     }
   }
@@ -468,15 +545,10 @@ function wrapOpenAI(client, options) {
         promptId,
         async (t) => {
           const response = await client.chat.completions.create(params);
-          t.record(response);
+          t.record(response, void 0, params);
           return response;
         },
-        {
-          ...trackOptions,
-          messages,
-          tools,
-          systemPrompt
-        }
+        trackOptions
       );
     }
   }
@@ -493,23 +565,22 @@
          promptId,
          async (t) => {
            const response = await client.responses.create(params);
-            t.record(response);
+            t.record(response, void 0, params);
            return response;
          },
-          {
-            ...trackOptions,
-            messages
-          }
+          trackOptions
        );
      }
    };
  }
  return wrappedClient;
 }
+var index_default = wrapOpenAI;
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   PromptTracker,
   VERSION,
+  detectProvider,
   detectRefusal,
   estimateCost,
   extractAnthropicResponse,
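Two behavioral changes in `send()` are worth noting: empty values are now stripped from the telemetry payload, and the POST is no longer awaited (errors are swallowed and the abort timer is cleared in `.finally()`), so telemetry can never block or crash the caller. A standalone sketch of the same pattern; the URL and helper name here are placeholders, not Deadpipe's actual endpoint:

```typescript
// Fire-and-forget POST with an abort timeout, mirroring the send() change above.
function postTelemetry(payload: Record<string, unknown>, timeoutMs = 3000): void {
  // Drop undefined/null, empty strings, `false`, and empty arrays/objects,
  // matching the filtering loop in this diff.
  const compact: Record<string, unknown> = {};
  for (const [key, value] of Object.entries(payload)) {
    if (value === undefined || value === null) continue;
    if (typeof value === 'string' && value === '') continue;
    if (typeof value === 'boolean' && value === false) continue;
    if (Array.isArray(value) && value.length === 0) continue;
    if (typeof value === 'object' && Object.keys(value).length === 0) continue;
    compact[key] = value;
  }

  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);

  // Not awaited: failures are swallowed and the timer is always cleared.
  fetch('https://example.invalid/prompt', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(compact),
    signal: controller.signal,
  })
    .catch(() => {})
    .finally(() => clearTimeout(timeoutId));
}
```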
package/dist/index.mjs CHANGED
@@ -99,6 +99,25 @@ function validateNumericBounds(data, numericBounds) {
   }
   return true;
 }
+function detectProvider(response) {
+  if (!response) return "unknown";
+  if (Array.isArray(response.content) || response.stop_reason !== void 0) {
+    return "anthropic";
+  }
+  if (response.choices !== void 0 || response.output !== void 0) {
+    return "openai";
+  }
+  if (response.model) {
+    const modelLower = String(response.model).toLowerCase();
+    if (modelLower.includes("claude")) {
+      return "anthropic";
+    }
+    if (modelLower.includes("gpt") || modelLower.includes("o1")) {
+      return "openai";
+    }
+  }
+  return "unknown";
+}
 function extractOpenAIResponse(response) {
   const result = {
     model: "",
@@ -198,15 +217,17 @@ var PromptTracker = class {
   appId;
   environment;
   versionStr;
-  provider;
   // Validation
   schema;
   enumFields;
   numericBounds;
-  // Context hashes
+  // Context hashes (auto-extracted from response)
   promptHash;
   toolSchemaHash;
   systemPromptHash;
+  // Context for previews (auto-extracted from response)
+  messages;
+  systemPrompt;
   // Timing
   startTime = null;
   firstTokenTime = null;
@@ -223,22 +244,19 @@
     this.appId = options.appId || process.env.DEADPIPE_APP_ID;
     this.environment = options.environment || process.env.DEADPIPE_ENVIRONMENT;
     this.versionStr = options.version || process.env.DEADPIPE_VERSION || process.env.GIT_COMMIT;
-    this.provider = options.provider || "openai";
     this.schema = options.schema;
     this.enumFields = options.enumFields;
     this.numericBounds = options.numericBounds;
-    this.promptHash = options.messages ? hashMessages(options.messages) : void 0;
-    this.toolSchemaHash = hashTools(options.tools);
-    this.systemPromptHash = options.systemPrompt ? hashContentSync(options.systemPrompt) : void 0;
+    this.messages = void 0;
+    this.systemPrompt = void 0;
+    this.promptHash = void 0;
+    this.toolSchemaHash = void 0;
+    this.systemPromptHash = void 0;
     this.telemetry = {
       prompt_id: this.promptId,
-      provider: this.provider,
       app_id: this.appId,
       environment: this.environment,
       version: this.versionStr,
-      prompt_hash: this.promptHash,
-      tool_schema_hash: this.toolSchemaHash,
-      system_prompt_hash: this.systemPromptHash,
       status: "success"
     };
   }
@@ -257,11 +275,44 @@
     this.telemetry.retry_count = this.retryCount;
   }
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  record(response, parsedOutput) {
+  record(response, parsedOutput, input) {
     this.endTime = Date.now();
     this.telemetry.end_time = new Date(this.endTime).toISOString();
     this.telemetry.total_latency = this.startTime ? this.endTime - this.startTime : 0;
-    const extracted = this.provider === "anthropic" ? extractAnthropicResponse(response) : extractOpenAIResponse(response);
+    const detectedProvider = detectProvider(response);
+    this.telemetry.provider = detectedProvider !== "unknown" ? detectedProvider : "openai";
+    const extracted = detectedProvider === "anthropic" ? extractAnthropicResponse(response) : extractOpenAIResponse(response);
+    if (input) {
+      const messages = input.messages || [];
+      const tools = input.tools;
+      let systemPrompt;
+      for (const msg of messages) {
+        if (msg.role === "system") {
+          systemPrompt = msg.content || "";
+          break;
+        }
+      }
+      if (messages.length > 0) {
+        this.promptHash = hashMessages(messages);
+        this.messages = messages;
+      }
+      if (tools) {
+        this.toolSchemaHash = hashTools(tools);
+      }
+      if (systemPrompt) {
+        this.systemPromptHash = hashContentSync(systemPrompt);
+        this.systemPrompt = systemPrompt;
+      }
+    }
+    if (this.promptHash) {
+      this.telemetry.prompt_hash = this.promptHash;
+    }
+    if (this.toolSchemaHash) {
+      this.telemetry.tool_schema_hash = this.toolSchemaHash;
+    }
+    if (this.systemPromptHash) {
+      this.telemetry.system_prompt_hash = this.systemPromptHash;
+    }
     this.telemetry.model = extracted.model;
     this.telemetry.input_tokens = extracted.inputTokens ?? void 0;
     this.telemetry.output_tokens = extracted.outputTokens ?? void 0;
@@ -271,6 +322,20 @@
     this.telemetry.output_length = content?.length ?? 0;
     this.telemetry.empty_output = !content || content.trim().length === 0;
     this.telemetry.truncated = extracted.finishReason === "length";
+    const MAX_PREVIEW_LENGTH = 2e3;
+    if (content) {
+      this.telemetry.output_preview = content.length > MAX_PREVIEW_LENGTH ? content.substring(0, MAX_PREVIEW_LENGTH) + "..." : content;
+    }
+    if (this.messages && this.messages.length > 0) {
+      const userMessages = this.messages.filter((m) => m.role === "user");
+      if (userMessages.length > 0) {
+        const lastUserMsg = userMessages[userMessages.length - 1].content;
+        this.telemetry.input_preview = lastUserMsg.length > MAX_PREVIEW_LENGTH ? lastUserMsg.substring(0, MAX_PREVIEW_LENGTH) + "..." : lastUserMsg;
+      }
+    }
+    if (this.systemPrompt) {
+      this.telemetry.system_prompt_preview = this.systemPrompt.length > MAX_PREVIEW_LENGTH ? this.systemPrompt.substring(0, MAX_PREVIEW_LENGTH) + "..." : this.systemPrompt;
+    }
     this.telemetry.tool_call_flag = extracted.toolCalls.length > 0;
     this.telemetry.tool_calls_count = extracted.toolCalls.length;
     if (content) {
@@ -366,17 +431,25 @@
     this.recorded = true;
   }
   async send() {
-    if (!this.apiKey) return;
+    if (!this.apiKey) {
+      if (process.env.NODE_ENV === "development" || process.env.DEADPIPE_DEBUG === "1") {
+        console.warn("[Deadpipe] DEADPIPE_API_KEY not set. Telemetry will not be sent.");
+      }
+      return;
+    }
     try {
       const controller = new AbortController();
       const timeoutId = setTimeout(() => controller.abort(), this.timeoutMs);
       const payload = {};
       for (const [key, value] of Object.entries(this.telemetry)) {
-        if (value !== void 0) {
-          payload[key] = value;
-        }
+        if (value === void 0 || value === null) continue;
+        if (typeof value === "string" && value === "") continue;
+        if (typeof value === "boolean" && value === false) continue;
+        if (Array.isArray(value) && value.length === 0) continue;
+        if (typeof value === "object" && Object.keys(value).length === 0) continue;
+        payload[key] = value;
       }
-      await fetch(`${this.baseUrl}/prompt`, {
+      fetch(`${this.baseUrl}/prompt`, {
         method: "POST",
         headers: {
           "Content-Type": "application/json",
@@ -384,8 +457,10 @@
         },
         body: JSON.stringify(payload),
         signal: controller.signal
+      }).catch(() => {
+      }).finally(() => {
+        clearTimeout(timeoutId);
       });
-      clearTimeout(timeoutId);
     } catch {
     }
   }
@@ -435,15 +510,10 @@ function wrapOpenAI(client, options) {
         promptId,
         async (t) => {
           const response = await client.chat.completions.create(params);
-          t.record(response);
+          t.record(response, void 0, params);
           return response;
         },
-        {
-          ...trackOptions,
-          messages,
-          tools,
-          systemPrompt
-        }
+        trackOptions
      );
    }
  }
@@ -460,22 +530,22 @@ function wrapOpenAI(client, options) {
          promptId,
          async (t) => {
            const response = await client.responses.create(params);
-            t.record(response);
+            t.record(response, void 0, params);
            return response;
          },
-          {
-            ...trackOptions,
-            messages
-          }
+          trackOptions
        );
      }
    };
  }
  return wrappedClient;
 }
+var index_default = wrapOpenAI;
 export {
   PromptTracker,
   VERSION,
+  index_default as default,
+  detectProvider,
   detectRefusal,
   estimateCost,
   extractAnthropicResponse,
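The new `input_preview`, `output_preview`, and `system_prompt_preview` fields are capped at 2,000 characters (`MAX_PREVIEW_LENGTH = 2e3`) with a trailing ellipsis, and `input_preview` takes the last user message. A tiny sketch of the truncation rule, with a hypothetical helper name:

```typescript
const MAX_PREVIEW_LENGTH = 2000;

// Hypothetical helper mirroring the inline truncation in record().
function toPreview(text: string): string {
  return text.length > MAX_PREVIEW_LENGTH
    ? text.substring(0, MAX_PREVIEW_LENGTH) + '...'
    : text;
}

// A 5,000-char output becomes a 2,003-char preview: 2,000 chars plus '...'.
console.log(toPreview('x'.repeat(5000)).length); // 2003
```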
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "deadpipe",
-  "version": "2.0.0",
+  "version": "2.0.2",
   "description": "LLM observability that answers: Is this prompt behaving the same as when it was last safe?",
   "keywords": [
     "llm",