deadpipe 2.0.1 → 2.0.2
This diff shows the contents of publicly available package versions as published to their respective public registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in those registries.
- package/README.md +39 -32
- package/dist/index.d.mts +24 -27
- package/dist/index.d.ts +24 -27
- package/dist/index.js +85 -33
- package/dist/index.mjs +84 -33
- package/package.json +1 -1
package/README.md
CHANGED
@@ -14,36 +14,37 @@ pnpm add deadpipe
 
 ## Quick Start
 
-
+**Recommended: Wrap your client (zero code changes, automatic context capture)**
 
 ```typescript
-import {
+import { wrapOpenAI } from 'deadpipe';
 import OpenAI from 'openai';
 
-const client = new OpenAI();
+const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
 
-
-
-
-
-});
-t.record(response);
-return response;
+// All calls automatically tracked with full input/output context
+const response = await client.chat.completions.create({
+  model: 'gpt-4',
+  messages: [{ role: 'user', content: 'Process refund for order 1938' }]
 });
 ```
 
-
+**Advanced: Manual tracking (for streaming, custom logic, etc.)**
 
 ```typescript
-import {
+import { track } from 'deadpipe';
 import OpenAI from 'openai';
 
-const client =
-
-// All calls automatically tracked
-const response = await client.chat.completions.create({
+const client = new OpenAI();
+const params = {
   model: 'gpt-4',
   messages: [{ role: 'user', content: 'Process refund for order 1938' }]
+};
+
+const response = await track('checkout_agent', async (t) => {
+  const response = await client.chat.completions.create(params);
+  t.record(response, undefined, params); // Pass params to capture input
+  return response;
 });
 ```
 
@@ -105,12 +106,14 @@ Every prompt execution captures:
 ### Track Streaming Responses
 
 ```typescript
+const params = {
+  model: 'gpt-4',
+  messages: [{ role: 'user', content: 'Tell me a story' }],
+  stream: true,
+};
+
 const response = await track('streaming_agent', async (t) => {
-  const stream = await client.chat.completions.create(
-    model: 'gpt-4',
-    messages: [{ role: 'user', content: 'Tell me a story' }],
-    stream: true,
-  });
+  const stream = await client.chat.completions.create(params);
 
   let fullContent = '';
   for await (const chunk of stream) {
@@ -120,12 +123,12 @@ const response = await track('streaming_agent', async (t) => {
     }
   }
 
-  // Record manually for streams
+  // Record manually for streams - pass input params to capture context
   t.record({
     model: 'gpt-4',
     choices: [{ message: { content: fullContent } }],
     usage: { prompt_tokens: 10, completion_tokens: 100, total_tokens: 110 }
-  });
+  }, undefined, params);
 
   return fullContent;
 });
@@ -162,9 +165,9 @@ const response = await track('claude_agent', async (t) => {
     max_tokens: 1024,
     messages: [{ role: 'user', content: 'Hello, Claude!' }]
   });
-  t.record(response);
+  t.record(response); // Provider auto-detected from response
   return response;
-}
+});
 ```
 
 ### Environment-Based Configuration
@@ -197,17 +200,11 @@ await track('checkout_agent', fn, {
   appId: 'my-app',
   environment: 'production',
   version: '1.2.3',
-  provider: 'openai', // or 'anthropic'
 
   // Validation
   schema: { validate: (data) => ({ success: true, data }) },
   enumFields: { status: ['pending', 'approved', 'rejected'] },
   numericBounds: { amount: [0, 10000] },
-
-  // Context (for change detection)
-  messages: [...],
-  tools: [...],
-  systemPrompt: 'You are a helpful assistant...',
 });
 ```
 
@@ -284,12 +281,22 @@ Returns: Wrapped client with identical API
 
 The tracker object passed to your function:
 
-- `record(response)` - Record the LLM response
+- `record(response, parsedOutput?, input?)` - Record the LLM response
+  - `response` - The LLM response object
+  - `parsedOutput` - Optional pre-parsed output (if you already parsed JSON)
+  - `input` - Optional input parameters (messages, tools, etc.) to capture context
 - `markFirstToken()` - Mark when first token received (streaming)
 - `markRetry()` - Mark a retry attempt
 - `recordError(error)` - Record an error
 - `getTelemetry()` - Get the telemetry object
 
+**Tip:** Always pass the input parameters to `record()` to capture full context:
+```typescript
+const params = { model: 'gpt-4', messages: [...] };
+const response = await client.chat.completions.create(params);
+t.record(response, undefined, params); // Pass params to capture input context
+```
+
 ### `estimateCost(model, inputTokens, outputTokens)`
 
 Estimate USD cost for a completion.
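Read together, these README hunks describe one migration for existing `track()` callers: the `provider`, `messages`, `tools`, and `systemPrompt` options are gone, and input context now travels as the third argument of `t.record()`. A minimal before/after sketch of that change, assuming an OpenAI `client` and a callback `fn` in scope as in the README's own examples:

```typescript
// 2.0.1: provider and input context were passed as track() options
await track('checkout_agent', fn, {
  provider: 'openai',
  messages: [{ role: 'user', content: 'Process refund for order 1938' }],
  systemPrompt: 'You are a helpful assistant...',
});

// 2.0.2: the provider is auto-detected from the response, and context is
// captured from the request params handed to t.record(response, parsedOutput, input)
const params = {
  model: 'gpt-4',
  messages: [{ role: 'user', content: 'Process refund for order 1938' }],
};
await track('checkout_agent', async (t) => {
  const response = await client.chat.completions.create(params);
  t.record(response, undefined, params);
  return response;
});
```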
package/dist/index.d.mts
CHANGED
@@ -2,28 +2,29 @@
  * Deadpipe - LLM observability that answers one question:
  * "Is this prompt behaving the same as when it was last safe?"
  *
- * @example
+ * @example Recommended: Wrap your client (zero code changes)
+ * import { wrapOpenAI } from 'deadpipe';
+ * import OpenAI from 'openai';
+ *
+ * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
+ * // All calls automatically tracked with full context
+ * const response = await client.chat.completions.create({
+ *   model: 'gpt-4',
+ *   messages: [{ role: 'user', content: 'Process refund for order 1938' }]
+ * });
+ *
+ * @example Advanced: Manual tracking (for streaming, custom logic, etc.)
  * import { track } from 'deadpipe';
  * import OpenAI from 'openai';
  *
  * const client = new OpenAI();
+ * const params = { model: 'gpt-4', messages: [...] };
  *
- * const
- * const response = await client.chat.completions.create(
- *
- *   messages: [{ role: 'user', content: 'Process refund for order 1938' }]
- * });
- * t.record(response);
+ * const response = await track('checkout_agent', async (t) => {
+ *   const response = await client.chat.completions.create(params);
+ *   t.record(response, undefined, params); // Pass params to capture input
  *   return response;
  * });
- *
- * @example Auto-wrapping (zero code changes):
- * import { wrapOpenAI } from 'deadpipe';
- * import OpenAI from 'openai';
- *
- * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
- * // All calls automatically tracked
- * const response = await client.chat.completions.create(...);
  */
 declare const VERSION = "2.0.0";
 type StatusType = 'success' | 'error' | 'timeout' | 'empty' | 'schema_violation' | 'refusal';
@@ -77,17 +78,9 @@ interface TrackOptions {
   appId?: string;
   environment?: string;
   version?: string;
-  provider?: 'openai' | 'anthropic' | string;
   schema?: SchemaValidator;
   enumFields?: Record<string, unknown[]>;
   numericBounds?: Record<string, [number | null, number | null]>;
-  messages?: Array<{
-    role: string;
-    content: string;
-    [key: string]: unknown;
-  }>;
-  tools?: Array<Record<string, unknown>>;
-  systemPrompt?: string;
 }
 interface SchemaValidator {
   validate: (data: unknown) => {
@@ -96,13 +89,18 @@ interface SchemaValidator {
     errors?: string[];
   };
 }
-interface WrapOpenAIOptions extends
+interface WrapOpenAIOptions extends TrackOptions {
   promptId: string;
 }
 declare function estimateCost(model: string, inputTokens: number, outputTokens: number): number | null;
 declare function detectRefusal(text: string): boolean;
 declare function validateEnumBounds(data: Record<string, unknown>, enumFields?: Record<string, unknown[]>): boolean;
 declare function validateNumericBounds(data: Record<string, unknown>, numericBounds?: Record<string, [number | null, number | null]>): boolean;
+/**
+ * Auto-detect provider from response object.
+ * Checks for provider-specific response structures.
+ */
+declare function detectProvider(response: any): 'openai' | 'anthropic' | 'unknown';
 interface ExtractedResponse {
   model: string;
   content: string;
@@ -126,7 +124,6 @@ declare class PromptTracker {
   private appId;
   private environment;
   private versionStr;
-  private provider;
   private schema;
   private enumFields;
   private numericBounds;
@@ -145,7 +142,7 @@ declare class PromptTracker {
   start(): void;
   markFirstToken(): void;
   markRetry(): void;
-  record(response: any, parsedOutput?: unknown): unknown;
+  record(response: any, parsedOutput?: unknown, input?: any): unknown;
   recordError(error: Error): void;
   private send;
   isRecorded(): boolean;
@@ -164,4 +161,4 @@ interface TrackedOpenAIClient extends OpenAIClient {
 }
 declare function wrapOpenAI(client: OpenAIClient, options: WrapOpenAIOptions): TrackedOpenAIClient;
 
-export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
+export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, wrapOpenAI as default, detectProvider, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
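On the declaration side, the visible contract change is `record(response, parsedOutput?, input?)` plus the slimmer `TrackOptions`. A short sketch against the new typings (the prompt id and params are illustrative; `track`'s third argument is the options object, as the README shows):

```typescript
import { track, type TrackOptions } from 'deadpipe';
import OpenAI from 'openai';

const client = new OpenAI();

// provider/messages/tools/systemPrompt no longer exist on TrackOptions
const options: TrackOptions = { appId: 'my-app', environment: 'production' };

const params = {
  model: 'gpt-4',
  messages: [{ role: 'user' as const, content: 'Process refund for order 1938' }],
};

await track('checkout_agent', async (t) => {
  const response = await client.chat.completions.create(params);
  t.record(response, undefined, params); // new optional third argument
  return response;
}, options);
```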
package/dist/index.d.ts
CHANGED
@@ -2,28 +2,29 @@
  * Deadpipe - LLM observability that answers one question:
  * "Is this prompt behaving the same as when it was last safe?"
  *
- * @example
+ * @example Recommended: Wrap your client (zero code changes)
+ * import { wrapOpenAI } from 'deadpipe';
+ * import OpenAI from 'openai';
+ *
+ * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
+ * // All calls automatically tracked with full context
+ * const response = await client.chat.completions.create({
+ *   model: 'gpt-4',
+ *   messages: [{ role: 'user', content: 'Process refund for order 1938' }]
+ * });
+ *
+ * @example Advanced: Manual tracking (for streaming, custom logic, etc.)
  * import { track } from 'deadpipe';
  * import OpenAI from 'openai';
  *
  * const client = new OpenAI();
+ * const params = { model: 'gpt-4', messages: [...] };
  *
- * const
- * const response = await client.chat.completions.create(
- *
- *   messages: [{ role: 'user', content: 'Process refund for order 1938' }]
- * });
- * t.record(response);
+ * const response = await track('checkout_agent', async (t) => {
+ *   const response = await client.chat.completions.create(params);
+ *   t.record(response, undefined, params); // Pass params to capture input
  *   return response;
  * });
- *
- * @example Auto-wrapping (zero code changes):
- * import { wrapOpenAI } from 'deadpipe';
- * import OpenAI from 'openai';
- *
- * const client = wrapOpenAI(new OpenAI(), { promptId: 'checkout_agent' });
- * // All calls automatically tracked
- * const response = await client.chat.completions.create(...);
  */
 declare const VERSION = "2.0.0";
 type StatusType = 'success' | 'error' | 'timeout' | 'empty' | 'schema_violation' | 'refusal';
@@ -77,17 +78,9 @@ interface TrackOptions {
   appId?: string;
   environment?: string;
   version?: string;
-  provider?: 'openai' | 'anthropic' | string;
   schema?: SchemaValidator;
   enumFields?: Record<string, unknown[]>;
   numericBounds?: Record<string, [number | null, number | null]>;
-  messages?: Array<{
-    role: string;
-    content: string;
-    [key: string]: unknown;
-  }>;
-  tools?: Array<Record<string, unknown>>;
-  systemPrompt?: string;
 }
 interface SchemaValidator {
   validate: (data: unknown) => {
@@ -96,13 +89,18 @@ interface SchemaValidator {
     errors?: string[];
   };
 }
-interface WrapOpenAIOptions extends
+interface WrapOpenAIOptions extends TrackOptions {
   promptId: string;
 }
 declare function estimateCost(model: string, inputTokens: number, outputTokens: number): number | null;
 declare function detectRefusal(text: string): boolean;
 declare function validateEnumBounds(data: Record<string, unknown>, enumFields?: Record<string, unknown[]>): boolean;
 declare function validateNumericBounds(data: Record<string, unknown>, numericBounds?: Record<string, [number | null, number | null]>): boolean;
+/**
+ * Auto-detect provider from response object.
+ * Checks for provider-specific response structures.
+ */
+declare function detectProvider(response: any): 'openai' | 'anthropic' | 'unknown';
 interface ExtractedResponse {
   model: string;
   content: string;
@@ -126,7 +124,6 @@ declare class PromptTracker {
   private appId;
   private environment;
   private versionStr;
-  private provider;
   private schema;
   private enumFields;
   private numericBounds;
@@ -145,7 +142,7 @@ declare class PromptTracker {
   start(): void;
   markFirstToken(): void;
   markRetry(): void;
-  record(response: any, parsedOutput?: unknown): unknown;
+  record(response: any, parsedOutput?: unknown, input?: any): unknown;
   recordError(error: Error): void;
   private send;
   isRecorded(): boolean;
@@ -164,4 +161,4 @@ interface TrackedOpenAIClient extends OpenAIClient {
 }
 declare function wrapOpenAI(client: OpenAIClient, options: WrapOpenAIOptions): TrackedOpenAIClient;
 
-export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
+export { type PromptTelemetry, PromptTracker, type SchemaValidator, type StatusType, type TrackOptions, VERSION, type WrapOpenAIOptions, wrapOpenAI as default, detectProvider, detectRefusal, estimateCost, extractAnthropicResponse, extractOpenAIResponse, track, validateEnumBounds, validateNumericBounds, wrapOpenAI };
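The rewritten export line (identical in both declaration files) also makes `wrapOpenAI` the package's default export and promotes `detectProvider` to public API. Assuming the published `package.json` wires these builds up in the usual way, both import styles below should resolve under 2.0.2:

```typescript
// Named import, unchanged from 2.0.1
import { wrapOpenAI, detectProvider } from 'deadpipe';

// Default import, new in 2.0.2 (aliases wrapOpenAI)
import dp from 'deadpipe';
import OpenAI from 'openai';

const client = dp(new OpenAI(), { promptId: 'checkout_agent' });
console.log(detectProvider({ choices: [] })); // "openai"
```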
package/dist/index.js
CHANGED
@@ -22,6 +22,8 @@ var index_exports = {};
 __export(index_exports, {
   PromptTracker: () => PromptTracker,
   VERSION: () => VERSION,
+  default: () => index_default,
+  detectProvider: () => detectProvider,
   detectRefusal: () => detectRefusal,
   estimateCost: () => estimateCost,
   extractAnthropicResponse: () => extractAnthropicResponse,
@@ -132,6 +134,25 @@ function validateNumericBounds(data, numericBounds) {
   }
   return true;
 }
+function detectProvider(response) {
+  if (!response) return "unknown";
+  if (Array.isArray(response.content) || response.stop_reason !== void 0) {
+    return "anthropic";
+  }
+  if (response.choices !== void 0 || response.output !== void 0) {
+    return "openai";
+  }
+  if (response.model) {
+    const modelLower = String(response.model).toLowerCase();
+    if (modelLower.includes("claude")) {
+      return "anthropic";
+    }
+    if (modelLower.includes("gpt") || modelLower.includes("o1")) {
+      return "openai";
+    }
+  }
+  return "unknown";
+}
 function extractOpenAIResponse(response) {
   const result = {
     model: "",
@@ -231,16 +252,15 @@ var PromptTracker = class {
   appId;
   environment;
   versionStr;
-  provider;
   // Validation
   schema;
   enumFields;
   numericBounds;
-  // Context hashes
+  // Context hashes (auto-extracted from response)
   promptHash;
   toolSchemaHash;
   systemPromptHash;
-  // Context for previews
+  // Context for previews (auto-extracted from response)
   messages;
   systemPrompt;
   // Timing
@@ -259,24 +279,19 @@ var PromptTracker = class {
     this.appId = options.appId || process.env.DEADPIPE_APP_ID;
     this.environment = options.environment || process.env.DEADPIPE_ENVIRONMENT;
     this.versionStr = options.version || process.env.DEADPIPE_VERSION || process.env.GIT_COMMIT;
-    this.provider = options.provider || "openai";
     this.schema = options.schema;
     this.enumFields = options.enumFields;
     this.numericBounds = options.numericBounds;
-    this.
-    this.
-    this.
-    this.
-    this.
+    this.messages = void 0;
+    this.systemPrompt = void 0;
+    this.promptHash = void 0;
+    this.toolSchemaHash = void 0;
+    this.systemPromptHash = void 0;
     this.telemetry = {
       prompt_id: this.promptId,
-      provider: this.provider,
       app_id: this.appId,
       environment: this.environment,
       version: this.versionStr,
-      prompt_hash: this.promptHash,
-      tool_schema_hash: this.toolSchemaHash,
-      system_prompt_hash: this.systemPromptHash,
       status: "success"
     };
   }
@@ -295,11 +310,44 @@ var PromptTracker = class {
     this.telemetry.retry_count = this.retryCount;
   }
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  record(response, parsedOutput) {
+  record(response, parsedOutput, input) {
     this.endTime = Date.now();
     this.telemetry.end_time = new Date(this.endTime).toISOString();
     this.telemetry.total_latency = this.startTime ? this.endTime - this.startTime : 0;
-    const
+    const detectedProvider = detectProvider(response);
+    this.telemetry.provider = detectedProvider !== "unknown" ? detectedProvider : "openai";
+    const extracted = detectedProvider === "anthropic" ? extractAnthropicResponse(response) : extractOpenAIResponse(response);
+    if (input) {
+      const messages = input.messages || [];
+      const tools = input.tools;
+      let systemPrompt;
+      for (const msg of messages) {
+        if (msg.role === "system") {
+          systemPrompt = msg.content || "";
+          break;
+        }
+      }
+      if (messages.length > 0) {
+        this.promptHash = hashMessages(messages);
+        this.messages = messages;
+      }
+      if (tools) {
+        this.toolSchemaHash = hashTools(tools);
+      }
+      if (systemPrompt) {
+        this.systemPromptHash = hashContentSync(systemPrompt);
+        this.systemPrompt = systemPrompt;
+      }
+    }
+    if (this.promptHash) {
+      this.telemetry.prompt_hash = this.promptHash;
+    }
+    if (this.toolSchemaHash) {
+      this.telemetry.tool_schema_hash = this.toolSchemaHash;
+    }
+    if (this.systemPromptHash) {
+      this.telemetry.system_prompt_hash = this.systemPromptHash;
+    }
     this.telemetry.model = extracted.model;
     this.telemetry.input_tokens = extracted.inputTokens ?? void 0;
     this.telemetry.output_tokens = extracted.outputTokens ?? void 0;
@@ -418,17 +466,25 @@ var PromptTracker = class {
     this.recorded = true;
   }
   async send() {
-    if (!this.apiKey)
+    if (!this.apiKey) {
+      if (process.env.NODE_ENV === "development" || process.env.DEADPIPE_DEBUG === "1") {
+        console.warn("[Deadpipe] DEADPIPE_API_KEY not set. Telemetry will not be sent.");
+      }
+      return;
+    }
     try {
       const controller = new AbortController();
      const timeoutId = setTimeout(() => controller.abort(), this.timeoutMs);
       const payload = {};
       for (const [key, value] of Object.entries(this.telemetry)) {
-        if (value
-
-
+        if (value === void 0 || value === null) continue;
+        if (typeof value === "string" && value === "") continue;
+        if (typeof value === "boolean" && value === false) continue;
+        if (Array.isArray(value) && value.length === 0) continue;
+        if (typeof value === "object" && Object.keys(value).length === 0) continue;
+        payload[key] = value;
       }
-
+      fetch(`${this.baseUrl}/prompt`, {
         method: "POST",
         headers: {
           "Content-Type": "application/json",
@@ -436,8 +492,10 @@ var PromptTracker = class {
         },
         body: JSON.stringify(payload),
         signal: controller.signal
+      }).catch(() => {
+      }).finally(() => {
+        clearTimeout(timeoutId);
       });
-      clearTimeout(timeoutId);
     } catch {
     }
   }
@@ -487,15 +545,10 @@ function wrapOpenAI(client, options) {
           promptId,
           async (t) => {
             const response = await client.chat.completions.create(params);
-            t.record(response);
+            t.record(response, void 0, params);
             return response;
           },
-
-            ...trackOptions,
-            messages,
-            tools,
-            systemPrompt
-          }
+          trackOptions
         );
       }
     }
@@ -512,23 +565,22 @@ function wrapOpenAI(client, options) {
           promptId,
           async (t) => {
             const response = await client.responses.create(params);
-            t.record(response);
+            t.record(response, void 0, params);
             return response;
           },
-
-            ...trackOptions,
-            messages
-          }
+          trackOptions
        );
      }
    };
  }
  return wrappedClient;
}
+var index_default = wrapOpenAI;
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   PromptTracker,
   VERSION,
+  detectProvider,
   detectRefusal,
   estimateCost,
   extractAnthropicResponse,
package/dist/index.mjs
CHANGED
@@ -99,6 +99,25 @@ function validateNumericBounds(data, numericBounds) {
   }
   return true;
 }
+function detectProvider(response) {
+  if (!response) return "unknown";
+  if (Array.isArray(response.content) || response.stop_reason !== void 0) {
+    return "anthropic";
+  }
+  if (response.choices !== void 0 || response.output !== void 0) {
+    return "openai";
+  }
+  if (response.model) {
+    const modelLower = String(response.model).toLowerCase();
+    if (modelLower.includes("claude")) {
+      return "anthropic";
+    }
+    if (modelLower.includes("gpt") || modelLower.includes("o1")) {
+      return "openai";
+    }
+  }
+  return "unknown";
+}
 function extractOpenAIResponse(response) {
   const result = {
     model: "",
@@ -198,16 +217,15 @@ var PromptTracker = class {
   appId;
   environment;
   versionStr;
-  provider;
   // Validation
   schema;
   enumFields;
   numericBounds;
-  // Context hashes
+  // Context hashes (auto-extracted from response)
   promptHash;
   toolSchemaHash;
   systemPromptHash;
-  // Context for previews
+  // Context for previews (auto-extracted from response)
   messages;
   systemPrompt;
   // Timing
@@ -226,24 +244,19 @@ var PromptTracker = class {
     this.appId = options.appId || process.env.DEADPIPE_APP_ID;
     this.environment = options.environment || process.env.DEADPIPE_ENVIRONMENT;
     this.versionStr = options.version || process.env.DEADPIPE_VERSION || process.env.GIT_COMMIT;
-    this.provider = options.provider || "openai";
     this.schema = options.schema;
     this.enumFields = options.enumFields;
     this.numericBounds = options.numericBounds;
-    this.
-    this.
-    this.
-    this.
-    this.
+    this.messages = void 0;
+    this.systemPrompt = void 0;
+    this.promptHash = void 0;
+    this.toolSchemaHash = void 0;
+    this.systemPromptHash = void 0;
     this.telemetry = {
       prompt_id: this.promptId,
-      provider: this.provider,
       app_id: this.appId,
       environment: this.environment,
       version: this.versionStr,
-      prompt_hash: this.promptHash,
-      tool_schema_hash: this.toolSchemaHash,
-      system_prompt_hash: this.systemPromptHash,
       status: "success"
     };
   }
@@ -262,11 +275,44 @@ var PromptTracker = class {
     this.telemetry.retry_count = this.retryCount;
   }
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  record(response, parsedOutput) {
+  record(response, parsedOutput, input) {
     this.endTime = Date.now();
     this.telemetry.end_time = new Date(this.endTime).toISOString();
     this.telemetry.total_latency = this.startTime ? this.endTime - this.startTime : 0;
-    const
+    const detectedProvider = detectProvider(response);
+    this.telemetry.provider = detectedProvider !== "unknown" ? detectedProvider : "openai";
+    const extracted = detectedProvider === "anthropic" ? extractAnthropicResponse(response) : extractOpenAIResponse(response);
+    if (input) {
+      const messages = input.messages || [];
+      const tools = input.tools;
+      let systemPrompt;
+      for (const msg of messages) {
+        if (msg.role === "system") {
+          systemPrompt = msg.content || "";
+          break;
+        }
+      }
+      if (messages.length > 0) {
+        this.promptHash = hashMessages(messages);
+        this.messages = messages;
+      }
+      if (tools) {
+        this.toolSchemaHash = hashTools(tools);
+      }
+      if (systemPrompt) {
+        this.systemPromptHash = hashContentSync(systemPrompt);
+        this.systemPrompt = systemPrompt;
+      }
+    }
+    if (this.promptHash) {
+      this.telemetry.prompt_hash = this.promptHash;
+    }
+    if (this.toolSchemaHash) {
+      this.telemetry.tool_schema_hash = this.toolSchemaHash;
+    }
+    if (this.systemPromptHash) {
+      this.telemetry.system_prompt_hash = this.systemPromptHash;
+    }
     this.telemetry.model = extracted.model;
     this.telemetry.input_tokens = extracted.inputTokens ?? void 0;
     this.telemetry.output_tokens = extracted.outputTokens ?? void 0;
@@ -385,17 +431,25 @@ var PromptTracker = class {
     this.recorded = true;
   }
   async send() {
-    if (!this.apiKey)
+    if (!this.apiKey) {
+      if (process.env.NODE_ENV === "development" || process.env.DEADPIPE_DEBUG === "1") {
+        console.warn("[Deadpipe] DEADPIPE_API_KEY not set. Telemetry will not be sent.");
+      }
+      return;
+    }
     try {
       const controller = new AbortController();
       const timeoutId = setTimeout(() => controller.abort(), this.timeoutMs);
       const payload = {};
       for (const [key, value] of Object.entries(this.telemetry)) {
-        if (value
-
-
+        if (value === void 0 || value === null) continue;
+        if (typeof value === "string" && value === "") continue;
+        if (typeof value === "boolean" && value === false) continue;
+        if (Array.isArray(value) && value.length === 0) continue;
+        if (typeof value === "object" && Object.keys(value).length === 0) continue;
+        payload[key] = value;
       }
-
+      fetch(`${this.baseUrl}/prompt`, {
         method: "POST",
         headers: {
           "Content-Type": "application/json",
@@ -403,8 +457,10 @@ var PromptTracker = class {
         },
         body: JSON.stringify(payload),
         signal: controller.signal
+      }).catch(() => {
+      }).finally(() => {
+        clearTimeout(timeoutId);
       });
-      clearTimeout(timeoutId);
     } catch {
     }
   }
@@ -454,15 +510,10 @@ function wrapOpenAI(client, options) {
           promptId,
           async (t) => {
             const response = await client.chat.completions.create(params);
-            t.record(response);
+            t.record(response, void 0, params);
             return response;
           },
-
-            ...trackOptions,
-            messages,
-            tools,
-            systemPrompt
-          }
+          trackOptions
         );
       }
     }
@@ -479,22 +530,22 @@ function wrapOpenAI(client, options) {
           promptId,
           async (t) => {
             const response = await client.responses.create(params);
-            t.record(response);
+            t.record(response, void 0, params);
             return response;
           },
-
-            ...trackOptions,
-            messages
-          }
+          trackOptions
        );
      }
    };
  }
  return wrappedClient;
}
+var index_default = wrapOpenAI;
 export {
   PromptTracker,
   VERSION,
+  index_default as default,
+  detectProvider,
   detectRefusal,
   estimateCost,
   extractAnthropicResponse,
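In both builds, `send()` now fires the telemetry POST without blocking: errors are swallowed in `.catch()`, the abort timer is cleared in `.finally()` so the timeout stays armed until the request settles, and a warning is logged in development when `DEADPIPE_API_KEY` is missing. The pattern in isolation, with hypothetical `baseUrl`, `payload`, and `timeoutMs` standing in for the tracker's private fields:

```typescript
function sendTelemetry(baseUrl: string, payload: unknown, timeoutMs: number): void {
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
  fetch(`${baseUrl}/prompt`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload),
    signal: controller.signal,
  })
    .catch(() => {})                         // never let telemetry errors reach the caller
    .finally(() => clearTimeout(timeoutId)); // disarm the abort timer only after settling
}
```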