@promptmetrics/sdk 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +82 -0
- package/LICENSE +21 -0
- package/README.md +854 -0
- package/dist/index.d.mts +1320 -0
- package/dist/index.d.ts +1320 -0
- package/dist/index.js +1373 -0
- package/dist/index.mjs +1326 -0
- package/package.json +86 -0
package/README.md
ADDED
|
@@ -0,0 +1,854 @@
|
|
|
1
|
+
<div align="center">
|
|
2
|
+
|
|
3
|
+
# 📊 PromptMetrics SDK
|
|
4
|
+
|
|
5
|
+
**The platform built for prompt engineers**
|
|
6
|
+
|
|
7
|
+
<a href="https://nodejs.org"><img alt="Node" src="https://img.shields.io/badge/Node.js-43853D?style=for-the-badge&logo=node.js&logoColor=white"></a>
|
|
8
|
+
<a href="https://www.typescriptlang.org/"><img alt="TypeScript" src="https://img.shields.io/badge/TypeScript-007ACC?style=for-the-badge&logo=typescript&logoColor=white"></a>
|
|
9
|
+
<a href="https://www.npmjs.com/package/@promptmetrics/sdk"><img alt="npm" src="https://img.shields.io/badge/npm-CB3837?style=for-the-badge&logo=npm&logoColor=white"></a>
|
|
10
|
+
<a href="https://opensource.org/licenses/MIT"><img alt="License" src="https://img.shields.io/badge/License-MIT-yellow.svg?style=for-the-badge"></a>
|
|
11
|
+
|
|
12
|
+
---
|
|
13
|
+
|
|
14
|
+
</div>
|
|
15
|
+
|
|
16
|
+
<div align="left">
|
|
17
|
+
|
|
18
|
+
[PromptMetrics](https://promptmetrics.dev/) is a platform for managing, versioning, and monitoring your LLM prompts. Track every prompt execution, analyze costs, and debug AI workflows with built-in tracing.
|
|
19
|
+
|
|
20
|
+
This repo contains the official TypeScript/JavaScript SDK for PromptMetrics.
|
|
21
|
+
|
|
22
|
+
## Table of Contents
|
|
23
|
+
|
|
24
|
+
- [Installation](#installation)
|
|
25
|
+
- [Quick Start](#quick-start)
|
|
26
|
+
- [Configuration](#configuration)
|
|
27
|
+
- [Core Features](#core-features)
|
|
28
|
+
- [Templates](#templates)
|
|
29
|
+
- [Versions](#versions)
|
|
30
|
+
- [Prompt Logs](#prompt-logs)
|
|
31
|
+
- [LLM Providers](#llm-providers)
|
|
32
|
+
- [Tracing & Observability](#tracing--observability)
|
|
33
|
+
- [@traceable Decorator](#traceable-decorator)
|
|
34
|
+
- [Manual Tracking](#manual-tracking)
|
|
35
|
+
- [Batch Operations](#batch-operations)
|
|
36
|
+
- [LLM Request Correlation](#llm-request-correlation)
|
|
37
|
+
- [API Reference](#api-reference)
|
|
38
|
+
- [Examples](#examples)
|
|
39
|
+
- [Support](#support)
|
|
40
|
+
|
|
41
|
+
---
|
|
42
|
+
|
|
43
|
+
## Installation
|
|
44
|
+
|
|
45
|
+
```bash
|
|
46
|
+
npm install @promptmetrics/sdk
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
Or with yarn:
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
yarn add @promptmetrics/sdk
|
|
53
|
+
```
|
|
54
|
+
|
|
55
|
+
---
|
|
56
|
+
|
|
57
|
+
## Quick Start
|
|
58
|
+
|
|
59
|
+
```typescript
|
|
60
|
+
import { PromptMetrics } from "@promptmetrics/sdk";
|
|
61
|
+
|
|
62
|
+
// Initialize the client
|
|
63
|
+
const pm = new PromptMetrics({
|
|
64
|
+
apiKey: process.env.PROMPTMETRICS_API_KEY, // pm_xxxxx
|
|
65
|
+
});
|
|
66
|
+
|
|
67
|
+
// Get a template and run it
|
|
68
|
+
const template = await pm.templates.get("customer-support");
|
|
69
|
+
const version = template.versions[0];
|
|
70
|
+
|
|
71
|
+
const result = await pm.versions.run(version._id, {
|
|
72
|
+
variables: {
|
|
73
|
+
customer_name: "John Doe",
|
|
74
|
+
issue: "billing problem",
|
|
75
|
+
},
|
|
76
|
+
});
|
|
77
|
+
|
|
78
|
+
console.log(result.response_object);
|
|
79
|
+
```
|
|
80
|
+
|
|
81
|
+
---
|
|
82
|
+
|
|
83
|
+
## Configuration
|
|
84
|
+
|
|
85
|
+
### Basic Configuration
|
|
86
|
+
|
|
87
|
+
```typescript
|
|
88
|
+
const pm = new PromptMetrics({
|
|
89
|
+
apiKey: "pm_xxxxx", // Required: Your workspace API key
|
|
90
|
+
});
|
|
91
|
+
```
|
|
92
|
+
|
|
93
|
+
### Advanced Configuration
|
|
94
|
+
|
|
95
|
+
```typescript
|
|
96
|
+
const pm = new PromptMetrics({
|
|
97
|
+
apiKey: "pm_xxxxx", // Required: Your workspace API key
|
|
98
|
+
timeout: 30000, // Optional: Request timeout in ms (default: 30000)
|
|
99
|
+
maxRetries: 3, // Optional: Max retry attempts (default: 3)
|
|
100
|
+
debug: false, // Optional: Enable debug logging (default: false)
|
|
101
|
+
});
|
|
102
|
+
```
|
|
103
|
+
|
|
104
|
+
### Environment Variables
|
|
105
|
+
|
|
106
|
+
```bash
|
|
107
|
+
# Optional
|
|
108
|
+
BASE_URL=https://api.promptmetrics.com
|
|
109
|
+
|
|
110
|
+
# Required (can be provided here or passed in the client config)
|
|
111
|
+
PROMPTMETRICS_API_KEY=pm_xxxxx
|
|
112
|
+
```
|
|
113
|
+
|
|
114
|
+
---
|
|
115
|
+
|
|
116
|
+
## Core Features
|
|
117
|
+
|
|
118
|
+
### Templates
|
|
119
|
+
|
|
120
|
+
Templates are containers for your prompt versions. Each template can have multiple versions with different configurations.
|
|
121
|
+
|
|
122
|
+
#### Get a Template
|
|
123
|
+
|
|
124
|
+
```typescript
|
|
125
|
+
// By name
|
|
126
|
+
const template = await pm.templates.get("customer-support");
|
|
127
|
+
|
|
128
|
+
// By ID
|
|
129
|
+
const template = await pm.templates.get("template_id_123");
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
#### Get Specific Version
|
|
133
|
+
|
|
134
|
+
```typescript
|
|
135
|
+
// Get production version (default)
|
|
136
|
+
const template = await pm.templates.get("customer-support");
|
|
137
|
+
|
|
138
|
+
// Get specific environment
|
|
139
|
+
const template = await pm.templates.get("customer-support", {
|
|
140
|
+
env_label: "staging",
|
|
141
|
+
});
|
|
142
|
+
|
|
143
|
+
// Get specific version number
|
|
144
|
+
const template = await pm.templates.get("customer-support", {
|
|
145
|
+
version: 3,
|
|
146
|
+
});
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
#### List Templates
|
|
150
|
+
|
|
151
|
+
```typescript
|
|
152
|
+
const result = await pm.templates.list({
|
|
153
|
+
page: 1,
|
|
154
|
+
limit: 20,
|
|
155
|
+
search: "support", // Optional: Search by name
|
|
156
|
+
});
|
|
157
|
+
|
|
158
|
+
console.log(result.templates);
|
|
159
|
+
console.log(result.pagination);
|
|
160
|
+
```
|
|
161
|
+
|
|
162
|
+
---
|
|
163
|
+
|
|
164
|
+
### Versions
|
|
165
|
+
|
|
166
|
+
Versions represent specific configurations of a template (messages, model, parameters).
|
|
167
|
+
|
|
168
|
+
#### Get a Version
|
|
169
|
+
|
|
170
|
+
```typescript
|
|
171
|
+
const version = await pm.versions.get("version_id_123");
|
|
172
|
+
```
|
|
173
|
+
|
|
174
|
+
#### Run a Version
|
|
175
|
+
|
|
176
|
+
```typescript
|
|
177
|
+
// Basic usage
|
|
178
|
+
const result = await pm.versions.run("version_id_123", {
|
|
179
|
+
variables: {
|
|
180
|
+
customer_name: "John Doe",
|
|
181
|
+
issue: "billing problem",
|
|
182
|
+
},
|
|
183
|
+
});
|
|
184
|
+
|
|
185
|
+
// With custom parameters
|
|
186
|
+
const result = await pm.versions.run("version_id_123", {
|
|
187
|
+
variables: { input: "Hello" },
|
|
188
|
+
parameters: {
|
|
189
|
+
temperature: 0.7,
|
|
190
|
+
max_tokens: 500,
|
|
191
|
+
},
|
|
192
|
+
});
|
|
193
|
+
|
|
194
|
+
// With custom model
|
|
195
|
+
const result = await pm.versions.run("version_id_123", {
|
|
196
|
+
variables: { input: "Hello" },
|
|
197
|
+
model: "gpt-4",
|
|
198
|
+
});
|
|
199
|
+
```
|
|
200
|
+
|
|
201
|
+
#### Update Version Metadata
|
|
202
|
+
|
|
203
|
+
```typescript
|
|
204
|
+
await pm.versions.update("version_id_123", {
|
|
205
|
+
metadata: {
|
|
206
|
+
department: "customer_support",
|
|
207
|
+
priority: "high",
|
|
208
|
+
},
|
|
209
|
+
});
|
|
210
|
+
```
|
|
211
|
+
|
|
212
|
+
#### Update Environment Label
|
|
213
|
+
|
|
214
|
+
```typescript
|
|
215
|
+
// Promote to production
|
|
216
|
+
await pm.versions.update("version_id_123", {
|
|
217
|
+
env_label: "production",
|
|
218
|
+
});
|
|
219
|
+
```
|
|
220
|
+
|
|
221
|
+
---
|
|
222
|
+
|
|
223
|
+
### Prompt Logs
|
|
224
|
+
|
|
225
|
+
Prompt logs capture every execution of a template version, including inputs, outputs, costs, and performance metrics.
|
|
226
|
+
|
|
227
|
+
#### List Logs
|
|
228
|
+
|
|
229
|
+
```typescript
|
|
230
|
+
const result = await pm.logs.list({
|
|
231
|
+
page: 1,
|
|
232
|
+
limit: 50,
|
|
233
|
+
template_id: "template_123", // Optional: Filter by template
|
|
234
|
+
status: "SUCCESS", // Optional: Filter by status
|
|
235
|
+
start_date: "2025-01-01T00:00:00Z", // Optional: Date range
|
|
236
|
+
end_date: "2025-01-31T23:59:59Z",
|
|
237
|
+
});
|
|
238
|
+
|
|
239
|
+
console.log(result.logs);
|
|
240
|
+
console.log(result.pagination);
|
|
241
|
+
```
|
|
242
|
+
|
|
243
|
+
#### Log Structure
|
|
244
|
+
|
|
245
|
+
```typescript
|
|
246
|
+
interface PromptLog {
|
|
247
|
+
_id: string;
|
|
248
|
+
template_version_id: string;
|
|
249
|
+
template_id: string;
|
|
250
|
+
workspace_id: string;
|
|
251
|
+
|
|
252
|
+
// Request/Response
|
|
253
|
+
request_object: {
|
|
254
|
+
model: string;
|
|
255
|
+
messages: Message[];
|
|
256
|
+
temperature?: number;
|
|
257
|
+
// ... other parameters
|
|
258
|
+
};
|
|
259
|
+
response_object: {
|
|
260
|
+
id: string;
|
|
261
|
+
choices: Array<{
|
|
262
|
+
message: Message;
|
|
263
|
+
finish_reason: string;
|
|
264
|
+
}>;
|
|
265
|
+
usage: {
|
|
266
|
+
prompt_tokens: number;
|
|
267
|
+
completion_tokens: number;
|
|
268
|
+
total_tokens: number;
|
|
269
|
+
};
|
|
270
|
+
};
|
|
271
|
+
|
|
272
|
+
// Performance
|
|
273
|
+
latency: number; // seconds
|
|
274
|
+
status: "SUCCESS" | "ERROR";
|
|
275
|
+
|
|
276
|
+
// Costs
|
|
277
|
+
prompt_cost: number;
|
|
278
|
+
completion_cost: number;
|
|
279
|
+
total_cost: number;
|
|
280
|
+
|
|
281
|
+
// Trace correlation (if called from @traceable)
|
|
282
|
+
trace_id?: string;
|
|
283
|
+
span_id?: string;
|
|
284
|
+
group_id?: string;
|
|
285
|
+
|
|
286
|
+
created_at: string;
|
|
287
|
+
updated_at: string;
|
|
288
|
+
}
|
|
289
|
+
```
|
|
290
|
+
|
|
291
|
+
---
|
|
292
|
+
|
|
293
|
+
### LLM Providers
|
|
294
|
+
|
|
295
|
+
Get information about available LLM providers and models.
|
|
296
|
+
|
|
297
|
+
#### List Providers
|
|
298
|
+
|
|
299
|
+
```typescript
|
|
300
|
+
const providers = await pm.providers.list();
|
|
301
|
+
|
|
302
|
+
providers.forEach((provider) => {
|
|
303
|
+
console.log(provider.name); // e.g., "OpenAI"
|
|
304
|
+
console.log(provider.models); // Available models
|
|
305
|
+
});
|
|
306
|
+
```
|
|
307
|
+
|
|
308
|
+
---
|
|
309
|
+
|
|
310
|
+
## Tracing & Observability
|
|
311
|
+
|
|
312
|
+
PromptMetrics provides powerful tracing capabilities to monitor and debug your AI workflows.
|
|
313
|
+
|
|
314
|
+
### @traceable Decorator
|
|
315
|
+
|
|
316
|
+
Automatically track function execution with the `@traceable` decorator.
|
|
317
|
+
|
|
318
|
+
#### Basic Usage
|
|
319
|
+
|
|
320
|
+
```typescript
|
|
321
|
+
import { PromptMetrics } from "@promptmetrics/sdk";
|
|
322
|
+
|
|
323
|
+
const pm = new PromptMetrics({ apiKey: "pm_xxxxx" });
|
|
324
|
+
|
|
325
|
+
class DataProcessor {
|
|
326
|
+
@pm.traceable({ name: "process_data" })
|
|
327
|
+
async processData(input: string) {
|
|
328
|
+
// Your logic here
|
|
329
|
+
return processedData;
|
|
330
|
+
}
|
|
331
|
+
}
|
|
332
|
+
|
|
333
|
+
const processor = new DataProcessor();
|
|
334
|
+
await processor.processData("test");
|
|
335
|
+
// ✅ Automatically tracked and sent to PromptMetrics
|
|
336
|
+
```
|
|
337
|
+
|
|
338
|
+
#### With Metadata
|
|
339
|
+
|
|
340
|
+
```typescript
|
|
341
|
+
class CustomerService {
|
|
342
|
+
@pm.traceable({
|
|
343
|
+
name: "handle_support_request",
|
|
344
|
+
metadata: {
|
|
345
|
+
service: "customer_support",
|
|
346
|
+
priority: "high",
|
|
347
|
+
},
|
|
348
|
+
})
|
|
349
|
+
async handleRequest(customerId: string, message: string) {
|
|
350
|
+
// Function logic
|
|
351
|
+
return response;
|
|
352
|
+
}
|
|
353
|
+
}
|
|
354
|
+
```
|
|
355
|
+
|
|
356
|
+
#### With Tags
|
|
357
|
+
|
|
358
|
+
```typescript
|
|
359
|
+
class PaymentProcessor {
|
|
360
|
+
@pm.traceable({
|
|
361
|
+
name: "process_payment",
|
|
362
|
+
tags: ["payment", "critical", "production"],
|
|
363
|
+
})
|
|
364
|
+
async processPayment(amount: number) {
|
|
365
|
+
// Payment logic
|
|
366
|
+
return result;
|
|
367
|
+
}
|
|
368
|
+
}
|
|
369
|
+
```
|
|
370
|
+
|
|
371
|
+
#### With Grouping
|
|
372
|
+
|
|
373
|
+
```typescript
|
|
374
|
+
class ConversationHandler {
|
|
375
|
+
@pm.traceable({
|
|
376
|
+
name: "handle_conversation_turn",
|
|
377
|
+
})
|
|
378
|
+
async handleTurn(message: string, conversationId: string) {
|
|
379
|
+
// All nested calls automatically grouped
|
|
380
|
+
const enriched = await this.enrichMessage(message);
|
|
381
|
+
const response = await this.generateResponse(enriched);
|
|
382
|
+
return response;
|
|
383
|
+
}
|
|
384
|
+
|
|
385
|
+
@pm.traceable({ name: "enrich_message" })
|
|
386
|
+
async enrichMessage(message: string) {
|
|
387
|
+
// Automatically linked to parent trace
|
|
388
|
+
return enrichedMessage;
|
|
389
|
+
}
|
|
390
|
+
|
|
391
|
+
@pm.traceable({ name: "generate_response" })
|
|
392
|
+
async generateResponse(message: string) {
|
|
393
|
+
// Automatically linked to parent trace
|
|
394
|
+
return response;
|
|
395
|
+
}
|
|
396
|
+
}
|
|
397
|
+
```
|
|
398
|
+
|
|
399
|
+
#### Decorator Options
|
|
400
|
+
|
|
401
|
+
```typescript
|
|
402
|
+
interface TraceableOptions {
|
|
403
|
+
name?: string; // Function name (default: method name)
|
|
404
|
+
type?: "CUSTOM" | "LLM"; // Function type (default: "CUSTOM")
|
|
405
|
+
metadata?: Record<string, unknown>; // Static metadata
|
|
406
|
+
tags?: string[]; // Tags for categorization
|
|
407
|
+
disabled?: boolean; // Disable tracing (for performance)
|
|
408
|
+
}
|
|
409
|
+
```
|
|
410
|
+
|
|
411
|
+
---
|
|
412
|
+
|
|
413
|
+
### Manual Tracking
|
|
414
|
+
|
|
415
|
+
Track metadata, scores, and groups dynamically within traced functions.
|
|
416
|
+
|
|
417
|
+
#### Track Metadata
|
|
418
|
+
|
|
419
|
+
```typescript
|
|
420
|
+
@pm.traceable({ name: "process_data" })
|
|
421
|
+
async processData(data: string) {
|
|
422
|
+
// Process data
|
|
423
|
+
const result = processRawData(data);
|
|
424
|
+
|
|
425
|
+
// Add metadata dynamically
|
|
426
|
+
await pm.track.metadata({
|
|
427
|
+
records_processed: result.length,
|
|
428
|
+
data_source: "crm_system",
|
|
429
|
+
processing_version: "2.0",
|
|
430
|
+
});
|
|
431
|
+
|
|
432
|
+
return result;
|
|
433
|
+
}
|
|
434
|
+
```
|
|
435
|
+
|
|
436
|
+
#### Track Scores
|
|
437
|
+
|
|
438
|
+
```typescript
|
|
439
|
+
@pm.traceable({ name: "validate_response" })
|
|
440
|
+
async validateResponse(response: string) {
|
|
441
|
+
// Run quality checks
|
|
442
|
+
const coherence = checkCoherence(response);
|
|
443
|
+
const relevance = checkRelevance(response);
|
|
444
|
+
const safety = checkSafety(response);
|
|
445
|
+
|
|
446
|
+
// Track scores
|
|
447
|
+
await pm.track.score({ criteria: "coherence", value: coherence });
|
|
448
|
+
await pm.track.score({ criteria: "relevance", value: relevance });
|
|
449
|
+
await pm.track.score({ criteria: "safety", value: safety });
|
|
450
|
+
|
|
451
|
+
return { passed: coherence > 0.8 && relevance > 0.8 && safety > 0.9 };
|
|
452
|
+
}
|
|
453
|
+
```
|
|
454
|
+
|
|
455
|
+
#### Track Groups
|
|
456
|
+
|
|
457
|
+
```typescript
|
|
458
|
+
@pm.traceable({ name: "handle_conversation" })
|
|
459
|
+
async handleConversation(conversationId: string) {
|
|
460
|
+
// Set group dynamically (overrides decorator)
|
|
461
|
+
await pm.track.group({
|
|
462
|
+
group_id: conversationId,
|
|
463
|
+
group_type: "conversation",
|
|
464
|
+
});
|
|
465
|
+
|
|
466
|
+
// All nested calls inherit this group
|
|
467
|
+
const result = await this.processMessage();
|
|
468
|
+
return result;
|
|
469
|
+
}
|
|
470
|
+
```
|
|
471
|
+
|
|
472
|
+
---
|
|
473
|
+
|
|
474
|
+
### Batch Operations
|
|
475
|
+
|
|
476
|
+
Efficiently send multiple traces at once (useful for offline processing or historical data import).
|
|
477
|
+
|
|
478
|
+
#### Batch Create Traces
|
|
479
|
+
|
|
480
|
+
```typescript
|
|
481
|
+
const traces = [
|
|
482
|
+
{
|
|
483
|
+
trace_id: "trace_123",
|
|
484
|
+
span_id: "span_abc",
|
|
485
|
+
function_name: "step_1",
|
|
486
|
+
start_time: new Date("2025-12-08T10:00:00Z"),
|
|
487
|
+
end_time: new Date("2025-12-08T10:00:01Z"),
|
|
488
|
+
duration_ms: 1000,
|
|
489
|
+
status: "SUCCESS",
|
|
490
|
+
metadata: { step: 1 },
|
|
491
|
+
},
|
|
492
|
+
{
|
|
493
|
+
trace_id: "trace_123",
|
|
494
|
+
span_id: "span_def",
|
|
495
|
+
function_name: "step_2",
|
|
496
|
+
start_time: new Date("2025-12-08T10:00:01Z"),
|
|
497
|
+
end_time: new Date("2025-12-08T10:00:03Z"),
|
|
498
|
+
duration_ms: 2000,
|
|
499
|
+
status: "SUCCESS",
|
|
500
|
+
metadata: { step: 2 },
|
|
501
|
+
},
|
|
502
|
+
// ... up to 100 traces
|
|
503
|
+
];
|
|
504
|
+
|
|
505
|
+
const result = await pm.traces.createBatch(traces);
|
|
506
|
+
|
|
507
|
+
console.log(`Created: ${result.summary.successful}`);
|
|
508
|
+
console.log(`Failed: ${result.summary.failed}`);
|
|
509
|
+
|
|
510
|
+
// Handle errors
|
|
511
|
+
result.errors.forEach((error) => {
|
|
512
|
+
console.error(`Trace ${error.index} failed: ${error.error}`);
|
|
513
|
+
});
|
|
514
|
+
```
|
|
515
|
+
|
|
516
|
+
#### Use Cases for Batch Operations
|
|
517
|
+
|
|
518
|
+
**1. Historical Data Import**
|
|
519
|
+
|
|
520
|
+
```typescript
|
|
521
|
+
// Import traces from legacy system
|
|
522
|
+
async function importLegacyTraces() {
|
|
523
|
+
const legacyTraces = await fetchFromLegacyDB();
|
|
524
|
+
|
|
525
|
+
// Convert to PromptMetrics format
|
|
526
|
+
const traces = legacyTraces.map((legacy) => ({
|
|
527
|
+
trace_id: legacy.id,
|
|
528
|
+
span_id: legacy.span,
|
|
529
|
+
function_name: legacy.operation,
|
|
530
|
+
start_time: new Date(legacy.started_at),
|
|
531
|
+
end_time: new Date(legacy.ended_at),
|
|
532
|
+
duration_ms: legacy.duration,
|
|
533
|
+
status: legacy.success ? "SUCCESS" : "ERROR",
|
|
534
|
+
metadata: legacy.context,
|
|
535
|
+
}));
|
|
536
|
+
|
|
537
|
+
// Import in batches of 100
|
|
538
|
+
for (let i = 0; i < traces.length; i += 100) {
|
|
539
|
+
const batch = traces.slice(i, i + 100);
|
|
540
|
+
await pm.traces.createBatch(batch);
|
|
541
|
+
}
|
|
542
|
+
}
|
|
543
|
+
```
|
|
544
|
+
|
|
545
|
+
**2. Offline Processing**
|
|
546
|
+
|
|
547
|
+
```typescript
|
|
548
|
+
// Buffer traces and send periodically
|
|
549
|
+
class TraceBuffer {
|
|
550
|
+
private buffer: CreateTraceOptions[] = [];
|
|
551
|
+
|
|
552
|
+
add(trace: CreateTraceOptions) {
|
|
553
|
+
this.buffer.push(trace);
|
|
554
|
+
|
|
555
|
+
if (this.buffer.length >= 50) {
|
|
556
|
+
this.flush();
|
|
557
|
+
}
|
|
558
|
+
}
|
|
559
|
+
|
|
560
|
+
async flush() {
|
|
561
|
+
if (this.buffer.length === 0) return;
|
|
562
|
+
|
|
563
|
+
const toSend = this.buffer.splice(0, 100);
|
|
564
|
+
await pm.traces.createBatch(toSend);
|
|
565
|
+
}
|
|
566
|
+
}
|
|
567
|
+
```
|
|
568
|
+
|
|
569
|
+
---
|
|
570
|
+
|
|
571
|
+
### LLM Request Correlation
|
|
572
|
+
|
|
573
|
+
When you call `pm.versions.run()` inside a `@traceable` function, the LLM request is automatically linked to your trace.
|
|
574
|
+
|
|
575
|
+
#### Automatic Correlation
|
|
576
|
+
|
|
577
|
+
```typescript
|
|
578
|
+
class AIService {
|
|
579
|
+
@pm.traceable({ name: "generate_support_response" })
|
|
580
|
+
async generateResponse(customerMessage: string) {
|
|
581
|
+
// This LLM call is automatically linked to the trace!
|
|
582
|
+
const result = await pm.versions.run("version_123", {
|
|
583
|
+
variables: {
|
|
584
|
+
customer_message: customerMessage,
|
|
585
|
+
context: "support",
|
|
586
|
+
},
|
|
587
|
+
});
|
|
588
|
+
|
|
589
|
+
// The prompt_log will have:
|
|
590
|
+
// - trace_id: current trace ID
|
|
591
|
+
// - span_id: current span ID
|
|
592
|
+
// - group_id: current group ID (if set)
|
|
593
|
+
|
|
594
|
+
return result;
|
|
595
|
+
}
|
|
596
|
+
}
|
|
597
|
+
```
|
|
598
|
+
|
|
599
|
+
#### Complete Workflow Example
|
|
600
|
+
|
|
601
|
+
```typescript
|
|
602
|
+
class CustomerSupportWorkflow {
|
|
603
|
+
@pm.traceable({
|
|
604
|
+
name: "handle_support_request",
|
|
605
|
+
})
|
|
606
|
+
async handleRequest(message: string, conversationId: string) {
|
|
607
|
+
// Set group for entire workflow
|
|
608
|
+
await pm.track.group({
|
|
609
|
+
group_id: conversationId,
|
|
610
|
+
group_type: "conversation",
|
|
611
|
+
});
|
|
612
|
+
|
|
613
|
+
// Step 1: Custom function (traced)
|
|
614
|
+
const enriched = await this.enrichCustomerData(message);
|
|
615
|
+
|
|
616
|
+
// Step 2: LLM call (auto-linked!)
|
|
617
|
+
const response = await pm.versions.run("support-template", {
|
|
618
|
+
variables: { message: enriched.text },
|
|
619
|
+
});
|
|
620
|
+
|
|
621
|
+
// Step 3: Custom function (traced)
|
|
622
|
+
const validation = await this.validateResponse(response);
|
|
623
|
+
|
|
624
|
+
return validation;
|
|
625
|
+
}
|
|
626
|
+
|
|
627
|
+
@pm.traceable({ name: "enrich_customer_data" })
|
|
628
|
+
async enrichCustomerData(message: string) {
|
|
629
|
+
// Custom logic
|
|
630
|
+
return enrichedData;
|
|
631
|
+
}
|
|
632
|
+
|
|
633
|
+
@pm.traceable({ name: "validate_response" })
|
|
634
|
+
async validateResponse(response: any) {
|
|
635
|
+
// Quality checks
|
|
636
|
+
await pm.track.score({ criteria: "coherence", value: 0.92 });
|
|
637
|
+
return validation;
|
|
638
|
+
}
|
|
639
|
+
}
|
|
640
|
+
```
|
|
641
|
+
|
|
642
|
+
**Result:** Complete end-to-end visibility of your AI workflow:
|
|
643
|
+
|
|
644
|
+
- Custom functions tracked as traces
|
|
645
|
+
- LLM calls tracked as prompt logs
|
|
646
|
+
- All linked by `trace_id`, `span_id`, and `group_id`
|
|
647
|
+
|
|
648
|
+
---
|
|
649
|
+
|
|
650
|
+
## API Reference
|
|
651
|
+
|
|
652
|
+
### Client Initialization
|
|
653
|
+
|
|
654
|
+
```typescript
|
|
655
|
+
new PromptMetrics(config: PromptMetricsConfig)
|
|
656
|
+
```
|
|
657
|
+
|
|
658
|
+
### Templates
|
|
659
|
+
|
|
660
|
+
```typescript
|
|
661
|
+
pm.templates.get(identifier: string, options?: GetTemplateOptions): Promise<Template>
|
|
662
|
+
pm.templates.list(options?: ListPromptsOptions): Promise<ListPromptsResponse>
|
|
663
|
+
```
|
|
664
|
+
|
|
665
|
+
### Versions
|
|
666
|
+
|
|
667
|
+
```typescript
|
|
668
|
+
pm.versions.get(versionId: string): Promise<TemplateVersion>
|
|
669
|
+
pm.versions.run(versionId: string, options?: RunVersionOptions): Promise<PromptLog>
|
|
670
|
+
pm.versions.update(versionId: string, options: UpdateVersionOptions): Promise<TemplateVersion>
|
|
671
|
+
```
|
|
672
|
+
|
|
673
|
+
### Logs
|
|
674
|
+
|
|
675
|
+
```typescript
|
|
676
|
+
pm.logs.list(options?: ListLogsOptions): Promise<{ logs: PromptLog[]; pagination: PaginationMeta }>
|
|
677
|
+
```
|
|
678
|
+
|
|
679
|
+
### Providers
|
|
680
|
+
|
|
681
|
+
```typescript
|
|
682
|
+
pm.providers.list(): Promise<ProviderWithModels[]>
|
|
683
|
+
```
|
|
684
|
+
|
|
685
|
+
### Traces
|
|
686
|
+
|
|
687
|
+
```typescript
|
|
688
|
+
pm.traces.create(options: CreateTraceOptions): Promise<Trace>
|
|
689
|
+
pm.traces.createBatch(traces: CreateTraceOptions[]): Promise<BatchCreateResult>
|
|
690
|
+
pm.traces.getBySpanId(spanId: string): Promise<Trace>
|
|
691
|
+
pm.traces.getTrace(traceId: string): Promise<TraceTreeNode[]>
|
|
692
|
+
pm.traces.getGroup(groupId: string): Promise<Trace[]>
|
|
693
|
+
pm.traces.list(options?: ListTracesOptions): Promise<TraceListResponse>
|
|
694
|
+
pm.traces.addScore(spanId: string, options: AddTraceScoreOptions): Promise<Trace>
|
|
695
|
+
pm.traces.updateMetadata(spanId: string, options: UpdateTraceMetadataOptions): Promise<Trace>
|
|
696
|
+
pm.traces.getAnalytics(options: { start_date: string; end_date: string }): Promise<TraceAnalytics>
|
|
697
|
+
```
|
|
698
|
+
|
|
699
|
+
### Tracking
|
|
700
|
+
|
|
701
|
+
```typescript
|
|
702
|
+
pm.track.metadata(metadata: Record<string, unknown>): Promise<void>
|
|
703
|
+
pm.track.score(options: { criteria: string; value: number }): Promise<void>
|
|
704
|
+
pm.track.group(options: { group_id: string; group_type: string }): Promise<void>
|
|
705
|
+
```
|
|
706
|
+
|
|
707
|
+
### Decorator
|
|
708
|
+
|
|
709
|
+
```typescript
|
|
710
|
+
@pm.traceable(options?: TraceableOptions)
|
|
711
|
+
```
|
|
712
|
+
|
|
713
|
+
---
|
|
714
|
+
|
|
715
|
+
## Examples
|
|
716
|
+
|
|
717
|
+
### Basic Template Execution
|
|
718
|
+
|
|
719
|
+
```typescript
|
|
720
|
+
import { PromptMetrics } from "@promptmetrics/sdk";
|
|
721
|
+
|
|
722
|
+
const pm = new PromptMetrics({ apiKey: process.env.PROMPTMETRICS_API_KEY });
|
|
723
|
+
|
|
724
|
+
async function main() {
|
|
725
|
+
// Get template
|
|
726
|
+
const template = await pm.templates.get("greeting");
|
|
727
|
+
|
|
728
|
+
// Run version
|
|
729
|
+
const result = await pm.versions.run(template.versions[0]._id, {
|
|
730
|
+
variables: { name: "Alice" },
|
|
731
|
+
});
|
|
732
|
+
|
|
733
|
+
console.log(result.response_object.choices[0].message.content);
|
|
734
|
+
}
|
|
735
|
+
|
|
736
|
+
main();
|
|
737
|
+
```
|
|
738
|
+
|
|
739
|
+
### Traced AI Workflow
|
|
740
|
+
|
|
741
|
+
```typescript
|
|
742
|
+
import { PromptMetrics } from "@promptmetrics/sdk";
|
|
743
|
+
|
|
744
|
+
const pm = new PromptMetrics({ apiKey: process.env.PROMPTMETRICS_API_KEY });
|
|
745
|
+
|
|
746
|
+
class AIWorkflow {
|
|
747
|
+
@pm.traceable({
|
|
748
|
+
name: "process_document",
|
|
749
|
+
tags: ["document", "processing"],
|
|
750
|
+
})
|
|
751
|
+
async processDocument(document: string) {
|
|
752
|
+
// Step 1: Extract entities
|
|
753
|
+
const entities = await this.extractEntities(document);
|
|
754
|
+
|
|
755
|
+
// Step 2: Summarize
|
|
756
|
+
const summary = await this.summarize(document);
|
|
757
|
+
|
|
758
|
+
// Step 3: Generate insights
|
|
759
|
+
const insights = await this.generateInsights(entities, summary);
|
|
760
|
+
|
|
761
|
+
return { entities, summary, insights };
|
|
762
|
+
}
|
|
763
|
+
|
|
764
|
+
@pm.traceable({ name: "extract_entities" })
|
|
765
|
+
async extractEntities(text: string) {
|
|
766
|
+
const result = await pm.versions.run("entity-extraction", {
|
|
767
|
+
variables: { text },
|
|
768
|
+
});
|
|
769
|
+
|
|
770
|
+
await pm.track.metadata({ entity_count: result.entities.length });
|
|
771
|
+
return result.entities;
|
|
772
|
+
}
|
|
773
|
+
|
|
774
|
+
@pm.traceable({ name: "summarize" })
|
|
775
|
+
async summarize(text: string) {
|
|
776
|
+
const result = await pm.versions.run("summarization", {
|
|
777
|
+
variables: { text },
|
|
778
|
+
});
|
|
779
|
+
|
|
780
|
+
await pm.track.score({ criteria: "conciseness", value: 0.85 });
|
|
781
|
+
return result.summary;
|
|
782
|
+
}
|
|
783
|
+
|
|
784
|
+
@pm.traceable({ name: "generate_insights" })
|
|
785
|
+
async generateInsights(entities: any[], summary: string) {
|
|
786
|
+
const result = await pm.versions.run("insight-generation", {
|
|
787
|
+
variables: { entities: JSON.stringify(entities), summary },
|
|
788
|
+
});
|
|
789
|
+
|
|
790
|
+
return result.insights;
|
|
791
|
+
}
|
|
792
|
+
}
|
|
793
|
+
|
|
794
|
+
const workflow = new AIWorkflow();
|
|
795
|
+
await workflow.processDocument("Long document text...");
|
|
796
|
+
```
|
|
797
|
+
|
|
798
|
+
### Conversation Tracking
|
|
799
|
+
|
|
800
|
+
```typescript
|
|
801
|
+
import { PromptMetrics } from "@promptmetrics/sdk";
|
|
802
|
+
|
|
803
|
+
const pm = new PromptMetrics({ apiKey: process.env.PROMPTMETRICS_API_KEY });
|
|
804
|
+
|
|
805
|
+
class ChatBot {
|
|
806
|
+
@pm.traceable({
|
|
807
|
+
name: "handle_message",
|
|
808
|
+
})
|
|
809
|
+
async handleMessage(message: string, conversationId: string) {
|
|
810
|
+
// Set conversation group
|
|
811
|
+
await pm.track.group({
|
|
812
|
+
group_id: conversationId,
|
|
813
|
+
group_type: "conversation",
|
|
814
|
+
});
|
|
815
|
+
|
|
816
|
+
// Generate response
|
|
817
|
+
const result = await pm.versions.run("chatbot-template", {
|
|
818
|
+
variables: { message },
|
|
819
|
+
});
|
|
820
|
+
|
|
821
|
+
// Track quality
|
|
822
|
+
await pm.track.score({ criteria: "helpfulness", value: 0.9 });
|
|
823
|
+
|
|
824
|
+
return result.response_object.choices[0].message.content;
|
|
825
|
+
}
|
|
826
|
+
}
|
|
827
|
+
|
|
828
|
+
const bot = new ChatBot();
|
|
829
|
+
await bot.handleMessage("Hello!", "conv_123");
|
|
830
|
+
await bot.handleMessage("How are you?", "conv_123");
|
|
831
|
+
// Both messages grouped under conversation "conv_123"
|
|
832
|
+
```
|
|
833
|
+
|
|
834
|
+
---
|
|
835
|
+
|
|
836
|
+
## Support
|
|
837
|
+
|
|
838
|
+
- **Documentation**: [docs.promptmetrics.com](https://docs.promptmetrics.com)
|
|
839
|
+
- **GitHub Issues**: [github.com/Xomatic/promptmetrics-sdk/issues](https://github.com/Xomatic/promptmetrics-sdk/issues)
|
|
840
|
+
- **Email**: support@promptmetrics.com
|
|
841
|
+
|
|
842
|
+
---
|
|
843
|
+
|
|
844
|
+
## License
|
|
845
|
+
|
|
846
|
+
MIT
|
|
847
|
+
|
|
848
|
+
---
|
|
849
|
+
|
|
850
|
+
## Additional Resources
|
|
851
|
+
|
|
852
|
+
- **[TRACING.md](./TRACING.md)** - Comprehensive tracing guide with advanced examples
|
|
853
|
+
- **[Examples](./examples/)** - Code examples for common use cases
|
|
854
|
+
- **[Changelog](./CHANGELOG.md)** - Version history and updates
|