@warpmetrics/warp 0.0.2

package/README.md ADDED
@@ -0,0 +1,142 @@
+ # warp
+
+ Measure your agents, not your LLM calls.
+
+ Warp is a lightweight SDK that wraps your existing OpenAI or Anthropic client and gives you full observability over your AI agent's execution — runs, groups, costs, and outcomes — with zero config changes to your LLM calls.
+
+ ## Install
+
+ ```bash
+ npm install @warpmetrics/warp
+ ```
+
+ ## Quick start
+
+ ```js
+ import OpenAI from 'openai';
+ import { warp, run, group, add, outcome } from '@warpmetrics/warp';
+
+ const openai = warp(new OpenAI(), { apiKey: 'wm_...' });
+
+ const r = run('code-review', { name: 'Review PR #42' });
+ const planning = group('planning');
+
+ const response = await openai.chat.completions.create({
+   model: 'gpt-4o',
+   messages: [{ role: 'user', content: 'Review this PR...' }],
+ });
+
+ add(planning, response);
+ add(r, planning);
+ outcome(r, 'completed', { reason: 'Approved' });
+ ```
+
+ Every LLM call is automatically tracked. You structure the execution with `run` and `group`, then record the result with `outcome`.
+
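+ Once the run is structured you can also inspect it in code — a minimal sketch building on the quick start, using the `ref` and `cost` helpers documented below:
+
+ ```js
+ import { ref, cost } from '@warpmetrics/warp';
+
+ console.log(ref(r));  // stable string ID for the run, e.g. 'wm_run_...'
+ console.log(cost(r)); // estimated USD across every call in the run
+ ```
+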
+ ## API
+
+ ### `warp(client, options?)`
+
+ Wrap an OpenAI or Anthropic client. Every call to `.chat.completions.create()` or `.messages.create()` is automatically intercepted and tracked.
+
+ ```js
+ const openai = warp(new OpenAI(), { apiKey: 'wm_...' });
+ const anthropic = warp(new Anthropic(), { apiKey: 'wm_...' });
+ ```
+
+ Options are only needed on the first call; after that, config is shared across all wrapped clients (see the sketch after the table).
+
+ | Option | Type | Default | Description |
+ |---|---|---|---|
+ | `apiKey` | `string` | `WARPMETRICS_API_KEY` env var | Your Warpmetrics API key |
+ | `baseUrl` | `string` | `https://api.warpmetrics.com` | API endpoint |
+ | `enabled` | `boolean` | `true` | Set to `false` to disable tracking entirely |
+ | `debug` | `boolean` | `false` | Log events to the console |
+ | `flushInterval` | `number` | `1000` | Auto-flush interval in ms |
+ | `maxBatchSize` | `number` | `100` | Max events per batch |
+
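+ For example, a minimal sketch of configuring once and wrapping several clients (assumes both provider SDKs are installed):
+
+ ```js
+ import OpenAI from 'openai';
+ import Anthropic from '@anthropic-ai/sdk';
+ import { warp } from '@warpmetrics/warp';
+
+ // The first warp() call sets the shared config.
+ const openai = warp(new OpenAI(), { apiKey: 'wm_...', debug: true });
+
+ // Later wraps reuse it — no options needed.
+ const anthropic = warp(new Anthropic());
+ ```
+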
+ ### `run(label, options?)`
+
+ Create a run — the top-level unit that tracks one agent execution.
+
+ ```js
+ const r = run('code-review', { name: 'PR #42', link: 'https://github.com/org/repo/pull/42' });
+ ```
+
+ ### `group(label, options?)`
+
+ Create a group — a logical phase or step inside a run.
+
+ ```js
+ const planning = group('planning', { name: 'Planning phase' });
+ const coding = group('coding');
+ ```
+
+ ### `add(target, ...items)`
+
+ Link groups or LLM responses to a run or group.
+
+ ```js
+ add(planning, response1, response2); // LLM responses to a group
+ add(r, planning, coding); // groups to a run
+ add(planning, subGroup); // groups can nest
+ ```
+
+ ### `outcome(target, name, options?)`
+
+ Record an outcome on any tracked target.
+
+ ```js
+ outcome(r, 'completed', {
+   reason: 'All checks passed',
+   source: 'ci',
+   tags: ['approved'],
+   metadata: { reviewer: 'alice' },
+ });
+ ```
+
+ ### `ref(target)`
+
+ Resolve any target (run, group, or LLM response) to its string ID. Useful for passing IDs to your frontend or storing them.
+
+ ```js
+ ref(r) // 'wm_run_a1b2c3d4e5f6'
+ ref(response) // 'wm_call_x9y8z7w6v5u4'
+ ref('wm_run_abc') // pass-through
+ ```
+
+ ### `cost(target)`
+
+ Get the estimated cost in USD for any target. Aggregates across all nested calls for runs and groups.
+
+ ```js
+ cost(response) // 0.0012
+ cost(r) // 0.0036 (sum of all calls in the run)
+ ```
+
+ ### `flush()`
+
+ Manually flush pending events. Events are auto-flushed on an interval and on process exit, but you can force it.
+
+ ```js
+ await flush();
+ ```
+
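+ For example, in a short-lived environment such as a serverless handler (a sketch — adapt to your runtime), flush before returning so queued events are not lost:
+
+ ```js
+ export async function handler(event) {
+   const r = run('handle-event');
+   // ...wrapped LLM calls, add(), outcome()...
+   await flush(); // send everything before the handler returns
+   return { ok: true };
+ }
+ ```
+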
+ ## Supported providers
+
+ - **OpenAI** — `client.chat.completions.create()` and `client.responses.create()` (see the sketch below)
+ - **Anthropic** — `client.messages.create()`
+
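+ A minimal sketch of a tracked Responses API call, assuming the same setup as the quick start:
+
+ ```js
+ const res = await openai.responses.create({
+   model: 'gpt-4o',
+   input: 'Summarize the diff...',
+ });
+
+ add(planning, res); // linked and costed just like a chat completion
+ ```
+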
+ Need another provider? [Open an issue](https://github.com/warpmetrics/warp/issues).
+
+ ## Environment variables
+
+ | Variable | Description |
+ |---|---|
+ | `WARPMETRICS_API_KEY` | API key (fallback if not passed to `warp()`) |
+ | `WARPMETRICS_API_URL` | Custom API endpoint |
+ | `WARPMETRICS_DEBUG` | Set to `"true"` to enable debug logging |
+
+ ## License
+
+ MIT
package/package.json ADDED
@@ -0,0 +1,47 @@
+ {
+   "name": "@warpmetrics/warp",
+   "version": "0.0.2",
+   "description": "Measure your agents, not your LLM calls.",
+   "type": "module",
+   "main": "src/index.js",
+   "exports": {
+     ".": {
+       "import": "./src/index.js",
+       "types": "./src/index.d.ts"
+     }
+   },
+   "files": [
+     "src"
+   ],
+   "scripts": {
+     "test": "vitest run",
+     "test:watch": "vitest",
+     "test:coverage": "vitest run --coverage",
+     "release:patch": "npm version patch && git push origin main --tags",
+     "release:minor": "npm version minor && git push origin main --tags"
+   },
+   "keywords": [
+     "ai",
+     "agents",
+     "llm",
+     "observability",
+     "openai",
+     "anthropic",
+     "claude",
+     "gpt",
+     "tracing",
+     "monitoring"
+   ],
+   "license": "MIT",
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/nikolaionken/warpmetrics"
+   },
+   "dependencies": {
+     "ulid": "^3.0.2"
+   },
+   "devDependencies": {
+     "@vitest/coverage-v8": "^1.6.1",
+     "vitest": "^1.2.0"
+   }
+ }
@@ -0,0 +1,17 @@
+ // Warpmetrics SDK — Registries
+ // Module-level state that tracks runs, groups, and calls in memory.
+
+ /** @type {Map<string, object>} run id → run data */
+ export const runRegistry = new Map();
+
+ /** @type {Map<string, object>} group id → group data */
+ export const groupRegistry = new Map();
+
+ /** @type {WeakMap<object, string>} LLM response object → call id */
+ export const responseRegistry = new WeakMap();
+
+ /** @type {WeakMap<object, number>} LLM response object → cost in USD */
+ export const costRegistry = new WeakMap();
+
+ /** @type {Map<string, number>} call id → cost in USD (for aggregation) */
+ export const costByCallId = new Map();
@@ -0,0 +1,196 @@
+ // Warpmetrics SDK — Transport
+ // Batches events and flushes them to the API over HTTP.
+
+ import { createRequire } from 'module';
+ const { version: SDK_VERSION } = createRequire(import.meta.url)('../../package.json');
+
+ const env = typeof process !== 'undefined' ? process.env : {};
+
+ let config = {
+   apiKey: env.WARPMETRICS_API_KEY || null,
+   baseUrl: env.WARPMETRICS_API_URL || 'https://api.warpmetrics.com',
+   enabled: true,
+   flushInterval: 1000,
+   maxBatchSize: 100,
+   debug: env.WARPMETRICS_DEBUG === 'true',
+ };
+
+ const queue = {
+   runs: [],
+   groups: [],
+   calls: [],
+   links: [],
+   outcomes: [],
+ };
+
+ let flushTimeout = null;
+
+ // ---------------------------------------------------------------------------
+ // Config
+ // ---------------------------------------------------------------------------
+
+ export function setConfig(updates) {
+   config = { ...config, ...updates };
+ }
+
+ export function getConfig() {
+   return config;
+ }
+
+ /** Clear all pending events without sending. For testing only. */
+ export function clearQueue() {
+   queue.runs.length = 0;
+   queue.groups.length = 0;
+   queue.calls.length = 0;
+   queue.links.length = 0;
+   queue.outcomes.length = 0;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Queue
+ // ---------------------------------------------------------------------------
+
+ function enqueue(type, event) {
+   if (!config.enabled) return;
+
+   queue[type].push(event);
+
+   const total = queue.runs.length + queue.groups.length + queue.calls.length
+     + queue.links.length + queue.outcomes.length;
+
+   if (total >= config.maxBatchSize) {
+     flush();
+   } else if (!flushTimeout) {
+     flushTimeout = setTimeout(flush, config.flushInterval);
+   }
+ }
+
+ // ---------------------------------------------------------------------------
+ // Flush
+ // ---------------------------------------------------------------------------
+
+ export async function flush() {
+   if (flushTimeout) {
+     clearTimeout(flushTimeout);
+     flushTimeout = null;
+   }
+
+   const batch = {
+     runs: queue.runs.splice(0),
+     groups: queue.groups.splice(0),
+     calls: queue.calls.splice(0),
+     links: queue.links.splice(0),
+     outcomes: queue.outcomes.splice(0),
+   };
+
+   const total = batch.runs.length + batch.groups.length + batch.calls.length
+     + batch.links.length + batch.outcomes.length;
+
+   if (total === 0) return;
+
+   if (!config.apiKey) {
+     if (config.debug) {
+       console.log('[warpmetrics] No API key — events discarded.');
+     }
+     return;
+   }
+
+   if (config.debug) {
+     console.log(
+       `[warpmetrics] Flushing ${total} events`
+       + ` (runs=${batch.runs.length} groups=${batch.groups.length}`
+       + ` calls=${batch.calls.length} links=${batch.links.length}`
+       + ` outcomes=${batch.outcomes.length})`
+     );
+   }
+
+   try {
+     const res = await fetch(`${config.baseUrl}/v1/events`, {
+       method: 'POST',
+       headers: {
+         'Content-Type': 'application/json',
+         'Authorization': `Bearer ${config.apiKey}`,
+         'X-SDK-Version': SDK_VERSION,
+       },
+       body: JSON.stringify(batch),
+     });
+
+     if (!res.ok) {
+       const body = await res.text().catch(() => '');
+       throw new Error(`HTTP ${res.status}: ${body}`);
+     }
+
+     if (config.debug) {
+       const result = await res.json();
+       console.log(`[warpmetrics] Flush OK — received=${result.received} processed=${result.processed}`);
+     }
+   } catch (err) {
+     if (config.debug) {
+       console.error('[warpmetrics] Flush failed:', err.message);
+     }
+     // Re-queue so nothing is lost.
+     queue.runs.unshift(...batch.runs);
+     queue.groups.unshift(...batch.groups);
+     queue.calls.unshift(...batch.calls);
+     queue.links.unshift(...batch.links);
+     queue.outcomes.unshift(...batch.outcomes);
+   }
+ }
+
+ // ---------------------------------------------------------------------------
+ // Log helpers — called by the public API modules
+ // ---------------------------------------------------------------------------
+
+ export function logRun(data) {
+   enqueue('runs', {
+     id: data.id,
+     label: data.label,
+     link: data.link,
+     name: data.name,
+     timestamp: new Date().toISOString(),
+   });
+ }
+
+ export function logGroup(data) {
+   enqueue('groups', {
+     id: data.id,
+     label: data.label,
+     name: data.name,
+     timestamp: new Date().toISOString(),
+   });
+ }
+
+ export function logCall(data) {
+   enqueue('calls', data);
+ }
+
+ export function logLink(data) {
+   enqueue('links', {
+     parentId: data.parentId,
+     childId: data.childId,
+     type: data.type,
+     timestamp: new Date().toISOString(),
+   });
+ }
+
+ export function logOutcome(data) {
+   enqueue('outcomes', {
+     targetId: data.targetId,
+     name: data.name,
+     reason: data.reason,
+     source: data.source,
+     tags: data.tags,
+     metadata: data.metadata,
+     timestamp: new Date().toISOString(),
+   });
+ }
+
+ // ---------------------------------------------------------------------------
+ // Auto-flush on process exit (Node.js only)
+ // ---------------------------------------------------------------------------
+
+ if (typeof process !== 'undefined' && process.on) {
+   process.on('beforeExit', flush);
+   process.on('SIGTERM', () => flush().then(() => process.exit(0)));
+   process.on('SIGINT', () => flush().then(() => process.exit(0)));
+ }
@@ -0,0 +1,41 @@
+ import { describe, it, expect } from 'vitest';
+ import { createRequire } from 'module';
+ import { run, flush } from '../index.js';
+ import { setupBeforeEach } from '../../test/setup.js';
+
+ const { version } = createRequire(import.meta.url)('../../package.json');
+
+ setupBeforeEach();
+
+ describe('transport', () => {
+   it('sends batched events with correct auth header', async () => {
+     run('test');
+     await flush();
+
+     expect(global.fetch).toHaveBeenCalledTimes(1);
+     const [url, opts] = global.fetch.mock.calls[0];
+     expect(url).toBe('https://api.warpmetrics.com/v1/events');
+     expect(opts.headers['Authorization']).toBe('Bearer wm_test_123');
+     expect(opts.headers['X-SDK-Version']).toBe(version);
+   });
+
+   it('re-queues events on failure and succeeds on retry', async () => {
+     global.fetch.mockRejectedValueOnce(new Error('Network error'));
+
+     run('test');
+     await flush();
+     expect(global.fetch).toHaveBeenCalledTimes(1);
+
+     global.fetch.mockResolvedValueOnce({ ok: true, json: () => Promise.resolve({}) });
+     await flush();
+     expect(global.fetch).toHaveBeenCalledTimes(2);
+
+     const body = JSON.parse(global.fetch.mock.calls[1][1].body);
+     expect(body.runs).toHaveLength(1);
+   });
+
+   it('skips flush when queue is empty', async () => {
+     await flush();
+     expect(global.fetch).not.toHaveBeenCalled();
+   });
+ });
@@ -0,0 +1,58 @@
+ // Warpmetrics SDK — Utilities
+
+ import { monotonicFactory } from 'ulid';
+
+ const ulid = monotonicFactory();
+
+ /**
+  * Generate a prefixed ULID-based unique ID (lowercase).
+  * @param {'run' | 'grp' | 'call'} prefix
+  * @returns {string} e.g. "wm_run_01jkx3ndek0gh4r5tmqp9a3bcv"
+  */
+ export function generateId(prefix) {
+   return `wm_${prefix}_${ulid().toLowerCase()}`;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Pricing (per 1M tokens, USD)
+ // Best-effort — returns 0 for unknown models.
+ // ---------------------------------------------------------------------------
+
+ const PRICING = {
+   // OpenAI
+   'gpt-4o': { prompt: 2.50, completion: 10.00 },
+   'gpt-4o-2024-11-20': { prompt: 2.50, completion: 10.00 },
+   'gpt-4o-mini': { prompt: 0.15, completion: 0.60 },
+   'gpt-4o-mini-2024-07-18': { prompt: 0.15, completion: 0.60 },
+   'gpt-4-turbo': { prompt: 10.00, completion: 30.00 },
+   'gpt-4-turbo-preview': { prompt: 10.00, completion: 30.00 },
+   'gpt-4': { prompt: 30.00, completion: 60.00 },
+   'gpt-3.5-turbo': { prompt: 0.50, completion: 1.50 },
+   'o1': { prompt: 15.00, completion: 60.00 },
+   'o1-mini': { prompt: 3.00, completion: 12.00 },
+   'o3-mini': { prompt: 1.10, completion: 4.40 },
+
+   // Anthropic
+   'claude-sonnet-4-5-20250514': { prompt: 3.00, completion: 15.00 },
+   'claude-opus-4-6': { prompt: 15.00, completion: 75.00 },
+   'claude-3-5-sonnet-20241022': { prompt: 3.00, completion: 15.00 },
+   'claude-3-5-sonnet-latest': { prompt: 3.00, completion: 15.00 },
+   'claude-3-5-haiku-20241022': { prompt: 0.80, completion: 4.00 },
+   'claude-3-5-haiku-latest': { prompt: 0.80, completion: 4.00 },
+   'claude-3-opus-20240229': { prompt: 15.00, completion: 75.00 },
+   'claude-3-haiku-20240307': { prompt: 0.25, completion: 1.25 },
+ };
+
+ /**
+  * Estimate cost from model name and token counts.
+  * Returns 0 for unknown models.
+  * @param {string} model
+  * @param {{ prompt: number, completion: number }} tokens
+  * @returns {number} cost in USD
+  */
+ export function calculateCost(model, tokens) {
+   const p = PRICING[model];
+   if (!p) return 0;
+   return (tokens.prompt / 1_000_000) * p.prompt
+     + (tokens.completion / 1_000_000) * p.completion;
+ }
@@ -0,0 +1,155 @@
+ // Warpmetrics SDK — warp()
+ // Wraps an LLM client so every API call is automatically tracked.
+
+ import { generateId, calculateCost } from './utils.js';
+ import { responseRegistry, costRegistry, costByCallId } from './registry.js';
+ import { logCall, setConfig, getConfig } from './transport.js';
+ import * as openai from '../providers/openai.js';
+ import * as anthropic from '../providers/anthropic.js';
+
+ const providers = [openai, anthropic];
+
+ // ---------------------------------------------------------------------------
+ // Provider resolution
+ // ---------------------------------------------------------------------------
+
+ function findProvider(client) {
+   return providers.find(p => p.detect(client)) || null;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Interceptor
+ // ---------------------------------------------------------------------------
+
+ function createInterceptor(originalFn, context, provider) {
+   return async function (...args) {
+     const start = Date.now();
+     const callId = generateId('call');
+     const model = args[0]?.model || 'unknown';
+     const messages = args[0]?.messages || args[0]?.input || [];
+     const tools = args[0]?.tools || null;
+     const stream = args[0]?.stream === true;
+
+     try {
+       const result = await originalFn.apply(context, args);
+       const latency = Date.now() - start;
+
+       if (stream) {
+         return wrapStream(result, { callId, provider, model, messages, tools, start });
+       }
+
+       const ext = provider.extract(result);
+       const cost = calculateCost(model, ext.tokens);
+
+       logCall({
+         id: callId, provider: provider.name, model, messages,
+         response: ext.response,
+         tools: tools ? tools.map(t => t.function?.name || t.name).filter(Boolean) : null,
+         toolCalls: ext.toolCalls,
+         tokens: ext.tokens, cost, latency,
+         timestamp: new Date().toISOString(),
+         status: 'success',
+       });
+
+       responseRegistry.set(result, callId);
+       costRegistry.set(result, cost);
+       costByCallId.set(callId, cost);
+
+       return result;
+     } catch (error) {
+       logCall({
+         id: callId, provider: provider.name, model, messages,
+         error: error.message,
+         latency: Date.now() - start,
+         timestamp: new Date().toISOString(),
+         status: 'error',
+       });
+       throw error;
+     }
+   };
+ }
+
+ // ---------------------------------------------------------------------------
+ // Streaming wrapper
+ // ---------------------------------------------------------------------------
+
+ function wrapStream(stream, ctx) {
+   const wrapped = {
+     async *[Symbol.asyncIterator]() {
+       let content = '';
+       let usage = null;
+
+       for await (const chunk of stream) {
+         const delta = ctx.provider.extractStreamDelta(chunk);
+         if (delta.content) content += delta.content;
+         if (delta.usage) usage = delta.usage;
+         yield chunk;
+       }
+
+       const tokens = usage
+         ? ctx.provider.normalizeUsage(usage)
+         : { prompt: 0, completion: 0, total: 0 };
+
+       const cost = calculateCost(ctx.model, tokens);
+
+       logCall({
+         id: ctx.callId, provider: ctx.provider.name, model: ctx.model, messages: ctx.messages,
+         response: content,
+         tools: ctx.tools ? ctx.tools.map(t => t.function?.name || t.name).filter(Boolean) : null,
+         tokens, cost,
+         latency: Date.now() - ctx.start,
+         timestamp: new Date().toISOString(),
+         status: 'success',
+       });
+
+       responseRegistry.set(wrapped, ctx.callId);
+       costRegistry.set(wrapped, cost);
+       costByCallId.set(ctx.callId, cost);
+     },
+   };
+   return wrapped;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Public API
+ // ---------------------------------------------------------------------------
+
+ /**
+  * Wrap an LLM client to automatically track every API call.
+  *
+  * @param {object} client — OpenAI or Anthropic client instance
+  * @param {object} [options]
+  * @param {string} [options.apiKey]
+  * @param {string} [options.baseUrl]
+  * @param {boolean} [options.enabled]
+  * @param {number} [options.flushInterval]
+  * @param {number} [options.maxBatchSize]
+  * @param {boolean} [options.debug]
+  * @returns {object} — the same client, proxied
+  */
+ export function warp(client, options) {
+   if (options) {
+     const cfg = getConfig();
+     setConfig({
+       apiKey: options.apiKey ?? cfg.apiKey,
+       baseUrl: options.baseUrl ?? cfg.baseUrl,
+       enabled: options.enabled ?? cfg.enabled,
+       flushInterval: options.flushInterval ?? cfg.flushInterval,
+       maxBatchSize: options.maxBatchSize ?? cfg.maxBatchSize,
+       debug: options.debug ?? cfg.debug,
+     });
+   }
+
+   const provider = findProvider(client);
+
+   if (!provider) {
+     if (getConfig().debug) {
+       console.warn('[warpmetrics] Unknown client type — supported: OpenAI, Anthropic.');
+     }
+     return client;
+   }
+
+   return provider.proxy(client, (originalFn, context) =>
+     createInterceptor(originalFn, context, provider),
+   );
+ }