@warpmetrics/warp 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,101 @@
1
import { describe, it, expect, vi } from 'vitest';
import { warp, flush } from '../index.js';
import { responseRegistry, costRegistry } from '../core/registry.js';
import {
  setupBeforeEach,
  createMockOpenAI, createMockAnthropic,
  OPENAI_RESPONSE, OPENAI_RESPONSES_API_RESPONSE, ANTHROPIC_RESPONSE,
} from '../../test/setup.js';

setupBeforeEach();

// Decode the JSON payload of the first batch POSTed by flush().
const flushedPayload = () => JSON.parse(global.fetch.mock.calls[0][1].body);

describe('warp() — OpenAI', () => {
  it('intercepts chat.completions.create and tracks the call', async () => {
    const tracked = warp(createMockOpenAI(OPENAI_RESPONSE));

    const reply = await tracked.chat.completions.create({
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: 'Hi' }],
    });

    // The wrapped client must be transparent to the caller…
    expect(reply.choices[0].message.content).toBe('Hello!');
    // …while registering the response for ref()/cost() lookups.
    expect(responseRegistry.has(reply)).toBe(true);
    expect(costRegistry.has(reply)).toBe(true);

    await flush();
    const payload = flushedPayload();
    expect(payload.calls).toHaveLength(1);
    expect(payload.calls[0].model).toBe('gpt-4o-mini');
    expect(payload.calls[0].tokens.total).toBe(15);
    expect(payload.calls[0].status).toBe('success');
  });

  it('tracks errors without swallowing them', async () => {
    const failure = new Error('Rate limit exceeded');
    const mockClient = createMockOpenAI(null);
    mockClient.chat.completions.create = vi.fn().mockRejectedValue(failure);
    const tracked = warp(mockClient);

    // The original rejection must propagate to the caller unchanged.
    await expect(
      tracked.chat.completions.create({ model: 'gpt-4o', messages: [] })
    ).rejects.toThrow('Rate limit exceeded');

    await flush();
    const payload = flushedPayload();
    expect(payload.calls).toHaveLength(1);
    expect(payload.calls[0].status).toBe('error');
    expect(payload.calls[0].error).toBe('Rate limit exceeded');
  });

  it('accepts config options on first call', () => {
    warp(createMockOpenAI(OPENAI_RESPONSE), { apiKey: 'wm_live_xyz', debug: true });
  });
});

describe('warp() — OpenAI Responses API', () => {
  it('intercepts responses.create and tracks the call', async () => {
    const tracked = warp(createMockOpenAI(OPENAI_RESPONSE, OPENAI_RESPONSES_API_RESPONSE));

    const reply = await tracked.responses.create({
      model: 'gpt-4o',
      input: 'Hi',
    });

    expect(reply.output_text).toBe('Hello from Responses API!');
    expect(responseRegistry.has(reply)).toBe(true);
    expect(costRegistry.has(reply)).toBe(true);

    await flush();
    const payload = flushedPayload();
    expect(payload.calls).toHaveLength(1);
    expect(payload.calls[0].model).toBe('gpt-4o');
    expect(payload.calls[0].tokens.prompt).toBe(8);
    expect(payload.calls[0].tokens.completion).toBe(6);
    expect(payload.calls[0].status).toBe('success');
  });
});

describe('warp() — Anthropic', () => {
  it('intercepts messages.create and tracks the call', async () => {
    const tracked = warp(createMockAnthropic(ANTHROPIC_RESPONSE));

    const reply = await tracked.messages.create({
      model: 'claude-3-5-sonnet-latest',
      messages: [{ role: 'user', content: 'Hi' }],
    });

    expect(reply.content[0].text).toBe('Hello from Claude!');
    expect(responseRegistry.has(reply)).toBe(true);

    await flush();
    const payload = flushedPayload();
    expect(payload.calls).toHaveLength(1);
    expect(payload.calls[0].provider).toBe('anthropic');
    expect(payload.calls[0].tokens.prompt).toBe(12);
    expect(payload.calls[0].tokens.completion).toBe(8);
  });
});
@@ -0,0 +1,41 @@
1
import { describe, it, expect } from 'vitest';
import { warp, run, group, add, outcome, ref, cost, flush } from './index.js';
import { setupBeforeEach, createMockOpenAI, OPENAI_RESPONSE } from '../test/setup.js';

setupBeforeEach();

describe('end-to-end', () => {
  it('full agent flow: warp -> run -> group -> add -> outcome -> flush', async () => {
    const openai = warp(createMockOpenAI(OPENAI_RESPONSE));

    const agentRun = run('code-review', { link: 'ticket:123', name: 'Review PR' });
    const planPhase = group('planning', { name: 'Plan Phase' });

    const reply = await openai.chat.completions.create({
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: 'Plan a review' }],
    });

    // Build the tree bottom-up: call into phase, phase into run.
    add(planPhase, reply);
    add(agentRun, planPhase);
    outcome(agentRun, 'completed', { reason: 'Looks good', source: 'ci' });

    expect(ref(agentRun)).toBe(agentRun.id);
    expect(ref(planPhase)).toBe(planPhase.id);
    expect(ref(reply)).toMatch(/^wm_call_/);
    // A single nested call means the run's aggregate equals the call's cost.
    expect(cost(agentRun)).toBeGreaterThan(0);
    expect(cost(reply)).toBeGreaterThan(0);
    expect(cost(agentRun)).toEqual(cost(reply));

    await flush();

    const payload = JSON.parse(global.fetch.mock.calls[0][1].body);
    expect(payload.runs).toHaveLength(1);
    expect(payload.groups).toHaveLength(1);
    expect(payload.calls).toHaveLength(1);
    expect(payload.links).toHaveLength(2);
    expect(payload.outcomes).toHaveLength(1);
    expect(payload.outcomes[0].name).toBe('completed');
  });
});
package/src/index.d.ts ADDED
@@ -0,0 +1,74 @@
1
+ // Warpmetrics SDK — Type Definitions
2
+
3
+ export interface Run {
4
+ readonly id: string;
5
+ readonly _type: 'run';
6
+ }
7
+
8
+ export interface Group {
9
+ readonly id: string;
10
+ readonly _type: 'group';
11
+ }
12
+
13
+ export interface WarpOptions {
14
+ apiKey?: string;
15
+ baseUrl?: string;
16
+ enabled?: boolean;
17
+ flushInterval?: number;
18
+ maxBatchSize?: number;
19
+ debug?: boolean;
20
+ }
21
+
22
+ export interface RunOptions {
23
+ /** External reference (e.g. "ticket:PROJ-101", a PR URL, etc.) */
24
+ link?: string;
25
+ /** Human-readable name */
26
+ name?: string;
27
+ }
28
+
29
+ export interface GroupOptions {
30
+ /** Human-readable name */
31
+ name?: string;
32
+ }
33
+
34
+ export interface OutcomeOptions {
35
+ /** Why this outcome occurred */
36
+ reason?: string;
37
+ /** Who / what recorded this outcome */
38
+ source?: string;
39
+ /** Categorisation tags */
40
+ tags?: string[];
41
+ /** Arbitrary extra data */
42
+ metadata?: Record<string, any>;
43
+ }
44
+
45
+ /**
46
+ * Wrap an LLM client to automatically track every API call.
47
+ * Pass options on the first call to configure the SDK; env vars are used as defaults.
48
+ */
49
+ export function warp<T>(client: T, options?: WarpOptions): T;
50
+
51
+ /** Create a run — the top-level unit that tracks one agent execution. */
52
+ export function run(label: string, options?: RunOptions): Run;
53
+
54
+ /** Create a group — a logical phase or step inside a run. */
55
+ export function group(label: string, options?: GroupOptions): Group;
56
+
57
+ /** Add items (groups or LLM responses) to a run or group. */
58
+ export function add(target: Run | Group | string, ...items: any[]): void;
59
+
60
+ /** Record an outcome on any tracked target. */
61
+ export function outcome(
62
+ target: Run | Group | object | string,
63
+ name: string,
64
+ options?: OutcomeOptions,
65
+ ): void;
66
+
67
+ /** Resolve any trackable target to its string ID. */
68
+ export function ref(target: Run | Group | object | string): string | undefined;
69
+
70
+ /** Get the cost in USD for any tracked target. */
71
+ export function cost(target: Run | Group | object | string): number;
72
+
73
+ /** Manually flush pending events to the API. */
74
+ export function flush(): Promise<void>;
package/src/index.js ADDED
@@ -0,0 +1,19 @@
1
// Warpmetrics SDK
// Measure your agents, not your LLM calls.
//
// Public API:
//   warp(client, options?)          — wrap an LLM client so its calls are tracked
//   run(label, options?)            — create a run (one agent execution)
//   group(label, options?)          — create a group (a phase inside a run)
//   add(target, ...items)           — attach groups / calls to a run or group
//   outcome(target, name, options?) — record a result on a tracked target
//   ref(target)                     — resolve a target to its tracking ID
//   cost(target)                    — total cost in USD for a target
//   flush()                         — push pending events to the API

export { warp } from './core/warp.js';
export { run } from './trace/run.js';
export { group } from './trace/group.js';
export { add } from './trace/add.js';
export { outcome } from './trace/outcome.js';
export { ref } from './trace/ref.js';
export { cost } from './trace/cost.js';
export { flush } from './core/transport.js';
@@ -0,0 +1,51 @@
1
+ // Warpmetrics SDK — Anthropic Provider
2
+
3
// Provider identifier attached to every tracked call record.
export const name = 'anthropic';

/**
 * Heuristic client detection: the official Anthropic SDK exposes a class
 * whose constructor is literally named `Anthropic`.
 */
export function detect(client) {
  const ctor = client?.constructor;
  return ctor != null && ctor.name === 'Anthropic';
}
8
+
9
/**
 * Normalise a non-streaming Anthropic messages.create() result into the
 * provider-agnostic shape `{ response, tokens, toolCalls }`.
 *
 * @param {object} result — raw Anthropic API response
 * @returns {{ response: string,
 *             tokens: { prompt: number, completion: number, total: number },
 *             toolCalls: Array<{ id: string, name: string, arguments: string }> | null }}
 */
export function extract(result) {
  const prompt = result?.usage?.input_tokens || 0;
  const completion = result?.usage?.output_tokens || 0;
  const blocks = Array.isArray(result?.content) ? result.content : [];

  const text = blocks.filter(c => c.type === 'text').map(c => c.text).join('');

  // Anthropic reports tool invocations as `tool_use` content blocks. Surface
  // them in the same shape the OpenAI provider produces so downstream
  // consumers are provider-agnostic; `input` is an object, so serialise it to
  // match OpenAI's stringified `arguments`.
  const toolUses = blocks.filter(c => c.type === 'tool_use');

  return {
    response: text,
    tokens: { prompt, completion, total: prompt + completion },
    toolCalls: toolUses.length > 0
      ? toolUses.map(t => ({ id: t.id, name: t.name, arguments: JSON.stringify(t.input) }))
      : null,
  };
}
20
+
21
/**
 * Pull the incremental text / usage payload out of one Anthropic streaming
 * event. Either side of the result is null when the event does not carry it.
 */
export function extractStreamDelta(chunk) {
  switch (chunk.type) {
    case 'content_block_delta':
      // Text arrives incrementally on content_block_delta events.
      return { content: chunk.delta?.text || null, usage: null };
    case 'message_delta':
      // Token usage is reported on message_delta events.
      return { content: null, usage: chunk.usage || null };
    default:
      return { content: null, usage: null };
  }
}
27
+
28
/**
 * Normalise an Anthropic usage payload to `{ prompt, completion, total }`,
 * treating a missing payload or missing counters as zero.
 */
export function normalizeUsage(usage) {
  const counts = usage || {};
  const prompt = counts.input_tokens || 0;
  const completion = counts.output_tokens || 0;
  return { prompt, completion, total: prompt + completion };
}
33
+
34
/**
 * Wrap an Anthropic client so that messages.create() is routed through
 * `intercept`. All other properties pass through untouched.
 *
 * @param {object} client — Anthropic SDK instance
 * @param {(fn: Function, thisArg: object) => Function} intercept
 */
export function proxy(client, intercept) {
  // Inner proxy: swap out `create` on the messages namespace.
  const wrapMessages = (messages) =>
    new Proxy(messages, {
      get(target, prop, receiver) {
        const original = Reflect.get(target, prop, receiver);
        return prop === 'create' && typeof original === 'function'
          ? intercept(original, target)
          : original;
      },
    });

  return new Proxy(client, {
    get(target, prop, receiver) {
      const original = Reflect.get(target, prop, receiver);
      return prop === 'messages' ? wrapMessages(original) : original;
    },
  });
}
@@ -0,0 +1,130 @@
1
+ // Warpmetrics SDK — OpenAI Provider
2
+ // Supports both Chat Completions and Responses API.
3
+
4
// Provider identifier attached to every tracked call record.
export const name = 'openai';

/**
 * Heuristic client detection: the official OpenAI SDK exposes a class
 * whose constructor is literally named `OpenAI`.
 */
export function detect(client) {
  const ctor = client?.constructor;
  return ctor != null && ctor.name === 'OpenAI';
}
9
+
10
+ // ---------------------------------------------------------------------------
11
+ // Response extraction
12
+ // ---------------------------------------------------------------------------
13
+
14
// ---------------------------------------------------------------------------
// Response extraction
// ---------------------------------------------------------------------------

// Responses API payloads declare object === 'response' and/or carry an
// `output` array; Chat Completions payloads carry `choices` instead.
function isResponsesAPI(result) {
  return result?.object === 'response' || Array.isArray(result?.output);
}

// Normalise a Chat Completions result: first choice's text, usage, tool calls.
function extractChatCompletions(result) {
  const choice = result?.choices?.[0];
  const usage = result?.usage;

  const toolCalls = choice?.message?.tool_calls?.map((call) => ({
    id: call.id,
    name: call.function?.name,
    arguments: call.function?.arguments,
  }));

  return {
    response: choice?.message?.content || '',
    tokens: {
      prompt: usage?.prompt_tokens || 0,
      completion: usage?.completion_tokens || 0,
      total: usage?.total_tokens || 0,
    },
    toolCalls: toolCalls || null,
  };
}

// Normalise a Responses API result: output_text items, usage, function calls.
function extractResponses(result) {
  const output = result?.output || [];

  const textItems = output
    .filter((item) => item.type === 'message')
    .flatMap((item) => (item.content || []).filter((c) => c.type === 'output_text'));
  // Prefer the SDK's convenience `output_text`; fall back to joining items.
  const text = result?.output_text || textItems.map((c) => c.text).join('') || '';

  const prompt = result?.usage?.input_tokens || 0;
  const completion = result?.usage?.output_tokens || 0;

  const fnCalls = output.filter((item) => item.type === 'function_call');

  return {
    response: text,
    tokens: { prompt, completion, total: prompt + completion },
    toolCalls: fnCalls.length > 0
      ? fnCalls.map((fc) => ({ id: fc.id, name: fc.name, arguments: fc.arguments }))
      : null,
  };
}

/**
 * Normalise a non-streaming OpenAI result (either API flavour) into the
 * provider-agnostic shape `{ response, tokens, toolCalls }`.
 */
export function extract(result) {
  if (isResponsesAPI(result)) return extractResponses(result);
  return extractChatCompletions(result);
}
58
+
59
+ // ---------------------------------------------------------------------------
60
+ // Streaming extraction
61
+ // ---------------------------------------------------------------------------
62
+
63
/**
 * Pull the incremental text / usage payload out of one OpenAI streaming
 * chunk, covering both Responses API and Chat Completions streams. Either
 * side of the result is null when the chunk does not carry it.
 */
export function extractStreamDelta(chunk) {
  // Responses API streaming: text arrives on typed delta events, usage on
  // the terminal `response.completed` event.
  if (chunk.type === 'response.output_text.delta') {
    // `??` rather than `||` so a legitimate empty-string delta is passed
    // through instead of being collapsed to null.
    return { content: chunk.delta ?? null, usage: null };
  }
  if (chunk.type === 'response.completed') {
    return { content: null, usage: chunk.response?.usage || null };
  }

  // Chat Completions streaming: deltas ride on choices[0]; usage, when
  // present, rides on the chunk itself.
  return {
    content: chunk.choices?.[0]?.delta?.content ?? null,
    usage: chunk.usage || null,
  };
}
78
+
79
/**
 * Normalise an OpenAI usage payload to `{ prompt, completion, total }`.
 * Accepts both naming schemes: Chat Completions (prompt_tokens /
 * completion_tokens) and Responses API (input_tokens / output_tokens).
 */
export function normalizeUsage(usage) {
  const counts = usage || {};
  const prompt = counts.prompt_tokens || counts.input_tokens || 0;
  const completion = counts.completion_tokens || counts.output_tokens || 0;
  return { prompt, completion, total: prompt + completion };
}
84
+
85
+ // ---------------------------------------------------------------------------
86
+ // Proxy
87
+ // ---------------------------------------------------------------------------
88
+
89
// ---------------------------------------------------------------------------
// Proxy
// ---------------------------------------------------------------------------

// Wrap an API namespace object so reads of its `create` method go through
// `intercept`; every other member passes through untouched.
function withInterceptedCreate(namespace, intercept) {
  return new Proxy(namespace, {
    get(target, prop) {
      const member = Reflect.get(target, prop);
      return prop === 'create' && typeof member === 'function'
        ? intercept(member, target)
        : member;
    },
  });
}

/**
 * Wrap an OpenAI client so that both entry points —
 * client.responses.create() and client.chat.completions.create() —
 * are routed through `intercept`.
 */
export function proxy(client, intercept) {
  return new Proxy(client, {
    get(target, prop, receiver) {
      const value = Reflect.get(target, prop, receiver);

      // client.responses.create()
      if (prop === 'responses' && value) {
        return withInterceptedCreate(value, intercept);
      }

      // client.chat.completions.create()
      if (prop === 'chat' && value) {
        return new Proxy(value, {
          get(chatTarget, chatProp) {
            const chatValue = Reflect.get(chatTarget, chatProp);
            return chatProp === 'completions'
              ? withInterceptedCreate(chatValue, intercept)
              : chatValue;
          },
        });
      }

      return value;
    },
  });
}
@@ -0,0 +1,53 @@
1
+ // Warpmetrics SDK — add()
2
+
3
+ import { runRegistry, groupRegistry, responseRegistry } from '../core/registry.js';
4
+ import { logLink, getConfig } from '../core/transport.js';
5
+ import { ref as getRef } from './ref.js';
6
+
7
/**
 * Add items (groups or LLM responses) to a run or group.
 *
 * @param {object | string} target — Run, Group, or ref string
 * @param {...object} items — Group objects or LLM response objects
 */
export function add(target, ...items) {
  const parentId = getRef(target);
  if (!parentId) {
    if (getConfig().debug) console.warn('[warpmetrics] add() — target not recognised.');
    return;
  }

  const parent = runRegistry.get(parentId) || groupRegistry.get(parentId);
  if (!parent) {
    if (getConfig().debug) console.warn(`[warpmetrics] add() — target not in registry: ${parentId}`);
    return;
  }

  for (const item of items) {
    attachItem(parent, parentId, item);
  }
}

// Link a single item under the resolved parent record.
function attachItem(parent, parentId, item) {
  // Groups get re-parented onto the target.
  if (item && item._type === 'group') {
    const child = groupRegistry.get(item.id);
    if (child) {
      child.parentId = parentId;
      parent.groups.push(item.id);
      logLink({ parentId, childId: item.id, type: 'group' });
    }
    return;
  }

  // Runs are top-level: nesting them is rejected.
  if (item && item._type === 'run') {
    if (getConfig().debug) console.warn('[warpmetrics] add() — cannot add a run to another target.');
    return;
  }

  // Anything else should be an LLM response from a warp()-ed client.
  const callId = responseRegistry.get(item);
  if (callId) {
    parent.calls.push(callId);
    logLink({ parentId, childId: callId, type: 'call' });
  } else if (getConfig().debug) {
    console.warn('[warpmetrics] add() — item not tracked. Was it from a warp()-ed client?');
  }
}
@@ -0,0 +1,65 @@
1
import { describe, it, expect } from 'vitest';
import { warp, run, group, add, flush } from '../index.js';
import { runRegistry, groupRegistry } from '../core/registry.js';
import { setupBeforeEach, createMockOpenAI, OPENAI_RESPONSE } from '../../test/setup.js';

setupBeforeEach();

describe('add()', () => {
  it('links a group to a run', async () => {
    const theRun = run('test');
    const step = group('step');
    add(theRun, step);

    expect(runRegistry.get(theRun.id).groups).toContain(step.id);
    expect(groupRegistry.get(step.id).parentId).toBe(theRun.id);

    await flush();
    const payload = JSON.parse(global.fetch.mock.calls[0][1].body);
    const link = payload.links.find((l) => l.childId === step.id);
    expect(link).toBeDefined();
    expect(link.parentId).toBe(theRun.id);
    expect(link.type).toBe('group');
  });

  it('links an LLM response to a group', async () => {
    const tracked = warp(createMockOpenAI(OPENAI_RESPONSE));
    const reply = await tracked.chat.completions.create({ model: 'gpt-4o-mini', messages: [] });

    const step = group('step');
    add(step, reply);

    const record = groupRegistry.get(step.id);
    expect(record.calls).toHaveLength(1);
    expect(record.calls[0]).toMatch(/^wm_call_/);
  });

  it('accepts multiple items at once', () => {
    const theRun = run('test');
    add(theRun, group('a'), group('b'));
    expect(runRegistry.get(theRun.id).groups).toHaveLength(2);
  });

  it('nests groups inside groups', () => {
    const theRun = run('test');
    const outer = group('outer');
    const inner = group('inner');

    add(outer, inner);
    add(theRun, outer);

    expect(groupRegistry.get(outer.id).groups).toContain(inner.id);
    expect(groupRegistry.get(inner.id).parentId).toBe(outer.id);
  });
});
@@ -0,0 +1,50 @@
1
+ // Warpmetrics SDK — cost()
2
+
3
+ import { ref as getRef } from './ref.js';
4
+ import { runRegistry, groupRegistry, costRegistry, costByCallId } from '../core/registry.js';
5
+
6
/**
 * Get the cost in USD for any tracked target.
 *
 * - Response object → cost of that single call
 * - Run / Group → sum of all nested calls
 * - Ref string → lookup in registries
 *
 * @param {object | string} target
 * @returns {number} cost in USD
 */
export function cost(target) {
  // Fast path: a raw LLM response object tracked by a warp()-ed client.
  if (typeof target === 'object' && costRegistry.has(target)) {
    return costRegistry.get(target);
  }

  const id = getRef(target);
  if (!id) return 0;

  // A single call referenced by its string ID.
  if (id.startsWith('wm_call_')) {
    return costByCallId.get(id) || 0;
  }

  // Runs and groups aggregate the cost of everything nested beneath them.
  const container = runRegistry.get(id) || groupRegistry.get(id);
  return container ? sumCosts(container) : 0;
}
36
+
37
/**
 * Recursively total the cost of every call in a container (run or group)
 * and all of its nested groups.
 *
 * @param {{ calls: string[], groups: string[] }} container
 * @param {Set<string>} [seen] — group ids already visited. Guards against
 *   cycles in the group graph (e.g. a group linked under one of its own
 *   descendants via add()), which would otherwise recurse forever, and
 *   also prevents double-counting a group linked twice.
 * @returns {number} total cost in USD
 */
function sumCosts(container, seen = new Set()) {
  let total = 0;

  for (const callId of container.calls) {
    total += costByCallId.get(callId) || 0;
  }

  for (const groupId of container.groups) {
    if (seen.has(groupId)) continue; // already counted — cycle or duplicate link
    seen.add(groupId);
    const child = groupRegistry.get(groupId);
    if (child) total += sumCosts(child, seen);
  }

  return total;
}
@@ -0,0 +1,42 @@
1
import { describe, it, expect } from 'vitest';
import { warp, run, group, add, cost } from '../index.js';
import { setupBeforeEach, createMockOpenAI, OPENAI_RESPONSE } from '../../test/setup.js';

setupBeforeEach();

describe('cost()', () => {
  it('returns cost for an LLM response object', async () => {
    const tracked = warp(createMockOpenAI(OPENAI_RESPONSE));
    const reply = await tracked.chat.completions.create({ model: 'gpt-4o-mini', messages: [] });

    const price = cost(reply);
    expect(price).toBeGreaterThan(0);
    // gpt-4o-mini: prompt=$0.15/1M, completion=$0.60/1M
    // 10 prompt + 5 completion -> expected ~0.0000045
    expect(price).toBeCloseTo(0.0000045, 7);
  });

  it('aggregates cost across a run with calls', async () => {
    const tracked = warp(createMockOpenAI(OPENAI_RESPONSE));

    const theRun = run('test');
    const step = group('step');

    const first = await tracked.chat.completions.create({ model: 'gpt-4o-mini', messages: [] });
    const second = await tracked.chat.completions.create({ model: 'gpt-4o-mini', messages: [] });

    add(step, first, second);
    add(theRun, step);

    // Two identical calls → the run totals exactly twice one call's cost.
    expect(cost(theRun)).toBeCloseTo(cost(first) * 2, 10);
  });

  it('returns 0 for unknown targets', () => {
    expect(cost({})).toBe(0);
    expect(cost('wm_run_nonexistent')).toBe(0);
  });
});
@@ -0,0 +1,31 @@
1
+ // Warpmetrics SDK — group()
2
+
3
+ import { generateId } from '../core/utils.js';
4
+ import { groupRegistry } from '../core/registry.js';
5
+ import { logGroup } from '../core/transport.js';
6
+
7
/**
 * Create a group — a logical phase or step inside a run.
 *
 * @param {string} label — group type used for aggregation ("planner", "coder")
 * @param {object} [options]
 * @param {string} [options.name] — human-readable name
 * @returns {{ readonly id: string, readonly _type: 'group' }}
 */
export function group(label, options = {}) {
  const id = generateId('grp');

  // Registry record; tree links (parentId / groups / calls) are filled in
  // later by add().
  const record = {
    id,
    label,
    name: options.name || null,
    parentId: null,
    groups: [],
    calls: [],
  };
  groupRegistry.set(id, record);
  logGroup(record);

  // Callers only ever hold an opaque, immutable handle.
  return Object.freeze({ id, _type: 'group' });
}
@@ -0,0 +1,22 @@
1
import { describe, it, expect } from 'vitest';
import { group } from '../index.js';
import { groupRegistry } from '../core/registry.js';
import { setupBeforeEach } from '../../test/setup.js';

setupBeforeEach();

describe('group()', () => {
  it('returns a frozen object with id and _type', () => {
    const handle = group('planner');
    expect(handle.id).toMatch(/^wm_grp_/);
    expect(handle._type).toBe('group');
    expect(Object.isFrozen(handle)).toBe(true);
  });

  it('stores data in groupRegistry', () => {
    const handle = group('planner', { name: 'Planning Phase' });
    const record = groupRegistry.get(handle.id);
    expect(record.label).toBe('planner');
    expect(record.name).toBe('Planning Phase');
  });
});