@warpmetrics/warp 0.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,33 @@
1
+ // Warpmetrics SDK — outcome()
2
+
3
+ import { ref as getRef } from './ref.js';
4
+ import { logOutcome, getConfig } from '../core/transport.js';
5
+
6
/**
 * Record an outcome on any tracked target.
 *
 * @param {object | string} target — Run, Group, LLM response, or ref string
 * @param {string} name — outcome name ("completed", "failed", "helpful")
 * @param {object} [options]
 * @param {string} [options.reason] — why this outcome occurred
 * @param {string} [options.source] — who / what recorded it
 * @param {string[]} [options.tags] — categorisation tags
 * @param {Record<string, any>} [options.metadata] — arbitrary extra data
 */
export function outcome(target, name, options = {}) {
  // Resolve the target (Run/Group handle, LLM response, or ref string) to its id.
  const targetId = getRef(target);

  if (!targetId) {
    // Untracked target: surface a warning in debug mode, silently drop otherwise.
    if (getConfig().debug) console.warn('[warpmetrics] outcome() — target not tracked.');
    return;
  }

  logOutcome({
    targetId,
    name,
    // `??` rather than `||`: only null/undefined fall back to null, so a
    // deliberately falsy value (e.g. an empty-string reason) is preserved.
    reason: options.reason ?? null,
    source: options.source ?? null,
    tags: options.tags ?? null,
    metadata: options.metadata ?? null,
  });
}
@@ -0,0 +1,40 @@
1
+ import { describe, it, expect } from 'vitest';
2
+ import { warp, run, outcome, flush } from '../index.js';
3
+ import { setupBeforeEach, createMockOpenAI, OPENAI_RESPONSE } from '../../test/setup.js';
4
+
5
setupBeforeEach();

describe('outcome()', () => {
  it('enqueues an outcome event for a run', async () => {
    const testRun = run('test');
    outcome(testRun, 'completed', { reason: 'All good' });
    await flush();

    // Inspect the JSON payload handed to the mocked fetch transport.
    const payload = JSON.parse(global.fetch.mock.calls[0][1].body);
    const recorded = payload.outcomes.find((event) => event.name === 'completed');
    expect(recorded).toBeDefined();
    expect(recorded.targetId).toBe(testRun.id);
    expect(recorded.reason).toBe('All good');
  });

  it('works with a ref string', async () => {
    outcome('wm_run_abc123', 'shipped');
    await flush();

    const payload = JSON.parse(global.fetch.mock.calls[0][1].body);
    expect(payload.outcomes[0].targetId).toBe('wm_run_abc123');
  });

  it('works on an LLM response', async () => {
    const client = createMockOpenAI(OPENAI_RESPONSE);
    const wrapped = warp(client);
    const response = await wrapped.chat.completions.create({ model: 'gpt-4o-mini', messages: [] });

    outcome(response, 'helpful');
    await flush();

    const payload = JSON.parse(global.fetch.mock.calls[0][1].body);
    const recorded = payload.outcomes.find((event) => event.name === 'helpful');
    expect(recorded.targetId).toMatch(/^wm_call_/);
  });
});
@@ -0,0 +1,27 @@
1
+ // Warpmetrics SDK — ref()
2
+
3
+ import { responseRegistry } from '../core/registry.js';
4
+
5
/**
 * Resolve any trackable target to its string ID.
 *
 * Accepts:
 *   - A string ref (pass-through)
 *   - A Run or Group object ({ id, _type })
 *   - An LLM response object (looked up in the response registry)
 *
 * @param {object | string} target
 * @returns {string | undefined} the resolved id, or undefined if untracked
 */
export function ref(target) {
  // String refs pass straight through untouched.
  if (typeof target === 'string') return target;

  // SDK handles (Run / Group objects) expose both an id and a _type tag.
  if (target && target._type && target.id) return target.id;

  // Anything else object-shaped may be an LLM response tracked in the registry.
  if (target !== null && typeof target === 'object') {
    const registered = responseRegistry.get(target);
    if (registered) return registered;
  }

  // Unknown / untracked target.
  return undefined;
}
@@ -0,0 +1,35 @@
1
+ import { describe, it, expect } from 'vitest';
2
+ import { warp, run, group, ref } from '../index.js';
3
+ import { setupBeforeEach, createMockOpenAI, OPENAI_RESPONSE } from '../../test/setup.js';
4
+
5
setupBeforeEach();

describe('ref()', () => {
  it('passes through strings', () => {
    expect(ref('wm_run_abc')).toBe('wm_run_abc');
  });

  it('extracts id from Run', () => {
    const trackedRun = run('test');
    expect(ref(trackedRun)).toBe(trackedRun.id);
  });

  it('extracts id from Group', () => {
    const trackedGroup = group('test');
    expect(ref(trackedGroup)).toBe(trackedGroup.id);
  });

  it('resolves LLM response to call id', async () => {
    const client = createMockOpenAI(OPENAI_RESPONSE);
    const wrapped = warp(client);
    const response = await wrapped.chat.completions.create({ model: 'gpt-4o-mini', messages: [] });

    expect(ref(response)).toMatch(/^wm_call_/);
  });

  it('returns undefined for unknown objects', () => {
    expect(ref({})).toBeUndefined();
    expect(ref(null)).toBeUndefined();
  });
});
@@ -0,0 +1,32 @@
1
+ // Warpmetrics SDK — run()
2
+
3
+ import { generateId } from '../core/utils.js';
4
+ import { runRegistry } from '../core/registry.js';
5
+ import { logRun } from '../core/transport.js';
6
+
7
/**
 * Create a run — the top-level unit that tracks one agent execution.
 *
 * @param {string} label — run type used for aggregation ("code-review", "bug-fix")
 * @param {object} [options]
 * @param {string} [options.link] — external reference ("ticket:PROJ-101", PR URL, etc.)
 * @param {string} [options.name] — human-readable name
 * @returns {{ readonly id: string, readonly _type: 'run' }} frozen run handle
 */
export function run(label, options = {}) {
  const id = generateId('run');

  const data = {
    id,
    label,
    // `??` rather than `||`: only null/undefined fall back to null, so a
    // deliberately falsy value (e.g. an empty-string link) is preserved.
    link: options.link ?? null,
    name: options.name ?? null,
    groups: [], // presumably populated elsewhere as groups attach — TODO confirm
    calls: [],  // presumably populated elsewhere as LLM calls attach — TODO confirm
  };

  // Register locally, then enqueue the run event for transport.
  runRegistry.set(id, data);
  logRun(data);

  // Callers only get an immutable handle; full data stays in the registry.
  return Object.freeze({ id, _type: 'run' });
}
@@ -0,0 +1,34 @@
1
+ import { describe, it, expect } from 'vitest';
2
+ import { run, flush } from '../index.js';
3
+ import { runRegistry } from '../core/registry.js';
4
+ import { setupBeforeEach } from '../../test/setup.js';
5
+
6
setupBeforeEach();

describe('run()', () => {
  it('returns a frozen object with id and _type', () => {
    const handle = run('code-review');
    expect(handle.id).toMatch(/^wm_run_/);
    expect(handle._type).toBe('run');
    expect(Object.isFrozen(handle)).toBe(true);
  });

  it('stores data in runRegistry', () => {
    const handle = run('code-review', { name: 'PR #42', link: 'https://github.com/pr/42' });
    const stored = runRegistry.get(handle.id);
    expect(stored.label).toBe('code-review');
    expect(stored.name).toBe('PR #42');
    expect(stored.link).toBe('https://github.com/pr/42');
    expect(stored.groups).toEqual([]);
    expect(stored.calls).toEqual([]);
  });

  it('enqueues a run event for transport', async () => {
    run('test-label');
    await flush();

    expect(global.fetch).toHaveBeenCalledTimes(1);
    const payload = JSON.parse(global.fetch.mock.calls[0][1].body);
    expect(payload.runs).toHaveLength(1);
    expect(payload.runs[0].label).toBe('test-label');
  });
});