@diabolicallabs/agent-sdk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,83 @@
1
+ # @diabolicallabs/agent-sdk
2
+
3
+ Cost-tracking middleware for `@diabolicallabs/llm-client`. Drop-in wrapper that captures call records and dispatches them asynchronously to the Agent Spend Dashboard. © Diabolical Labs
4
+
5
+ **Pre-1.0. APIs may change between minor versions.**
6
+
7
+ ## Status
8
+
9
+ **Implemented.** Types, the public API surface, and the instrumentation middleware all ship in this version (see `dist/index.js`).
10
+
11
+ ## Install
12
+
13
+ ```bash
14
+ pnpm add @diabolicallabs/agent-sdk @diabolicallabs/llm-client
15
+ ```
16
+
17
+ ## Usage
18
+
19
+ ```typescript
20
+ import { createClientFromEnv } from '@diabolicallabs/llm-client';
21
+ import { instrumentClient } from '@diabolicallabs/agent-sdk';
22
+
23
+ const base = createClientFromEnv('anthropic', 'claude-sonnet-4-6');
24
+
25
+ const client = instrumentClient(base, {
26
+ identity: { agentId: process.env.AGENT_ID!, taskLabel: 'geo-audit' },
27
+ ingestionUrl: process.env.SPEND_INGESTION_URL!,
28
+ ingestionKey: process.env.SPEND_INGESTION_KEY!,
29
+ });
30
+
31
+ // Identical API to LlmClient — instrumentation is invisible to the caller
32
+ const response = await client.complete([
33
+ { role: 'user', content: 'Hello' },
34
+ ]);
35
+ // CallRecord dispatched asynchronously — response returned immediately
36
+ ```
37
+
38
+ ## API
39
+
40
+ ### `instrumentClient(client, config): InstrumentedLlmClient`
41
+
42
+ Wraps any `LlmClient` with cost-tracking middleware. The returned `InstrumentedLlmClient` is a drop-in replacement — it implements the same interface.
43
+
44
+ **Config:**
45
+
46
+ | Field | Type | Default | Description |
47
+ |---|---|---|---|
48
+ | `identity.agentId` | `string` | required | UUID from Spend Dashboard agent registry |
49
+ | `identity.taskLabel` | `string?` | — | Optional label for this call (max 200 chars) |
50
+ | `identity.projectId` | `string?` | — | Optional project override |
51
+ | `ingestionUrl` | `string` | required | Agent Spend Dashboard `/api/ingest` endpoint |
52
+ | `ingestionKey` | `string` | required | Agent-scoped bearer token |
53
+ | `maxIngestionRetries` | `number` | `3` | Retries before dropping the record |
54
+ | `ingestionTimeoutMs` | `number` | `5000` | Ingestion request timeout — never blocks the LLM call |
55
+ | `disabled` | `boolean` | `false` | Set `true` in test/dev to skip all instrumentation |
56
+
57
+ ## Ingestion contract
58
+
59
+ Every LLM call produces a `CallRecord` dispatched to the ingestion URL:
60
+
61
+ ```typescript
62
+ interface CallRecord {
63
+ agent_id: string;
64
+ model: string;
65
+ prompt_tokens: number;
66
+ completion_tokens: number;
67
+ cache_creation_tokens?: number; // Anthropic prompt cache only
68
+ cache_read_tokens?: number; // Anthropic prompt cache only
69
+ latency_ms: number;
70
+ task_label?: string;
71
+ project_id?: string;
72
+ timestamp: string; // ISO 8601 UTC
73
+ call_id: string; // UUID v4 — idempotency key
74
+ }
75
+ ```
76
+
77
+ ## Failure behavior
78
+
79
+ Ingestion failures are **always silent** — they never surface to the LLM caller.
80
+
81
+ - Endpoint down or slow: retried up to `maxIngestionRetries` with exponential backoff
82
+ - Retries exhausted: record dropped, structured warning logged (includes `call_id` for audit)
83
+ - `disabled: true`: all instrumentation skipped, underlying client returned directly
@@ -0,0 +1,60 @@
1
+ import { LlmClient } from '@diabolicallabs/llm-client';
2
+
3
+ /**
4
+ * Core type definitions for @diabolicallabs/agent-sdk.
5
+ * Matches the spec in briefs/brief-platform.md §4.2.
6
+ */
7
+
8
/** Attribution identity attached to every CallRecord dispatched by the SDK. */
interface AgentIdentity {
  /** UUID from the Spend Dashboard agent registry (see README config table). */
  agentId: string;
  /** Optional per-call label — README documents a 200-char maximum. */
  taskLabel?: string;
  /** Optional project override. */
  projectId?: string;
}
/** Configuration accepted by instrumentClient(). */
interface AgentSdkConfig {
  /** Identity used to attribute every CallRecord. */
  identity: AgentIdentity;
  /** Agent Spend Dashboard `/api/ingest` endpoint. */
  ingestionUrl: string;
  /** Agent-scoped bearer token, sent as `Authorization: Bearer <key>`. */
  ingestionKey: string;
  /** Retries before dropping a record — defaults to 3 (per README). */
  maxIngestionRetries?: number;
  /** Per-attempt ingestion timeout in ms — defaults to 5000 (per README). */
  ingestionTimeoutMs?: number;
  /** When true, all instrumentation is skipped — defaults to false. */
  disabled?: boolean;
}
/** LlmClient drop-in that also exposes the config it was instrumented with. */
interface InstrumentedLlmClient extends LlmClient {
  readonly sdkConfig: Readonly<AgentSdkConfig>;
}
/**
 * Wire format POSTed to the ingestion endpoint for every LLM call.
 * snake_case keys intentionally mirror the ingestion API, not local TS style.
 */
interface CallRecord {
  agent_id: string;
  model: string;
  prompt_tokens: number;
  completion_tokens: number;
  cache_creation_tokens?: number; // Anthropic prompt cache only (per README)
  cache_read_tokens?: number; // Anthropic prompt cache only (per README)
  latency_ms: number;
  task_label?: string;
  project_id?: string;
  /** ISO 8601 UTC. */
  timestamp: string;
  /** UUID v4 — idempotency key for the ingestion endpoint. */
  call_id: string;
}

/**
 * instrumentClient — wraps an LlmClient with cost-tracking middleware.
 *
 * Each call to complete(), stream(), or structured() is intercepted:
 * 1. The LLM call executes normally.
 * 2. A CallRecord is built from the response (tokens, latency, identifiers).
 * 3. The record is dispatched to config.ingestionUrl asynchronously — the LLM
 *    response is returned to the caller before ingestion completes.
 * 4. Failed dispatches retry with exponential backoff up to maxIngestionRetries.
 * 5. If all retries fail, the record is dropped and a structured warning is logged.
 *    The error is never propagated to the LLM caller.
 */

/**
 * Wraps an existing LlmClient with cost-tracking middleware.
 * Returns an InstrumentedLlmClient that is a drop-in replacement for LlmClient.
 *
 * When config.disabled is true, the underlying client is returned directly
 * with sdkConfig attached — no instrumentation overhead, no fetch calls.
 */
declare function instrumentClient(client: LlmClient, config: AgentSdkConfig): InstrumentedLlmClient;

export { type AgentIdentity, type AgentSdkConfig, type CallRecord, type InstrumentedLlmClient, instrumentClient };
package/dist/index.js ADDED
@@ -0,0 +1,120 @@
1
+ // src/sdk.ts
2
+ function buildCallRecord(usage, model, latencyMs, config) {
3
+ return {
4
+ agent_id: config.identity.agentId,
5
+ model,
6
+ prompt_tokens: usage.inputTokens,
7
+ completion_tokens: usage.outputTokens,
8
+ ...usage.cacheCreationTokens !== void 0 && {
9
+ cache_creation_tokens: usage.cacheCreationTokens
10
+ },
11
+ ...usage.cacheReadTokens !== void 0 && {
12
+ cache_read_tokens: usage.cacheReadTokens
13
+ },
14
+ latency_ms: latencyMs,
15
+ ...config.identity.taskLabel !== void 0 && {
16
+ task_label: config.identity.taskLabel
17
+ },
18
+ ...config.identity.projectId !== void 0 && {
19
+ project_id: config.identity.projectId
20
+ },
21
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
22
+ call_id: crypto.randomUUID()
23
+ };
24
+ }
25
/**
 * Fires one POST of the record to the ingestion endpoint.
 * Resolves true only on an HTTP 2xx response; any network error, timeout
 * (AbortError via the watchdog), or non-2xx status resolves false.
 * Never rejects — every failure path is caught internally.
 */
async function dispatchOnce(record, ingestionUrl, ingestionKey, timeoutMs) {
  const abort = new AbortController();
  const watchdog = setTimeout(() => abort.abort(), timeoutMs);
  let delivered = false;
  try {
    const res = await fetch(ingestionUrl, {
      method: "POST",
      signal: abort.signal,
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${ingestionKey}`
      },
      body: JSON.stringify(record)
    });
    delivered = res.ok;
  } catch {
    // Network failure, abort (timeout), or serialization error — report failure.
    delivered = false;
  } finally {
    clearTimeout(watchdog);
  }
  return delivered;
}
45
/**
 * Dispatches a CallRecord with exponential backoff (500ms base, doubling per
 * attempt), making maxIngestionRetries + 1 total attempts. On exhaustion it
 * emits one structured console.warn line — including call_id for the audit
 * trail, never the ingestion key — and drops the record. Never throws.
 */
async function dispatchWithRetry(record, config) {
  const maxRetries = config.maxIngestionRetries ?? 3;
  const timeoutMs = config.ingestionTimeoutMs ?? 5e3;
  const baseDelayMs = 500;
  let attempt = 0;
  while (attempt <= maxRetries) {
    if (await dispatchOnce(record, config.ingestionUrl, config.ingestionKey, timeoutMs)) {
      return;
    }
    if (attempt < maxRetries) {
      // Backoff schedule: 500ms, 1000ms, 2000ms, ... (no wait after last attempt).
      const backoff = baseDelayMs * 2 ** attempt;
      await new Promise((resolve) => setTimeout(resolve, backoff));
    }
    attempt += 1;
  }
  const warning = {
    level: "warn",
    pkg: "@diabolicallabs/agent-sdk",
    event: "ingestion_exhausted",
    call_id: record.call_id,
    agent_id: record.agent_id,
    model: record.model,
    message: `Ingestion dispatch failed after ${maxRetries + 1} attempts. Record dropped.`
  };
  console.warn(JSON.stringify(warning));
}
69
/**
 * Wraps an existing LlmClient with cost-tracking middleware.
 * Returns an InstrumentedLlmClient drop-in: same complete()/stream()/
 * structured() surface plus sdkConfig. Records are dispatched fire-and-forget;
 * ingestion failures never propagate to the LLM caller.
 */
function instrumentClient(client, config) {
  if (config.disabled === true) {
    // Disabled mode: no instrumentation, no fetch calls — pure delegation.
    // Deliberately NOT `{ ...client, sdkConfig: config }`: spreading copies
    // only own enumerable properties, so a class-based client's prototype
    // methods (complete/stream/structured) would be silently dropped and the
    // returned object would no longer satisfy the LlmClient contract.
    return {
      config: client.config,
      sdkConfig: config,
      complete: (...args) => client.complete(...args),
      stream: (...args) => client.stream(...args),
      structured: (...args) => client.structured(...args)
    };
  }
  // complete() — non-streaming; usage and model are read off the response.
  async function complete(...args) {
    const start = Date.now();
    const response = await client.complete(...args);
    const latencyMs = Date.now() - start;
    const record = buildCallRecord(response.usage, response.model, latencyMs, config);
    // Fire-and-forget: do not await ingestion before returning the response.
    void dispatchWithRetry(record, config);
    return response;
  }
  // stream() — pass-through async generator; usage arrives on a late chunk.
  async function* stream(...args) {
    let finalUsage;
    const model = client.config.model;
    const start = Date.now();
    // Yield each chunk immediately — never buffer. A stream error propagates
    // to the caller unchanged; with no usage captured, no record is dispatched.
    // (The original wrapped this loop in `catch (err) { throw err; }`, which
    // is a no-op and has been removed.)
    for await (const chunk of client.stream(...args)) {
      if (chunk.usage !== void 0) {
        finalUsage = chunk.usage;
      }
      yield chunk;
    }
    if (finalUsage !== void 0) {
      const latencyMs = Date.now() - start;
      const record = buildCallRecord(finalUsage, model, latencyMs, config);
      void dispatchWithRetry(record, config);
    }
  }
  // structured() — same pattern as complete(); model comes from client.config
  // because the structured response is not shown to carry a model field.
  async function structured(messages, schema, options) {
    const start = Date.now();
    const response = await client.structured(messages, schema, options);
    const latencyMs = Date.now() - start;
    const record = buildCallRecord(response.usage, client.config.model, latencyMs, config);
    void dispatchWithRetry(record, config);
    return response;
  }
  return {
    config: client.config,
    sdkConfig: config,
    complete,
    stream,
    structured
  };
}
117
+ export {
118
+ instrumentClient
119
+ };
120
+ //# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/sdk.ts"],"sourcesContent":["/**\n * instrumentClient — wraps an LlmClient with cost-tracking middleware.\n *\n * Each call to complete(), stream(), or structured() is intercepted:\n * 1. The LLM call executes normally.\n * 2. A CallRecord is built from the response (tokens, latency, identifiers).\n * 3. The record is dispatched to config.ingestionUrl asynchronously — the LLM\n * response is returned to the caller before ingestion completes.\n * 4. Failed dispatches retry with exponential backoff up to maxIngestionRetries.\n * 5. If all retries fail, the record is dropped and a structured warning is logged.\n * The error is never propagated to the LLM caller.\n */\n\nimport type {\n LlmClient,\n LlmResponse,\n LlmStreamChunk,\n LlmStructuredResponse,\n LlmUsage,\n} from '@diabolicallabs/llm-client';\nimport type { AgentSdkConfig, CallRecord, InstrumentedLlmClient } from './types.js';\n\n// ---------------------------------------------------------------------------\n// CallRecord builder\n// ---------------------------------------------------------------------------\n\n/**\n * Builds a CallRecord from normalized LlmUsage data.\n * call_id uses crypto.randomUUID() — Node 20 built-in, no external package.\n */\nfunction buildCallRecord(\n usage: LlmUsage,\n model: string,\n latencyMs: number,\n config: AgentSdkConfig\n): CallRecord {\n return {\n agent_id: config.identity.agentId,\n model,\n prompt_tokens: usage.inputTokens,\n completion_tokens: usage.outputTokens,\n ...(usage.cacheCreationTokens !== undefined && {\n cache_creation_tokens: usage.cacheCreationTokens,\n }),\n ...(usage.cacheReadTokens !== undefined && {\n cache_read_tokens: usage.cacheReadTokens,\n }),\n latency_ms: latencyMs,\n ...(config.identity.taskLabel !== undefined && {\n task_label: config.identity.taskLabel,\n }),\n ...(config.identity.projectId !== undefined && {\n project_id: config.identity.projectId,\n }),\n timestamp: new Date().toISOString(),\n call_id: 
crypto.randomUUID(),\n };\n}\n\n// ---------------------------------------------------------------------------\n// Ingestion dispatch with retry\n// ---------------------------------------------------------------------------\n\n/**\n * Dispatches a single attempt to the ingestion endpoint.\n * Returns true on HTTP 2xx, false on any error (network, timeout, non-2xx).\n * Never throws — all errors are caught internally.\n */\nasync function dispatchOnce(\n record: CallRecord,\n ingestionUrl: string,\n ingestionKey: string,\n timeoutMs: number\n): Promise<boolean> {\n const controller = new AbortController();\n const timer = setTimeout(() => controller.abort(), timeoutMs);\n try {\n const response = await fetch(ingestionUrl, {\n method: 'POST',\n signal: controller.signal,\n headers: {\n 'Content-Type': 'application/json',\n Authorization: `Bearer ${ingestionKey}`,\n },\n body: JSON.stringify(record),\n });\n return response.ok;\n } catch {\n // Network errors, AbortError (timeout), JSON stringify errors — all treated as failure\n return false;\n } finally {\n clearTimeout(timer);\n }\n}\n\n/**\n * Dispatches a CallRecord with exponential backoff retry.\n * On exhaustion, logs a structured warning and drops the record.\n * Never throws — ingestion errors are always silent to the LLM caller.\n */\nasync function dispatchWithRetry(record: CallRecord, config: AgentSdkConfig): Promise<void> {\n const maxRetries = config.maxIngestionRetries ?? 3;\n const timeoutMs = config.ingestionTimeoutMs ?? 
5000;\n const baseDelayMs = 500;\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n const success = await dispatchOnce(record, config.ingestionUrl, config.ingestionKey, timeoutMs);\n if (success) return;\n\n if (attempt < maxRetries) {\n // Exponential backoff: 500ms, 1000ms, 2000ms, ...\n const delay = baseDelayMs * 2 ** attempt;\n await new Promise<void>((resolve) => setTimeout(resolve, delay));\n }\n }\n\n // All retries exhausted — drop the record and warn\n // Structured log: include call_id for audit trail but never log ingestionKey\n console.warn(\n JSON.stringify({\n level: 'warn',\n pkg: '@diabolicallabs/agent-sdk',\n event: 'ingestion_exhausted',\n call_id: record.call_id,\n agent_id: record.agent_id,\n model: record.model,\n message: `Ingestion dispatch failed after ${maxRetries + 1} attempts. Record dropped.`,\n })\n );\n}\n\n// ---------------------------------------------------------------------------\n// instrumentClient\n// ---------------------------------------------------------------------------\n\n/**\n * Wraps an existing LlmClient with cost-tracking middleware.\n * Returns an InstrumentedLlmClient that is a drop-in replacement for LlmClient.\n *\n * When config.disabled is true, the underlying client is returned directly\n * with sdkConfig attached — no instrumentation overhead, no fetch calls.\n */\nexport function instrumentClient(client: LlmClient, config: AgentSdkConfig): InstrumentedLlmClient {\n // Disabled mode: skip all instrumentation, expose underlying client directly\n if (config.disabled === true) {\n return { ...client, sdkConfig: config };\n }\n\n // complete() — non-streaming, usage available on the response\n async function complete(...args: Parameters<LlmClient['complete']>): Promise<LlmResponse> {\n const start = Date.now();\n const response = await client.complete(...args);\n const latencyMs = Date.now() - start;\n\n const record = buildCallRecord(response.usage, response.model, latencyMs, config);\n // Dispatch 
non-blocking — do not await\n void dispatchWithRetry(record, config);\n\n return response;\n }\n\n // stream() — async generator passthrough; usage arrives on final chunk\n async function* stream(...args: Parameters<LlmClient['stream']>): AsyncGenerator<LlmStreamChunk> {\n let finalUsage: LlmUsage | undefined;\n const model = client.config.model;\n const start = Date.now();\n\n try {\n for await (const chunk of client.stream(...args)) {\n if (chunk.usage !== undefined) {\n finalUsage = chunk.usage;\n }\n yield chunk; // pass through immediately — never buffer\n }\n } catch (err) {\n // Stream error: no usage data, no record. Propagate to caller.\n throw err;\n }\n\n // Stream completed — dispatch if usage was captured\n if (finalUsage !== undefined) {\n const latencyMs = Date.now() - start;\n const record = buildCallRecord(finalUsage, model, latencyMs, config);\n // Non-blocking — void the promise\n void dispatchWithRetry(record, config);\n }\n }\n\n // structured() — same pattern as complete(), usage on the structured response\n async function structured<T>(\n messages: Parameters<LlmClient['complete']>[0],\n schema: { parse: (data: unknown) => T },\n options?: Parameters<LlmClient['complete']>[1]\n ): Promise<LlmStructuredResponse<T>> {\n const start = Date.now();\n const response = await client.structured<T>(messages, schema, options);\n const latencyMs = Date.now() - start;\n\n const record = buildCallRecord(response.usage, client.config.model, latencyMs, config);\n void dispatchWithRetry(record, config);\n\n return response;\n }\n\n return {\n config: client.config,\n sdkConfig: config,\n complete,\n stream,\n structured,\n 
};\n}\n"],"mappings":";AA8BA,SAAS,gBACP,OACA,OACA,WACA,QACY;AACZ,SAAO;AAAA,IACL,UAAU,OAAO,SAAS;AAAA,IAC1B;AAAA,IACA,eAAe,MAAM;AAAA,IACrB,mBAAmB,MAAM;AAAA,IACzB,GAAI,MAAM,wBAAwB,UAAa;AAAA,MAC7C,uBAAuB,MAAM;AAAA,IAC/B;AAAA,IACA,GAAI,MAAM,oBAAoB,UAAa;AAAA,MACzC,mBAAmB,MAAM;AAAA,IAC3B;AAAA,IACA,YAAY;AAAA,IACZ,GAAI,OAAO,SAAS,cAAc,UAAa;AAAA,MAC7C,YAAY,OAAO,SAAS;AAAA,IAC9B;AAAA,IACA,GAAI,OAAO,SAAS,cAAc,UAAa;AAAA,MAC7C,YAAY,OAAO,SAAS;AAAA,IAC9B;AAAA,IACA,YAAW,oBAAI,KAAK,GAAE,YAAY;AAAA,IAClC,SAAS,OAAO,WAAW;AAAA,EAC7B;AACF;AAWA,eAAe,aACb,QACA,cACA,cACA,WACkB;AAClB,QAAM,aAAa,IAAI,gBAAgB;AACvC,QAAM,QAAQ,WAAW,MAAM,WAAW,MAAM,GAAG,SAAS;AAC5D,MAAI;AACF,UAAM,WAAW,MAAM,MAAM,cAAc;AAAA,MACzC,QAAQ;AAAA,MACR,QAAQ,WAAW;AAAA,MACnB,SAAS;AAAA,QACP,gBAAgB;AAAA,QAChB,eAAe,UAAU,YAAY;AAAA,MACvC;AAAA,MACA,MAAM,KAAK,UAAU,MAAM;AAAA,IAC7B,CAAC;AACD,WAAO,SAAS;AAAA,EAClB,QAAQ;AAEN,WAAO;AAAA,EACT,UAAE;AACA,iBAAa,KAAK;AAAA,EACpB;AACF;AAOA,eAAe,kBAAkB,QAAoB,QAAuC;AAC1F,QAAM,aAAa,OAAO,uBAAuB;AACjD,QAAM,YAAY,OAAO,sBAAsB;AAC/C,QAAM,cAAc;AAEpB,WAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AACtD,UAAM,UAAU,MAAM,aAAa,QAAQ,OAAO,cAAc,OAAO,cAAc,SAAS;AAC9F,QAAI,QAAS;AAEb,QAAI,UAAU,YAAY;AAExB,YAAM,QAAQ,cAAc,KAAK;AACjC,YAAM,IAAI,QAAc,CAAC,YAAY,WAAW,SAAS,KAAK,CAAC;AAAA,IACjE;AAAA,EACF;AAIA,UAAQ;AAAA,IACN,KAAK,UAAU;AAAA,MACb,OAAO;AAAA,MACP,KAAK;AAAA,MACL,OAAO;AAAA,MACP,SAAS,OAAO;AAAA,MAChB,UAAU,OAAO;AAAA,MACjB,OAAO,OAAO;AAAA,MACd,SAAS,mCAAmC,aAAa,CAAC;AAAA,IAC5D,CAAC;AAAA,EACH;AACF;AAaO,SAAS,iBAAiB,QAAmB,QAA+C;AAEjG,MAAI,OAAO,aAAa,MAAM;AAC5B,WAAO,EAAE,GAAG,QAAQ,WAAW,OAAO;AAAA,EACxC;AAGA,iBAAe,YAAY,MAA+D;AACxF,UAAM,QAAQ,KAAK,IAAI;AACvB,UAAM,WAAW,MAAM,OAAO,SAAS,GAAG,IAAI;AAC9C,UAAM,YAAY,KAAK,IAAI,IAAI;AAE/B,UAAM,SAAS,gBAAgB,SAAS,OAAO,SAAS,OAAO,WAAW,MAAM;AAEhF,SAAK,kBAAkB,QAAQ,MAAM;AAErC,WAAO;AAAA,EACT;AAGA,kBAAgB,UAAU,MAAuE;AAC/F,QAAI;AACJ,UAAM,QAAQ,OAAO,OAAO;AAC5B,UAAM,QAAQ,KAAK,IAAI;AAEvB,QAAI;AACF,uBAAiB,SAAS,OAAO,OAAO,GAAG,IAAI,GAAG;AAChD,YAAI,MAAM,UAAU,QAAW;AAC7B,uBAAa,MAAM;AAAA,QACrB;AACA,cAAM;AAAA,MACR;AAAA,IACF,SAAS,KAAK;AAEZ,YA
AM;AAAA,IACR;AAGA,QAAI,eAAe,QAAW;AAC5B,YAAM,YAAY,KAAK,IAAI,IAAI;AAC/B,YAAM,SAAS,gBAAgB,YAAY,OAAO,WAAW,MAAM;AAEnE,WAAK,kBAAkB,QAAQ,MAAM;AAAA,IACvC;AAAA,EACF;AAGA,iBAAe,WACb,UACA,QACA,SACmC;AACnC,UAAM,QAAQ,KAAK,IAAI;AACvB,UAAM,WAAW,MAAM,OAAO,WAAc,UAAU,QAAQ,OAAO;AACrE,UAAM,YAAY,KAAK,IAAI,IAAI;AAE/B,UAAM,SAAS,gBAAgB,SAAS,OAAO,OAAO,OAAO,OAAO,WAAW,MAAM;AACrF,SAAK,kBAAkB,QAAQ,MAAM;AAErC,WAAO;AAAA,EACT;AAEA,SAAO;AAAA,IACL,QAAQ,OAAO;AAAA,IACf,WAAW;AAAA,IACX;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;","names":[]}
package/package.json ADDED
@@ -0,0 +1,48 @@
1
+ {
2
+ "name": "@diabolicallabs/agent-sdk",
3
+ "version": "0.1.0",
4
+ "description": "Cost-tracking middleware for @diabolicallabs/llm-client. Async fire-and-forget ingestion to Agent Spend Dashboard. © Diabolical Labs",
5
+ "author": "Diana Ismail <diana@deeismail.com> (https://deeismail.com)",
6
+ "publisher": "Diabolical Labs",
7
+ "license": "UNLICENSED",
8
+ "type": "module",
9
+ "exports": {
10
+ ".": {
11
+ "import": "./dist/index.js",
12
+ "types": "./dist/index.d.ts"
13
+ }
14
+ },
15
+ "main": "./dist/index.js",
16
+ "types": "./dist/index.d.ts",
17
+ "files": [
18
+ "dist"
19
+ ],
20
+ "repository": {
21
+ "type": "git",
22
+ "url": "git+https://github.com/mannism/dlabs-toolkit.git",
23
+ "directory": "packages/agent-sdk"
24
+ },
25
+ "homepage": "https://github.com/mannism/dlabs-toolkit#readme",
26
+ "bugs": {
27
+ "url": "https://github.com/mannism/dlabs-toolkit/issues"
28
+ },
29
+ "engines": {
30
+ "node": ">=20"
31
+ },
32
+ "dependencies": {
33
+ "@diabolicallabs/llm-client": "0.1.0"
34
+ },
35
+ "devDependencies": {
36
+ "@types/node": "^25.6.0",
37
+ "@vitest/coverage-v8": "^4.1.5",
38
+ "tsup": "^8.3.5",
39
+ "vitest": "^4.1.5"
40
+ },
41
+ "scripts": {
42
+ "build": "tsup",
43
+ "typecheck": "tsc --noEmit",
44
+ "lint": "biome check ./src && eslint ./src",
45
+ "test": "vitest run",
46
+ "test:watch": "vitest"
47
+ }
48
+ }