@sensu-ai/sdk 0.1.0 → 0.1.2

package/README.md ADDED
# @sensu-ai/sdk

The official Node.js SDK for [Sensu](https://sensu-ai.com) — observability for AI agents.

Instrument your agents to track LLM calls, tool use, token spend, latency, and cost. Events are buffered and sent in batches, so instrumentation adds minimal overhead to your agent.

## Installation

```bash
npm install @sensu-ai/sdk
```

## Quick start

```ts
import { SensuClient } from '@sensu-ai/sdk';

const sensu = new SensuClient({
  apiKey: process.env.SENSU_API_KEY,
  agentId: 'my-agent',
});

const run = sensu.startRun();
const step = run.startStep({ name: 'plan' });

// Wrap an LLM call — latency and token usage are captured automatically
const response = await step.trackLlm({
  provider: 'anthropic',
  model: 'claude-sonnet-4-6',
  fn: () => anthropic.messages.create({ ... }),
});

// Wrap a tool call
const result = await step.trackTool({
  toolName: 'search_web',
  fn: () => searchWeb(query),
});

await step.end();
await run.end();
```

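`trackLlm` also accepts an optional `extractContextBreakdown` callback. It is called with the raw response after `fn()` resolves and should return a `ContextBreakdown` (per-role token counts: `system_tokens`, `user_tokens`, `assistant_tokens`, `tool_tokens`, `retrieval_tokens`). A minimal sketch; `countTokens`, `systemPrompt`, and `userMessage` are hypothetical stand-ins for your own tokenizer and prompts:

```ts
const response = await step.trackLlm({
  provider: 'anthropic',
  model: 'claude-sonnet-4-6',
  fn: () => anthropic.messages.create({ ... }),
  extractContextBreakdown: () => ({
    // countTokens is a hypothetical helper; substitute your own tokenizer
    system_tokens: countTokens(systemPrompt),
    user_tokens: countTokens(userMessage),
  }),
});
```
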
## Configuration

```ts
const sensu = new SensuClient({
  apiKey: 'sns_live_...',  // Required. Your Sensu API key
  agentId: 'my-agent',     // Required. Identifies this agent in the dashboard
  baseUrl: 'https://...',  // Default: http://localhost:3001
  orgId: '',               // Optional. Populated automatically from your API key
  batchSize: 10,           // Flush after N events (default: 10)
  flushIntervalMs: 2000,   // Flush every N ms (default: 2000)
  disabled: false,         // Set true to disable all telemetry (e.g. in tests)
});
```

You can also load config from environment variables:

```ts
const sensu = new SensuClient({ fromEnv: true });
```

| Variable | Description |
|---|---|
| `SENSU_API_KEY` | Your Sensu API key |
| `SENSU_AGENT_ID` | Agent identifier |
| `SENSU_BASE_URL` | API base URL |
| `SENSU_ORG_ID` | Organisation ID (optional) |

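In a shell, that looks like the following (the base URL shown is the documented default):

```bash
export SENSU_API_KEY="sns_live_..."
export SENSU_AGENT_ID="my-agent"
export SENSU_BASE_URL="http://localhost:3001"
```
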
## API

### `SensuClient`

| Method | Description |
|---|---|
| `startRun(opts?)` | Start a new agent run. Returns a `RunHandle`. |
| `flush()` | Manually flush buffered events to the API. |
| `destroy()` | Stop the background flush timer. |

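In a short-lived process (a CLI run, a test suite), flush and stop the timer before exiting so no buffered events are lost. A minimal sketch; the `await` is harmless even if `flush()` turns out to be synchronous:

```ts
try {
  // ... run your agent ...
} finally {
  await sensu.flush();  // send any buffered events before the process exits
  sensu.destroy();      // stop the background flush timer so Node can exit cleanly
}
```
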
### `RunHandle`

| Method | Description |
|---|---|
| `startStep(opts?)` | Start a step within the run. Returns a `StepHandle`. |
| `end(status?)` | End the run. `status` is `'completed'` (default) or `'failed'`. |

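A common pattern is to mark the run failed when an error escapes, for example:

```ts
const run = sensu.startRun();
try {
  // ... start steps, track calls ...
  await run.end(); // defaults to 'completed'
} catch (err) {
  await run.end('failed');
  throw err;
}
```
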
### `StepHandle`

| Method | Description |
|---|---|
| `trackLlm(opts)` | Wrap an LLM call — measures latency and extracts token usage from the response. |
| `recordLlm(opts)` | Emit a raw LLM event when you already have the stats. |
| `trackTool(opts)` | Wrap a tool call — measures latency and output size. |
| `end()` | End the step. |

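`recordLlm` suits cases where the call happened elsewhere and you only have the numbers. A sketch using fields from `RawLlmCallOptions`; `ttftMs`, `costUsdEstimate`, `status`, and `contextBreakdown` come from that type, while `provider` and `model` are assumed names here:

```ts
step.recordLlm({
  provider: 'anthropic',       // assumed field name
  model: 'claude-sonnet-4-6',  // assumed field name
  ttftMs: 180,
  costUsdEstimate: 0.0042,
  status: 'success',
  contextBreakdown: { system_tokens: 900, user_tokens: 1200 },
});
```
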
## Integrations

### OpenAI

Wrap the OpenAI client to track all completions automatically:

```ts
import { wrapOpenAI } from '@sensu-ai/sdk/integrations/openai';
import OpenAI from 'openai';

const openai = wrapOpenAI(new OpenAI({ apiKey }), {
  client: sensu,
  runHandle: run,
});

// All calls to openai.chat.completions.create() are now tracked
const response = await openai.chat.completions.create({ model: 'gpt-4o', ... });
```

### LangChain

```ts
import { SensuCallbackHandler } from '@sensu-ai/sdk/integrations/langchain';
import { LLMChain } from 'langchain/chains';

const handler = new SensuCallbackHandler({ client: sensu, runHandle: run });
const chain = new LLMChain({ llm, prompt, callbacks: [handler] });
```

## Supported models (cost estimation)

The SDK automatically estimates cost for the following models:

- Anthropic: `claude-opus-4-6`, `claude-sonnet-4-6`, `claude-haiku-4-5`, `claude-3-5-sonnet`, `claude-3-5-haiku`, `claude-3-opus`
- OpenAI: `gpt-4o`, `gpt-4o-mini`, `gpt-4-turbo`

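For a model outside this list, no cost is attached automatically. One option is to compute your own figure and pass it through `recordLlm`'s optional `costUsdEstimate` field. A sketch, assuming you already have `inputTokens` and `outputTokens` in hand (the rates are placeholders, and `model` is an assumed field name):

```ts
// Placeholder rates in USD per million tokens; substitute your provider's pricing
const costUsdEstimate =
  (inputTokens / 1e6) * 3.0 + (outputTokens / 1e6) * 15.0;

step.recordLlm({ model: 'my-custom-model', costUsdEstimate, status: 'success' });
```
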
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@sensu-ai/sdk",
-  "version": "0.1.0",
+  "version": "0.1.2",
   "type": "module",
   "main": "./src/index.ts",
   "exports": {
@@ -25,7 +25,11 @@
     "openai": ">=4.0.0"
   },
   "peerDependenciesMeta": {
-    "langchain": { "optional": true },
-    "openai": { "optional": true }
+    "langchain": {
+      "optional": true
+    },
+    "openai": {
+      "optional": true
+    }
   }
 }
package/src/client.ts CHANGED
@@ -7,6 +7,7 @@ import type {
   TrackLlmOptions,
   TrackToolOptions,
   RawLlmCallOptions,
+  ContextBreakdown,
 } from './types.js';
 
 // ---------------------------------------------------------------------------
@@ -96,6 +97,7 @@ export class StepHandle {
 
     // Try to extract token usage from common response shapes
     const usage = extractUsage(result, opts.model);
+    const contextBreakdown = opts.extractContextBreakdown?.(result);
 
     this.client.enqueue({
       ...this.base(),
@@ -107,6 +109,7 @@
       latency_ms: latencyMs,
       status,
       ...usage,
+      ...(contextBreakdown ? { context_breakdown: contextBreakdown } : {}),
     });
 
     if (err) throw err;
@@ -115,10 +118,12 @@
 
   /** Emit a raw LLM call event (when you have the stats already) */
   recordLlm(opts: RawLlmCallOptions): void {
+    const { contextBreakdown, ...rest } = opts;
     this.client.enqueue({
       ...this.base(),
       event_type: 'llm.request.completed',
-      ...opts,
+      ...rest,
+      ...(contextBreakdown ? { context_breakdown: contextBreakdown } : {}),
     });
   }
 
package/src/index.ts CHANGED
@@ -6,4 +6,5 @@ export type {
   TrackLlmOptions,
   TrackToolOptions,
   RawLlmCallOptions,
+  ContextBreakdown,
 } from './types.ts';
package/src/types.ts CHANGED
@@ -27,12 +27,25 @@ export interface StartStepOptions {
   stepId?: string;
 }
 
+export interface ContextBreakdown {
+  system_tokens?: number;
+  user_tokens?: number;
+  assistant_tokens?: number;
+  tool_tokens?: number;
+  retrieval_tokens?: number;
+}
+
 export interface TrackLlmOptions {
   provider: string;
   model: string;
   /** Wraps the LLM call and measures latency automatically */
   fn: () => Promise<unknown>;
   maxContextTokens?: number;
+  /**
+   * Optional callback to extract a context breakdown from the LLM response.
+   * Called with the raw response after fn() resolves.
+   */
+  extractContextBreakdown?: (result: unknown) => ContextBreakdown | undefined;
 }
 
 export interface LlmResult {
@@ -62,4 +75,5 @@ export interface RawLlmCallOptions {
   ttftMs?: number;
   costUsdEstimate?: number;
   status?: 'success' | 'error' | 'timeout';
+  contextBreakdown?: ContextBreakdown;
 }