@sensu-ai/sdk 0.1.6 → 0.5.2

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/src/index.ts DELETED
@@ -1,24 +0,0 @@
- export { SensuClient, RunHandle, StepHandle } from './client.ts';
- export type {
-   SensuClientOptions,
-   StartRunOptions,
-   StartStepOptions,
-   TrackLlmOptions,
-   TrackToolOptions,
-   RawLlmCallOptions,
-   ContextBreakdown,
-   TrackRetrievalOptions,
-   RawRetrievalOptions,
-   TrackEmbeddingOptions,
-   RawEmbeddingOptions,
-   RecordFeedbackOptions,
-   RecordEvalScoreOptions,
-   SpawnRunOptions,
-   HandoffOptions,
-   TrackGuardrailOptions,
-   RawGuardrailOptions,
-   RecordPromptRenderOptions,
-   DeployPromptVersionOptions,
-   StartSessionOptions,
-   ResumeSessionOptions,
- } from './types.ts';
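This barrel file was the package's entire 0.1.x public surface. A minimal consumer sketch, assuming the SensuClient constructor accepts the SensuClientOptions shape defined in types.ts below (client.ts itself is not part of this diff, so the wiring is hypothetical):

    import { SensuClient } from '@sensu-ai/sdk';
    import type { SensuClientOptions } from '@sensu-ai/sdk';

    // Hypothetical wiring; the option names come from SensuClientOptions below.
    const options: SensuClientOptions = {
      apiKey: process.env.SENSU_API_KEY,
      batchSize: 50,         // flush after 50 buffered events
      flushIntervalMs: 5000, // or every 5 seconds, whichever comes first
    };
    const sensu = new SensuClient(options);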
package/src/integrations/langchain.ts DELETED
@@ -1,175 +0,0 @@
- /**
-  * LangChain callback handler for Sensu telemetry.
-  *
-  * Usage:
-  *   import { SensuCallbackHandler } from '@sensu-ai/sdk/integrations/langchain';
-  *   const handler = new SensuCallbackHandler({ client: sensu });
-  *   const chain = new LLMChain({ ..., callbacks: [handler] });
-  */
-
- import { randomUUID } from 'crypto';
- import type { SensuClient } from '../client.ts';
-
- interface LangChainCallbackHandlerOptions {
-   client: SensuClient;
-   sessionId?: string;
-   runId?: string;
- }
-
- interface LangChainLlmStartInput {
-   name?: string;
-   [key: string]: unknown;
- }
-
- interface LangChainLlmEndOutput {
-   generations?: Array<Array<{ text?: string; generationInfo?: Record<string, unknown> }>>;
-   llmOutput?: Record<string, unknown>;
- }
-
- export class SensuCallbackHandler {
-   private readonly client: SensuClient;
-   private readonly sessionId: string;
-   private runId: string;
-   private traceId: string;
-   private startTimes: Map<string, number> = new Map();
-   private toolStartTimes: Map<string, number> = new Map();
-   private stepIds: Map<string, string> = new Map();
-
-   constructor(opts: LangChainCallbackHandlerOptions) {
-     this.client = opts.client;
-     this.sessionId = opts.sessionId ?? randomUUID();
-     this.runId = opts.runId ?? randomUUID();
-     this.traceId = randomUUID();
-   }
-
-   private base(spanId?: string) {
-     return {
-       event_id: randomUUID(),
-       timestamp: new Date().toISOString(),
-       org_id: (this.client as unknown as { orgId: string }).orgId,
-       agent_id: (this.client as unknown as { agentId: string }).agentId,
-       session_id: this.sessionId,
-       run_id: this.runId,
-       trace_id: this.traceId,
-       span_id: spanId ?? randomUUID(),
-     };
-   }
-
-   // Called when a chain starts
-   async handleChainStart(_chain: unknown, _inputs: unknown, runId: string) {
-     const stepId = randomUUID();
-     this.stepIds.set(runId, stepId);
-     this.client.enqueue({
-       ...this.base(randomUUID()),
-       step_id: stepId,
-       event_type: 'agent.step.started',
-       step_type: 'chain',
-       sequence: 0,
-     });
-   }
-
-   // Called when a chain ends
-   async handleChainEnd(_outputs: unknown, runId: string) {
-     const stepId = this.stepIds.get(runId);
-     this.client.enqueue({
-       ...this.base(),
-       step_id: stepId,
-       event_type: 'agent.step.completed',
-     });
-     this.stepIds.delete(runId);
-   }
-
-   // Called when an LLM starts
-   async handleLLMStart(
-     llm: LangChainLlmStartInput,
-     _prompts: string[],
-     runId: string,
-   ) {
-     this.startTimes.set(runId, Date.now());
-     this.client.enqueue({
-       ...this.base(),
-       event_type: 'llm.request.started',
-       provider: 'langchain',
-       model: llm.name ?? 'unknown',
-     });
-   }
-
-   // Called when an LLM ends
-   async handleLLMEnd(output: LangChainLlmEndOutput, runId: string) {
-     const startMs = this.startTimes.get(runId);
-     const latencyMs = startMs ? Date.now() - startMs : undefined;
-     this.startTimes.delete(runId);
-
-     // Extract token usage from llmOutput if available
-     const usage = output.llmOutput?.['tokenUsage'] as Record<string, number> | undefined;
-
-     this.client.enqueue({
-       ...this.base(),
-       event_type: 'llm.request.completed',
-       provider: 'langchain',
-       model: 'unknown',
-       latency_ms: latencyMs,
-       status: 'success',
-       input_tokens: usage?.['promptTokens'],
-       output_tokens: usage?.['completionTokens'],
-       total_tokens: usage?.['totalTokens'],
-     });
-   }
-
-   // Called when an LLM errors
-   async handleLLMError(err: Error, runId: string) {
-     const startMs = this.startTimes.get(runId);
-     const latencyMs = startMs ? Date.now() - startMs : undefined;
-     this.startTimes.delete(runId);
-
-     this.client.enqueue({
-       ...this.base(),
-       event_type: 'llm.request.completed',
-       provider: 'langchain',
-       model: 'unknown',
-       latency_ms: latencyMs,
-       status: 'error',
-     });
-   }
-
-   // Called when a tool starts
-   async handleToolStart(_tool: unknown, _input: string, runId: string) {
-     this.toolStartTimes.set(runId, Date.now());
-     this.client.enqueue({
-       ...this.base(),
-       event_type: 'tool.call.started',
-       tool_name: 'unknown',
-     });
-   }
-
-   // Called when a tool ends
-   async handleToolEnd(output: string, runId: string) {
-     const startMs = this.toolStartTimes.get(runId);
-     const latencyMs = startMs ? Date.now() - startMs : undefined;
-     this.toolStartTimes.delete(runId);
-
-     this.client.enqueue({
-       ...this.base(),
-       event_type: 'tool.call.completed',
-       tool_name: 'unknown',
-       latency_ms: latencyMs,
-       status: 'success',
-       output_size_bytes: Buffer.byteLength(output ?? '', 'utf8'),
-     });
-   }
-
-   // Called when a tool errors
-   async handleToolError(err: Error, runId: string) {
-     const startMs = this.toolStartTimes.get(runId);
-     const latencyMs = startMs ? Date.now() - startMs : undefined;
-     this.toolStartTimes.delete(runId);
-
-     this.client.enqueue({
-       ...this.base(),
-       event_type: 'tool.call.completed',
-       tool_name: 'unknown',
-       latency_ms: latencyMs,
-       status: 'error',
-     });
-   }
- }
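The header comment documents the intended wiring. A slightly fuller sketch of the same pattern against a modern LangChain JS runnable (the chain itself is illustrative, not part of this package):

    import { SensuCallbackHandler } from '@sensu-ai/sdk/integrations/langchain';

    // One handler per conversation: it fixes session_id/run_id/trace_id at
    // construction time and stamps them on every enqueued event.
    const handler = new SensuCallbackHandler({ client: sensu, sessionId: userSessionId });

    // LangChain accepts callbacks per invocation; every chain, LLM, and tool
    // start/end/error then flows through the handle* methods above.
    const result = await chain.invoke({ input: 'hello' }, { callbacks: [handler] });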
package/src/integrations/openai.ts DELETED
@@ -1,109 +0,0 @@
- /**
-  * OpenAI SDK wrapper for Sensu telemetry.
-  * Wraps the OpenAI client to automatically track all completions.
-  *
-  * Usage:
-  *   import { wrapOpenAI } from '@sensu-ai/sdk/integrations/openai';
-  *   const openai = wrapOpenAI(new OpenAI({ apiKey }), { client: sensu, runHandle });
-  *   const resp = await openai.chat.completions.create({ ... }); // auto-tracked
-  */
-
- import type { SensuClient, RunHandle } from '../client.ts';
-
- interface OpenAILike {
-   chat: {
-     completions: {
-       create: (params: unknown) => Promise<unknown>;
-     };
-   };
- }
-
- interface WrapOpenAIOptions {
-   client: SensuClient;
-   runHandle?: RunHandle;
-   defaultModel?: string;
-   defaultProvider?: string;
- }
-
- interface CompletionUsage {
-   prompt_tokens?: number;
-   completion_tokens?: number;
-   total_tokens?: number;
- }
-
- interface CompletionResponse {
-   usage?: CompletionUsage;
-   model?: string;
- }
-
- export function wrapOpenAI<T extends OpenAILike>(
-   openai: T,
-   opts: WrapOpenAIOptions,
- ): T {
-   const { client, runHandle } = opts;
-
-   const originalCreate = openai.chat.completions.create.bind(
-     openai.chat.completions,
-   );
-
-   openai.chat.completions.create = async (params: unknown): Promise<unknown> => {
-     const p = params as Record<string, unknown>;
-     const model = (p['model'] as string | undefined) ?? opts.defaultModel ?? 'unknown';
-     const provider = opts.defaultProvider ?? 'openai';
-
-     let step: import('../client.js').StepHandle | undefined;
-     if (runHandle) {
-       step = runHandle.startStep({ name: 'openai-completion', stepType: 'llm' });
-     }
-
-     const startMs = Date.now();
-     let result: unknown;
-     let status: 'success' | 'error' = 'success';
-     let err: unknown;
-
-     try {
-       result = await originalCreate(params);
-     } catch (e) {
-       status = 'error';
-       err = e;
-     }
-
-     const latencyMs = Date.now() - startMs;
-     const r = result as CompletionResponse | undefined;
-     const usage = r?.usage;
-
-     const callOpts = {
-       provider,
-       model: r?.model ?? model,
-       input_tokens: usage?.prompt_tokens,
-       output_tokens: usage?.completion_tokens,
-       total_tokens: usage?.total_tokens,
-       latency_ms: latencyMs,
-       status,
-     };
-
-     if (step) {
-       step.recordLlm(callOpts);
-       void step.end();
-     } else {
-       // Emit standalone if no step context
-       client.enqueue({
-         event_id: crypto.randomUUID(),
-         event_type: 'llm.request.completed',
-         timestamp: new Date().toISOString(),
-         org_id: (client as unknown as { orgId: string }).orgId,
-         agent_id: (client as unknown as { agentId: string }).agentId,
-         session_id: crypto.randomUUID(),
-         run_id: crypto.randomUUID(),
-         trace_id: crypto.randomUUID(),
-         span_id: crypto.randomUUID(),
-         ...callOpts,
-       });
-     }
-
-     if (err) throw err;
-     return result;
-   };
-
-   return openai;
- }
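The wrapper replaces chat.completions.create in place, measures latency, and re-emits token usage as telemetry, preserving the original return value and rethrowing any error. A usage sketch following the header comment (sensu and run are assumed to come from the core client API, which is not shown in this diff):

    import OpenAI from 'openai';
    import { wrapOpenAI } from '@sensu-ai/sdk/integrations/openai';

    const openai = wrapOpenAI(new OpenAI({ apiKey: process.env.OPENAI_API_KEY }), {
      client: sensu,
      runHandle: run, // optional: records each call as an 'llm' step on this run
    });

    // Tracked transparently; the response shape is unchanged, and a thrown
    // error still propagates after the 'error' status is recorded.
    const resp = await openai.chat.completions.create({
      model: 'gpt-4o-mini',
      messages: [{ role: 'user', content: 'ping' }],
    });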
package/src/types.ts DELETED
@@ -1,240 +0,0 @@
- export interface SensuClientOptions {
-   apiKey?: string;
-   baseUrl?: string;
-   agentId?: string;
-   orgId?: string;
-   /** Read config from environment variables */
-   fromEnv?: boolean;
-   /** Flush buffer when this many events accumulate */
-   batchSize?: number;
-   /** Flush buffer every N milliseconds */
-   flushIntervalMs?: number;
-   /** Disable all telemetry (useful for tests) */
-   disabled?: boolean;
- }
-
- export interface StartRunOptions {
-   sessionId?: string;
-   runType?: string;
-   endUserId?: string;
-   runId?: string;
- }
-
- export interface StartStepOptions {
-   name?: string;
-   stepType?: string;
-   sequence?: number;
-   stepId?: string;
- }
-
- export interface ContextBreakdown {
-   system_tokens?: number;
-   user_tokens?: number;
-   assistant_tokens?: number;
-   tool_tokens?: number;
-   retrieval_tokens?: number;
- }
-
- export interface MessageSnapshotItem {
-   role: 'system' | 'user' | 'assistant' | 'tool';
-   tool_name?: string;
-   token_count: number;
-   content_hash: string;
- }
-
- export interface TrackLlmOptions {
-   provider: string;
-   model: string;
-   /** Wraps the LLM call and measures latency automatically */
-   fn: () => Promise<unknown>;
-   maxContextTokens?: number;
-   /**
-    * Optional callback to extract a context breakdown from the LLM response.
-    * Called with the raw response after fn() resolves.
-    */
-   extractContextBreakdown?: (result: unknown) => ContextBreakdown | undefined;
-   /** Stable ID for this LLM call — used to link eval scores. Generated if omitted. */
-   llmCallId?: string;
-   /** Snapshot of every message in the context window sent to this LLM call. */
-   messagesSnapshot?: MessageSnapshotItem[];
-   /** IDs of retrieval chunks whose content the model actually referenced in its output. */
-   referencedChunkIds?: string[];
- }
-
- export interface LlmResult {
-   inputTokens?: number;
-   outputTokens?: number;
-   cachedInputTokens?: number;
-   totalTokens?: number;
-   costUsdEstimate?: number;
-   result: unknown;
- }
-
- export interface TrackToolOptions {
-   toolName: string;
-   fn: () => Promise<unknown>;
- }
-
- export interface RawLlmCallOptions {
-   provider: string;
-   model: string;
-   inputTokens?: number;
-   outputTokens?: number;
-   cachedInputTokens?: number;
-   totalTokens?: number;
-   maxContextTokens?: number;
-   contextUsedTokens?: number;
-   latencyMs?: number;
-   ttftMs?: number;
-   costUsdEstimate?: number;
-   status?: 'success' | 'error' | 'timeout';
-   contextBreakdown?: ContextBreakdown;
- }
-
- // ---------------------------------------------------------------------------
- // Retrieval & Embedding
- // ---------------------------------------------------------------------------
-
- export interface TrackRetrievalOptions {
-   /** Wraps the retrieval call and measures latency automatically */
-   fn: () => Promise<unknown>;
-   vectorStoreId?: string;
-   topK?: number;
- }
-
- export interface RetrievalChunkInput {
-   chunk_id: string;
-   source?: string;
-   token_count: number;
-   similarity_score?: number;
-   content_preview?: string;
- }
-
- export interface RawRetrievalOptions {
-   vectorStoreId?: string;
-   topK?: number;
-   latencyMs?: number;
-   chunksReturned?: number;
-   tokensInjected?: number;
-   similarityScoreAvg?: number;
-   status?: 'success' | 'error';
-   /** Per-chunk detail for retrieval noise analysis. */
-   chunks?: RetrievalChunkInput[];
- }
-
- export interface TrackEmbeddingOptions {
-   model: string;
-   /** Wraps the embedding call and measures latency automatically */
-   fn: () => Promise<unknown>;
-   inputTextLength?: number;
-   batchSize?: number;
- }
-
- export interface RawEmbeddingOptions {
-   model: string;
-   inputTextLength?: number;
-   tokenCount?: number;
-   latencyMs?: number;
-   costUsdEstimate?: number;
-   batchSize?: number;
- }
-
- // ---------------------------------------------------------------------------
- // Evaluation & Feedback
- // ---------------------------------------------------------------------------
-
- export interface RecordFeedbackOptions {
-   type: 'thumbs_up' | 'thumbs_down' | 'score' | 'correction';
-   score?: number;
-   comment?: string;
-   endUserId?: string;
- }
-
- export interface RecordEvalScoreOptions {
-   metric: string;
-   score: number;
-   evaluatorId?: string;
-   modelUsedForEval?: string;
-   /** Step ID this eval score is linked to — enables quality correlation view. */
-   stepId?: string;
-   /** LLM call ID this eval score is linked to — must match the llmCallId used in trackLlm(). */
-   llmCallId?: string;
- }
-
- // ---------------------------------------------------------------------------
- // Multi-Agent
- // ---------------------------------------------------------------------------
-
- export interface SpawnRunOptions {
-   childAgentId: string;
-   childRunId?: string;
-   spawnReason?: string;
-   /** Options forwarded to the child RunHandle */
-   runType?: string;
-   sessionId?: string;
- }
-
- export interface HandoffOptions {
-   toAgentId: string;
-   reason?: string;
-   contextTokensTransferred?: number;
- }
-
- // ---------------------------------------------------------------------------
- // Guardrails
- // ---------------------------------------------------------------------------
-
- export interface TrackGuardrailOptions {
-   guardrailId: string;
-   guardrailType?: 'content' | 'pii' | 'jailbreak' | 'custom';
-   inputHash?: string;
-   /** Wraps the guardrail check and measures latency automatically */
-   fn: () => Promise<'pass' | 'fail' | 'modified'>;
- }
-
- export interface RawGuardrailOptions {
-   guardrailId: string;
-   guardrailType?: 'content' | 'pii' | 'jailbreak' | 'custom';
-   inputHash?: string;
-   result?: 'pass' | 'fail' | 'modified';
-   blockReason?: string;
-   severity?: 'low' | 'medium' | 'high';
-   latencyMs?: number;
-   blocked?: boolean;
- }
-
- // ---------------------------------------------------------------------------
- // Prompt Management
- // ---------------------------------------------------------------------------
-
- export interface RecordPromptRenderOptions {
-   templateId: string;
-   templateVersion?: string;
-   renderedTokenCount?: number;
-   variableCount?: number;
-   latencyMs?: number;
- }
-
- export interface DeployPromptVersionOptions {
-   templateId: string;
-   newVersion: string;
-   oldVersion?: string;
-   deployedBy?: string;
- }
-
- // ---------------------------------------------------------------------------
- // Session Lifecycle
- // ---------------------------------------------------------------------------
-
- export interface StartSessionOptions {
-   sessionId?: string;
-   channel?: 'web' | 'api' | 'mobile';
-   endUserId?: string;
- }
-
- export interface ResumeSessionOptions {
-   sessionId?: string;
-   resumedFromSessionId: string;
-   channel?: 'web' | 'api' | 'mobile';
-   endUserId?: string;
- }
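These interfaces imply a wrap-style API: the `fn` callback is awaited and latency is measured by the SDK, and `llmCallId` is the join key between an LLM call and a later eval score (per the RecordEvalScoreOptions doc comment). A hypothetical call site, since client.ts, which defines the receivers of these options, is not part of this diff:

    import { randomUUID } from 'crypto';

    const llmCallId = randomUUID();

    // trackLlm is named in the RecordEvalScoreOptions doc comment; the exact
    // receiver (run handle vs. step handle) is an assumption here.
    await step.trackLlm({
      provider: 'openai',
      model: 'gpt-4o',
      llmCallId,
      fn: () => openai.chat.completions.create({ model: 'gpt-4o', messages }),
    });

    // Later, link an eval score back to that exact call:
    await run.recordEvalScore({ metric: 'faithfulness', score: 0.92, llmCallId });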
package/tsconfig.json DELETED
@@ -1,11 +0,0 @@
- {
-   "extends": "../../tsconfig.base.json",
-   "compilerOptions": {
-     "module": "NodeNext",
-     "moduleResolution": "NodeNext",
-     "outDir": "./dist",
-     "rootDir": "./src",
-     "types": ["node"]
-   },
-   "include": ["src"]
- }