@sentrial/sdk 0.1.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -5,6 +5,7 @@ TypeScript SDK for [Sentrial](https://sentrial.com) - AI agent observability and
5
5
  Track sessions, tool calls, and metrics to power:
6
6
  - **Signal detection**: Auto-detect patterns and anomalies
7
7
  - **Root cause analysis**: Understand WHY agents fail
8
+ - **Experiments**: A/B test different system prompts
8
9
  - **Code fixer**: AI-suggested fixes with GitHub PRs
9
10
 
10
11
  ## Installation
@@ -17,89 +18,151 @@ pnpm add @sentrial/sdk
17
18
  yarn add @sentrial/sdk
18
19
  ```
19
20
 
20
- ## Quick Start
21
+ ## Quick Start (30 seconds)
22
+
23
+ ### Option 1: Auto-Wrap LLM Clients (Recommended)
21
24
 
22
25
  ```typescript
23
- import { SentrialClient } from '@sentrial/sdk';
26
+ import OpenAI from 'openai';
27
+ import { wrapOpenAI, withSession, configure } from '@sentrial/sdk';
24
28
 
25
- const client = new SentrialClient({
26
- apiKey: process.env.SENTRIAL_API_KEY!,
27
- });
29
+ configure({ apiKey: 'sentrial_live_xxx' });
28
30
 
29
- async function runAgent(userId: string, input: string) {
30
- // 1. Create session
31
- const sessionId = await client.createSession({
32
- name: `Support: ${input.slice(0, 50)}`,
33
- agentName: 'support-agent',
34
- userId,
31
+ // Wrap once - all calls auto-tracked!
32
+ const openai = wrapOpenAI(new OpenAI());
33
+
34
+ const myAgent = withSession('support-agent', async (userId: string, message: string) => {
35
+ // LLM calls automatically tracked with tokens, cost, latency
36
+ const response = await openai.chat.completions.create({
37
+ model: 'gpt-4o',
38
+ messages: [{ role: 'user', content: message }],
35
39
  });
40
+ return response.choices[0].message.content;
41
+ });
36
42
 
37
- try {
38
- // 2. Track tool calls
39
- await client.trackToolCall({
40
- sessionId,
41
- toolName: 'search_kb',
42
- toolInput: { query: input },
43
- toolOutput: { articles: ['KB-001'] },
44
- reasoning: 'Searching knowledge base',
45
- });
46
-
47
- // 3. Process and respond
48
- const response = await processInput(input);
49
-
50
- // 4. Complete session
51
- await client.completeSession({
52
- sessionId,
53
- success: true,
54
- customMetrics: { satisfaction: 4.5 },
55
- });
56
-
57
- return response;
58
- } catch (error) {
59
- await client.completeSession({
60
- sessionId,
61
- success: false,
62
- failureReason: error instanceof Error ? error.message : 'Unknown error',
63
- });
64
- throw error;
65
- }
66
- }
43
+ await myAgent('user_123', 'How do I reset my password?');
67
44
  ```
68
45
 
69
- ## Simple API (begin/finish pattern)
46
+ ### Option 2: Decorators / Higher-Order Functions
70
47
 
71
48
  ```typescript
72
- import { sentrial } from '@sentrial/sdk';
49
+ import { withTool, withSession, configure } from '@sentrial/sdk';
73
50
 
74
- sentrial.configure({ apiKey: 'sentrial_live_xxx' });
51
+ configure({ apiKey: 'sentrial_live_xxx' });
75
52
 
76
- async function handleMessage(userId: string, message: string) {
77
- const interaction = await sentrial.begin({
78
- userId,
79
- event: 'chat_message',
80
- input: message,
81
- });
53
+ // Track tools
54
+ const searchKB = withTool('search_kb', async (query: string) => {
55
+ return { articles: ['KB-001', 'KB-002'] };
56
+ });
57
+
58
+ // Track sessions
59
+ const supportAgent = withSession('support-agent', async (userId: string, message: string) => {
60
+ const results = await searchKB(message); // Auto-tracked!
61
+ return `Found ${results.articles.length} articles`;
62
+ });
63
+ ```
64
+
65
+ ### Option 3: Vercel AI SDK
66
+
67
+ ```typescript
68
+ import { configure, wrapAISDK } from '@sentrial/sdk';
69
+ import * as ai from 'ai';
70
+ import { openai } from '@ai-sdk/openai';
71
+
72
+ configure({ apiKey: process.env.SENTRIAL_API_KEY!, defaultAgent: 'my-agent' });
73
+
74
+ const { generateText, streamText } = wrapAISDK(ai);
75
+
76
+ // All calls automatically traced!
77
+ const { text } = await generateText({
78
+ model: openai('gpt-4'),
79
+ prompt: 'What is the capital of France?',
80
+ });
81
+ ```
82
+
83
+ ## Features
82
84
 
83
- try {
84
- const response = await agent.process(message);
85
-
86
- await interaction.finish({
87
- output: response,
88
- success: true,
89
- customMetrics: { satisfaction: 4.5 },
90
- });
91
-
92
- return response;
93
- } catch (error) {
94
- await interaction.finish({
95
- success: false,
96
- failureReason: error.message,
97
- });
98
- throw error;
99
- }
85
+ ### LLM Auto-Wrappers
86
+
87
+ Wrap once, track everything automatically:
88
+
89
+ ```typescript
90
+ import { wrapOpenAI, wrapAnthropic, wrapGoogle, wrapLLM } from '@sentrial/sdk';
91
+
92
+ // OpenAI
93
+ const openai = wrapOpenAI(new OpenAI());
94
+
95
+ // Anthropic
96
+ const anthropic = wrapAnthropic(new Anthropic());
97
+
98
+ // Google Gemini
99
+ const model = wrapGoogle(genAI.getGenerativeModel({ model: 'gemini-2.0-flash' }));
100
+
101
+ // Auto-detect
102
+ const client = wrapLLM(new OpenAI()); // Detects OpenAI, Anthropic, or Google
103
+ ```
104
+
105
+ ### Decorators
106
+
107
+ ```typescript
108
+ import { withTool, withSession, Tool, TrackSession } from '@sentrial/sdk';
109
+
110
+ // Higher-order functions
111
+ const searchWeb = withTool('web_search', async (query: string) => {...});
112
+ const myAgent = withSession('my-agent', async (userId, message) => {...});
113
+
114
+ // Class decorators (with experimentalDecorators)
115
+ class MyAgent {
116
+ @Tool('search')
117
+ async searchWeb(query: string) {...}
118
+
119
+ @TrackSession('my-agent')
120
+ async handleRequest(userId: string, message: string) {...}
100
121
  }
101
122
  ```
102
123
 
124
+ ### Experiments
125
+
126
+ A/B test different system prompts:
127
+
128
+ ```typescript
129
+ import { Experiment, getSystemPrompt, isExperimentMode } from '@sentrial/sdk';
130
+
131
+ const experiment = new Experiment('exp_abc123');
132
+ await experiment.load();
133
+
134
+ await experiment.run(async (testCase, variant, tracker) => {
135
+ const systemPrompt = getSystemPrompt('Default prompt');
136
+ const response = await runAgent(testCase.userInput, systemPrompt);
137
+ tracker.setResultSessionId(sessionId); // sessionId: the Sentrial session id created by your agent run
138
+ });
139
+ ```
140
+
141
+ ### Vercel AI SDK Integration
142
+
143
+ Full support for Vercel AI SDK v3-v6:
144
+
145
+ ```typescript
146
+ import { wrapAISDK } from '@sentrial/sdk';
147
+ import * as ai from 'ai';
148
+ import { z } from 'zod';
149
+
150
+ const { generateText, streamText, generateObject, streamObject } = wrapAISDK(ai);
151
+
152
+ // With tools
153
+ const { text } = await generateText({
154
+ model: openai('gpt-4'),
155
+ prompt: "What's the weather?",
156
+ tools: {
157
+ getWeather: {
158
+ description: 'Get weather',
159
+ parameters: z.object({ location: z.string() }),
160
+ execute: async ({ location }) => ({ temp: 72 }), // Auto-tracked!
161
+ },
162
+ },
163
+ });
164
+ ```
165
+
103
166
  ## Configuration
104
167
 
105
168
  ### Environment Variables
@@ -109,100 +172,110 @@ SENTRIAL_API_URL=https://api.sentrial.com
109
172
  SENTRIAL_API_KEY=sentrial_live_xxx
110
173
  ```
111
174
 
112
- ### Explicit Configuration
175
+ ### Programmatic
113
176
 
114
177
  ```typescript
115
- const client = new SentrialClient({
178
+ import { configure } from '@sentrial/sdk';
179
+
180
+ configure({
116
181
  apiKey: 'sentrial_live_xxx',
117
182
  apiUrl: 'https://api.sentrial.com', // Optional
118
- failSilently: true, // Default: true (errors logged but don't crash)
119
183
  });
120
184
  ```
121
185
 
122
- ## Cost Calculation
186
+ ## Full API
187
+
188
+ For maximum control:
123
189
 
124
190
  ```typescript
125
- import { calculateOpenAICost, calculateAnthropicCost, calculateGoogleCost } from '@sentrial/sdk';
191
+ import { SentrialClient } from '@sentrial/sdk';
126
192
 
127
- const openaiCost = calculateOpenAICost({
128
- model: 'gpt-4o',
129
- inputTokens: 1000,
130
- outputTokens: 500,
131
- });
193
+ const client = new SentrialClient({ apiKey: '...' });
132
194
 
133
- const anthropicCost = calculateAnthropicCost({
134
- model: 'claude-3-5-sonnet',
135
- inputTokens: 1000,
136
- outputTokens: 500,
195
+ // Create session
196
+ const sessionId = await client.createSession({
197
+ name: 'Support Request',
198
+ agentName: 'support-agent',
199
+ userId: 'user_123',
137
200
  });
138
201
 
139
- const googleCost = calculateGoogleCost({
140
- model: 'gemini-2.0-flash',
141
- inputTokens: 1000,
142
- outputTokens: 500,
202
+ // Track tool calls
203
+ await client.trackToolCall({
204
+ sessionId,
205
+ toolName: 'search_kb',
206
+ toolInput: { query: 'password reset' },
207
+ toolOutput: { articles: ['KB-001'] },
208
+ reasoning: 'User asked about password reset',
143
209
  });
144
- ```
145
-
146
- ## Error Handling
147
-
148
- ```typescript
149
- import { SentrialClient, SentrialError } from '@sentrial/sdk';
150
210
 
151
- const client = new SentrialClient({
152
- apiKey: '...',
153
- failSilently: false, // Enable errors during development
211
+ // Track decisions
212
+ await client.trackDecision({
213
+ sessionId,
214
+ reasoning: 'Found relevant article, sharing with user',
215
+ alternatives: ['Escalate', 'Ask clarifying question'],
216
+ confidence: 0.9,
154
217
  });
155
218
 
156
- try {
157
- await client.createSession({...});
158
- } catch (error) {
159
- if (error instanceof SentrialError) {
160
- if (error.isAuthError()) {
161
- console.error('Invalid API key');
162
- } else if (error.isRateLimitError()) {
163
- console.error('Rate limited');
164
- }
165
- }
166
- throw error;
167
- }
219
+ // Complete session
220
+ await client.completeSession({
221
+ sessionId,
222
+ success: true,
223
+ customMetrics: { satisfaction: 4.5 },
224
+ promptTokens: 1500,
225
+ completionTokens: 500,
226
+ });
168
227
  ```
169
228
 
170
- ## API Reference
171
-
172
- ### SentrialClient
229
+ ## Cost Calculation
173
230
 
174
231
  ```typescript
175
- class SentrialClient {
176
- constructor(config?: SentrialClientConfig);
177
-
178
- createSession(params: CreateSessionParams): Promise<string | null>;
179
- trackToolCall(params: TrackToolCallParams): Promise<Event | null>;
180
- trackDecision(params: TrackDecisionParams): Promise<Event | null>;
181
- trackError(params: TrackErrorParams): Promise<Event | null>;
182
- completeSession(params: CompleteSessionParams): Promise<Session | null>;
183
- begin(params: BeginParams): Promise<Interaction>;
184
-
185
- // Static cost calculators
186
- static calculateOpenAICost(params: CostParams): number;
187
- static calculateAnthropicCost(params: CostParams): number;
188
- static calculateGoogleCost(params: CostParams): number;
189
- }
190
- ```
191
-
192
- ### Interaction
232
+ import { calculateOpenAICost, calculateAnthropicCost, calculateGoogleCost } from '@sentrial/sdk';
193
233
 
194
- ```typescript
195
- class Interaction {
196
- setOutput(output: string): void;
197
- finish(params?: FinishParams): Promise<Session | null>;
198
- trackToolCall(params: Omit<TrackToolCallParams, 'sessionId'>): Promise<Event | null>;
199
- trackDecision(params: Omit<TrackDecisionParams, 'sessionId'>): Promise<Event | null>;
200
- trackError(params: Omit<TrackErrorParams, 'sessionId'>): Promise<Event | null>;
201
- getSessionId(): string | null;
202
- isDegraded(): boolean;
203
- }
234
+ const cost = calculateOpenAICost({
235
+ model: 'gpt-4o',
236
+ inputTokens: 1000,
237
+ outputTokens: 500,
238
+ });
239
+ // Returns: 0.0075
204
240
  ```
205
241
 
242
+ ## What Gets Tracked
243
+
244
+ | Data | Auto-tracked | Manual |
245
+ |------|-------------|--------|
246
+ | LLM calls | ✔️ via wrappers | `trackToolCall()` |
247
+ | Token usage | ✔️ via wrappers | `promptTokens` param |
248
+ | Cost (USD) | ✔️ calculated | `estimatedCost` param |
249
+ | Latency | ✔️ always | - |
250
+ | Tool calls | ✔️ via `withTool` | `trackToolCall()` |
251
+ | Errors | ✔️ always | `trackError()` |
252
+ | User ID | ✔️ via session | `userId` param |
253
+ | Custom metrics | - | `customMetrics` param |
254
+
255
+ ## Framework Compatibility
256
+
257
+ | Framework | Integration | Status |
258
+ |-----------|-------------|--------|
259
+ | **Direct OpenAI** | `wrapOpenAI()` | ✔️ |
260
+ | **Direct Anthropic** | `wrapAnthropic()` | ✔️ |
261
+ | **Direct Gemini** | `wrapGoogle()` | ✔️ |
262
+ | **Vercel AI SDK** | `wrapAISDK()` | ✔️ |
263
+ | **Express/Fastify** | Decorators | ✔️ |
264
+ | **Next.js** | Decorators | ✔️ |
265
+ | **Custom agents** | Full API | ✔️ |
266
+
267
+ ## Documentation
268
+
269
+ See the `docs/` folder in the repository:
270
+ - `docs/sdks/typescript-sdk.md` - Full SDK reference
271
+ - `docs/integrations/vercel-ai-sdk.md` - Vercel AI SDK integration
272
+ - `docs/features/experiments.md` - Experiments guide
273
+
274
+ ## Support
275
+ - 💬 [Discord](https://discord.gg/9bMmJCXt)
276
+ - 📧 [Email](mailto:neel@sentrial.com)
277
+ - 🐛 [GitHub Issues](https://github.com/neelshar/Sentrial/issues)
278
+
206
279
  ## License
207
280
 
208
281
  MIT