@lelemondev/sdk 0.1.0 → 0.2.1

package/README.md CHANGED
@@ -1,151 +1,173 @@
- # @lelemon/sdk
-
- Low-friction LLM observability. **3 lines of code.**
-
- ```typescript
- import { trace } from '@lelemon/sdk';
-
- const t = trace({ input: userMessage });
- try {
-   // ... your agent code ...
-   await t.success(messages);
- } catch (error) {
-   await t.error(error, messages);
- }
- ```
-
- ## Installation
-
- ```bash
- npm install @lelemon/sdk
- # or
- yarn add @lelemon/sdk
- # or
- pnpm add @lelemon/sdk
- ```
-
- ## Setup
-
- Set your API key:
-
- ```bash
- export LELEMON_API_KEY=le_your_api_key
- ```
-
- Or configure programmatically:
-
- ```typescript
- import { init } from '@lelemon/sdk';
-
- init({ apiKey: 'le_xxx' });
- ```
-
- ## Quick Start
-
- ```typescript
- import { trace } from '@lelemon/sdk';
-
- async function runAgent(userMessage: string) {
-   const t = trace({ input: userMessage });
-
-   try {
-     const messages = [
-       { role: 'system', content: 'You are a helpful assistant.' },
-       { role: 'user', content: userMessage },
-     ];
-
-     const response = await openai.chat.completions.create({
-       model: 'gpt-4',
-       messages,
-     });
-
-     // Optional: log response to capture token usage
-     t.log(response);
-
-     messages.push(response.choices[0].message);
-
-     await t.success(messages);
-     return response.choices[0].message.content;
-   } catch (error) {
-     await t.error(error, messages);
-     throw error;
-   }
- }
- ```
-
- ## Supported Providers
-
- | Provider | Message Format | Auto-detected |
- |----------|---------------|---------------|
- | **OpenAI** | `role: 'user' \| 'assistant'` | Yes |
- | **Anthropic** | `role: 'user' \| 'assistant'` | Yes |
- | **Gemini** | `role: 'user' \| 'model'` | Yes |
- | **AWS Bedrock** | Anthropic format | Yes |
-
- ## API Reference
-
- ### `trace(options)`
-
- Start a new trace.
-
- ```typescript
- const t = trace({
-   input: userMessage,       // Required
-   name: 'my-agent',         // Optional
-   sessionId: 'session-123', // Optional
-   userId: 'user-456',       // Optional
-   metadata: { ... },        // Optional
-   tags: ['prod'],           // Optional
- });
- ```
-
- ### `t.success(messages)`
-
- Complete trace successfully.
-
- ```typescript
- await t.success(messages);
- ```
-
- ### `t.error(error, messages?)`
-
- Complete trace with error.
-
- ```typescript
- await t.error(error, messages);
- ```
-
- ### `t.log(response)`
-
- Log an LLM response to capture tokens (optional).
-
- ```typescript
- t.log(response);
- ```
-
- ### `init(config)`
-
- Initialize SDK globally (optional).
-
- ```typescript
- init({
-   apiKey: 'le_xxx',
-   endpoint: 'https://custom.endpoint.com',
-   debug: true,
-   disabled: process.env.NODE_ENV === 'test',
- });
- ```
-
- ## What Gets Captured
-
- - System prompt
- - User input
- - Tool calls and results
- - Final output
- - Token usage
- - Duration
- - Errors with stack traces
-
- ## License
-
- MIT
+ # @lelemondev/sdk
+
+ [![npm version](https://img.shields.io/npm/v/@lelemondev/sdk.svg)](https://www.npmjs.com/package/@lelemondev/sdk)
+ [![CI](https://github.com/lelemondev/lelemondev-sdk/actions/workflows/ci.yml/badge.svg)](https://github.com/lelemondev/lelemondev-sdk/actions/workflows/ci.yml)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+
+ Fire-and-forget LLM observability for Node.js. Track your AI agents with 3 lines of code.
+
+ ## Features
+
+ - 🔥 **Fire-and-forget** - Never blocks your code
+ - 📦 **Auto-batching** - Efficient network usage
+ - ⚡ **Zero config** - Works out of the box
+ - 🛡️ **Error-safe** - Never crashes your app
+ - 🌐 **Serverless-ready** - Built-in flush for Lambda/Vercel
+
+ ## Installation
+
+ ```bash
+ npm install @lelemondev/sdk
+ ```
+
+ ## Quick Start
+
+ ```typescript
+ import { init, trace, flush } from '@lelemondev/sdk';
+
+ // Initialize once at app startup
+ init({ apiKey: process.env.LELEMON_API_KEY });
+
+ // Trace your agent (fire-and-forget, no awaits needed!)
+ const t = trace({ input: userMessage });
+
+ try {
+   const result = await myAgent(userMessage);
+   t.success(result.messages); // Sync, doesn't block
+ } catch (error) {
+   t.error(error); // Sync, doesn't block
+   throw error;
+ }
+
+ // For serverless: flush before response
+ await flush();
+ ```
+
+ ## API Reference
+
+ ### `init(config)`
+
+ Initialize the SDK. Call once at app startup.
+
+ ```typescript
+ init({
+   apiKey: 'le_xxx',        // Required (or set LELEMON_API_KEY env var)
+   endpoint: 'https://...', // Optional, custom endpoint
+   debug: false,            // Optional, enable debug logs
+   batchSize: 10,           // Optional, items per batch
+   flushIntervalMs: 1000,   // Optional, auto-flush interval
+ });
+ ```
+
+ ### `trace(options)`
+
+ Start a new trace. Returns a `Trace` object.
+
+ ```typescript
+ const t = trace({
+   input: userMessage,       // Required, the input to your agent
+   sessionId: 'session-123', // Optional, group related traces
+   userId: 'user-456',       // Optional, identify the user
+   name: 'chat-agent',       // Optional, name for this trace
+   metadata: { ... },        // Optional, custom metadata
+   tags: ['prod', 'v2'],     // Optional, tags for filtering
+ });
+ ```
+
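+ A single conversation usually spans several traces; passing the same `sessionId` on each turn groups them together. A short sketch (the `conversationId` variable and `myAgent` function are placeholders, as in the Quick Start):
+
+ ```typescript
+ async function handleTurn(conversationId: string, userMessage: string) {
+   // One trace per turn, all sharing the conversation's session ID
+   const t = trace({
+     input: userMessage,
+     sessionId: conversationId,
+     name: 'chat-agent',
+   });
+
+   try {
+     const result = await myAgent(userMessage);
+     t.success(result.messages);
+     return result;
+   } catch (error) {
+     t.error(error);
+     throw error;
+   }
+ }
+ ```
+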
+ ### `Trace.success(messages)`
+
+ Complete the trace successfully. Fire-and-forget (no await needed).
+
+ ```typescript
+ t.success(result.messages);
+ ```
+
+ ### `Trace.error(error, messages?)`
+
+ Complete the trace with an error. Fire-and-forget (no await needed).
+
+ ```typescript
+ t.error(error);
+ t.error(error, partialMessages); // Include messages up to failure
+ ```
+
+ ### `Trace.log(response)`
+
+ Log an LLM response for token tracking (optional).
+
+ ```typescript
+ const response = await openai.chat.completions.create(...);
+ t.log(response); // Extracts model, tokens automatically
+ ```
+
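+ For multi-step agents, calling `t.log()` after each LLM request records tokens per call rather than only in aggregate. A sketch of a two-call flow (the `openai` client and prompts are placeholders):
+
+ ```typescript
+ const t = trace({ input: userMessage });
+
+ // Step 1: planning call
+ const plan = await openai.chat.completions.create({
+   model: 'gpt-4',
+   messages: [{ role: 'user', content: userMessage }],
+ });
+ t.log(plan); // token usage recorded for this call
+
+ // Step 2: answer call, feeding the plan back in
+ const answer = await openai.chat.completions.create({
+   model: 'gpt-4',
+   messages: [
+     { role: 'user', content: userMessage },
+     plan.choices[0].message,
+     { role: 'user', content: 'Now answer using that plan.' },
+   ],
+ });
+ t.log(answer); // logged separately, so each call's tokens are tracked
+
+ t.success([plan.choices[0].message, answer.choices[0].message]);
+ ```
+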
+ ### `flush()`
+
+ Wait for all pending traces to be sent. Use in serverless environments.
+
+ ```typescript
+ await flush();
+ ```
+
+ ## Serverless Usage
+
+ ### Vercel (Next.js)
+
+ ```typescript
+ import { waitUntil } from '@vercel/functions';
+ import { trace, flush } from '@lelemondev/sdk';
+
+ export async function POST(req: Request) {
+   const { message } = await req.json();
+   const t = trace({ input: message });
+
+   try {
+     const result = await myAgent(message);
+     t.success(result);
+     return Response.json(result);
+   } catch (error) {
+     t.error(error);
+     throw error;
+   } finally {
+     waitUntil(flush()); // Flush after response
+   }
+ }
+ ```
+
+ ### AWS Lambda
+
+ ```typescript
+ import { trace, flush } from '@lelemondev/sdk';
+
+ export const handler = async (event) => {
+   const t = trace({ input: event.body });
+
+   try {
+     const result = await myAgent(event.body);
+     t.success(result);
+     return { statusCode: 200, body: JSON.stringify(result) };
+   } catch (error) {
+     t.error(error);
+     throw error;
+   } finally {
+     await flush(); // Always flush before Lambda ends
+   }
+ };
+ ```
+
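+ By contrast, in a long-running server the auto-flush interval delivers traces in the background, so an explicit `flush()` is normally only needed on shutdown. A minimal sketch:
+
+ ```typescript
+ import { flush } from '@lelemondev/sdk';
+
+ // Drain any queued traces before the process exits
+ process.on('SIGTERM', async () => {
+   await flush();
+   process.exit(0);
+ });
+ ```
+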
+ ## Supported Providers
+
+ | Provider | Auto-detected |
+ |----------|---------------|
+ | OpenAI | ✅ |
+ | Anthropic | ✅ |
+ | Google Gemini | ✅ |
+ | AWS Bedrock | ✅ |
+
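+ Auto-detection means the same `t.log()` call works whichever client produced the response — no provider configuration needed. For example, with the official `@anthropic-ai/sdk` client (model name illustrative):
+
+ ```typescript
+ import Anthropic from '@anthropic-ai/sdk';
+
+ const anthropic = new Anthropic();
+
+ const response = await anthropic.messages.create({
+   model: 'claude-3-5-sonnet-20241022',
+   max_tokens: 1024,
+   messages: [{ role: 'user', content: userMessage }],
+ });
+
+ t.log(response); // same call as with OpenAI; the format is auto-detected
+ ```
+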
+ ## Environment Variables
+
+ | Variable | Description |
+ |----------|-------------|
+ | `LELEMON_API_KEY` | Your Lelemon API key (starts with `le_`) |
+
+ ## License
+
+ MIT © [Lelemon](https://lelemon.dev)
package/dist/index.d.mts CHANGED
@@ -20,6 +20,18 @@ interface LelemonConfig {
  * Disable tracing (useful for testing)
  */
  disabled?: boolean;
+ /**
+ * Number of items to batch before sending (default: 10)
+ */
+ batchSize?: number;
+ /**
+ * Interval in ms to flush pending items (default: 1000)
+ */
+ flushIntervalMs?: number;
+ /**
+ * Request timeout in ms (default: 10000)
+ */
+ requestTimeoutMs?: number;
  }
  interface TraceOptions {
  /**
@@ -125,7 +137,14 @@ interface CompleteTraceRequest {
  }
 
  /**
- * Transport layer for sending data to Lelemon API
+ * Transport layer with queue-based batching
+ *
+ * Features:
+ * - Fire-and-forget API (sync enqueue)
+ * - Automatic batching (by size or interval)
+ * - Single flush promise (no duplicate requests)
+ * - Graceful error handling (never crashes caller)
+ * - Request timeout protection
  */
 
  interface TransportConfig {
@@ -133,97 +152,146 @@ interface TransportConfig {
  endpoint: string;
  debug: boolean;
  disabled: boolean;
+ batchSize?: number;
+ flushIntervalMs?: number;
+ requestTimeoutMs?: number;
  }
  declare class Transport {
- private config;
+ private readonly config;
+ private queue;
+ private flushPromise;
+ private flushTimer;
+ private pendingResolvers;
+ private idCounter;
  constructor(config: TransportConfig);
  /**
  * Check if transport is enabled
  */
  isEnabled(): boolean;
  /**
- * Create a new trace
+ * Enqueue trace creation (returns promise that resolves to trace ID)
+ */
+ enqueueCreate(data: CreateTraceRequest): Promise<string | null>;
+ /**
+ * Enqueue trace completion (fire-and-forget)
  */
- createTrace(data: CreateTraceRequest): Promise<{
- id: string;
- }>;
+ enqueueComplete(traceId: string, data: CompleteTraceRequest): void;
  /**
- * Complete a trace (success or error)
+ * Flush all pending items
+ * Safe to call multiple times (deduplicates)
  */
- completeTrace(traceId: string, data: CompleteTraceRequest): Promise<void>;
+ flush(): Promise<void>;
  /**
- * Make HTTP request to API
+ * Get pending item count (for testing/debugging)
  */
+ getPendingCount(): number;
+ private generateTempId;
+ private enqueue;
+ private scheduleFlush;
+ private cancelScheduledFlush;
+ private sendBatch;
  private request;
+ private log;
  }
 
  /**
- * Lelemon Tracer - Simple, low-friction API
+ * Lelemon Tracer - Fire-and-forget LLM observability
  *
  * Usage:
  * const t = trace({ input: userMessage });
  * try {
- * // your agent code
- * t.success(messages);
+ * const result = await myAgent(userMessage);
+ * t.success(result.messages);
  * } catch (error) {
- * t.error(error, messages);
+ * t.error(error);
+ * throw error;
  * }
+ *
+ * For serverless:
+ * await flush(); // Before response
  */
 
  /**
- * Initialize the SDK globally (optional)
- * If not called, trace() will auto-initialize with env vars
+ * Initialize the SDK (optional, will auto-init with env vars)
+ *
+ * @example
+ * init({ apiKey: 'le_xxx' });
+ * init({ apiKey: 'le_xxx', debug: true });
  */
  declare function init(config?: LelemonConfig): void;
  /**
- * Active trace handle returned by trace()
+ * Start a new trace
+ *
+ * @example
+ * const t = trace({ input: userMessage });
+ * try {
+ * const result = await myAgent(userMessage);
+ * t.success(result.messages);
+ * } catch (error) {
+ * t.error(error);
+ * throw error;
+ * }
+ */
+ declare function trace(options: TraceOptions): Trace;
+ /**
+ * Flush all pending traces to the server
+ * Call this before process exit in serverless environments
+ *
+ * @example
+ * // In Next.js API route
+ * export async function POST(req: Request) {
+ * // ... your code with traces ...
+ * await flush();
+ * return Response.json(result);
+ * }
+ *
+ * // With Vercel waitUntil
+ * import { waitUntil } from '@vercel/functions';
+ * waitUntil(flush());
  */
+ declare function flush(): Promise<void>;
+ /**
+ * Check if SDK is enabled
+ */
+ declare function isEnabled(): boolean;
  declare class Trace {
  private id;
- private transport;
- private options;
- private startTime;
+ private idPromise;
+ private readonly transport;
+ private readonly startTime;
+ private readonly debug;
+ private readonly disabled;
  private completed;
- private debug;
- private disabled;
  private llmCalls;
  constructor(options: TraceOptions, transport: Transport, debug: boolean, disabled: boolean);
  /**
- * Initialize trace on server (called internally)
+ * Log an LLM response for token tracking
+ * Optional - use if you want per-call token counts
  */
- init(): Promise<void>;
+ log(response: unknown): this;
  /**
- * Log an LLM response (optional - for tracking individual calls)
- * Use this if you want to track tokens per call, not just at the end
+ * Complete trace successfully (fire-and-forget)
+ *
+ * @param messages - Full message history (OpenAI/Anthropic format)
  */
- log(response: unknown): void;
+ success(messages: unknown): void;
  /**
- * Complete trace successfully
- * @param messages - The full message history (OpenAI/Anthropic format)
+ * Complete trace with error (fire-and-forget)
+ *
+ * @param error - The error that occurred
+ * @param messages - Optional message history up to failure
  */
- success(messages: unknown): Promise<void>;
+ error(error: Error | unknown, messages?: unknown): void;
  /**
- * Complete trace with error
- * @param error - The error that occurred
- * @param messages - The message history up to the failure (optional)
+ * Get the trace ID (may be null if not yet created or failed)
+ */
+ getId(): string | null;
+ /**
+ * Wait for trace ID to be available
  */
- error(error: Error | unknown, messages?: unknown): Promise<void>;
+ waitForId(): Promise<string | null>;
+ private aggregateCalls;
  }
- /**
- * Start a new trace
- *
- * @example
- * const t = trace({ input: userMessage });
- * try {
- * const messages = [...];
- * // ... your agent code ...
- * await t.success(messages);
- * } catch (error) {
- * await t.error(error, messages);
- * throw error;
- * }
- */
- declare function trace(options: TraceOptions): Trace;
 
  /**
  * Message Parser
@@ -245,4 +313,4 @@ declare function parseResponse(response: unknown): Partial<ParsedLLMCall>;
  */
  declare function parseBedrockResponse(body: unknown): Partial<ParsedLLMCall>;
 
- export { type AnthropicMessage, type LelemonConfig, type Message, type OpenAIMessage, type ParsedLLMCall, type ParsedToolCall, type ParsedTrace, Trace, type TraceOptions, init, parseBedrockResponse, parseMessages, parseResponse, trace };
+ export { type AnthropicMessage, type LelemonConfig, type Message, type OpenAIMessage, type ParsedLLMCall, type ParsedToolCall, type ParsedTrace, Trace, type TraceOptions, flush, init, isEnabled, parseBedrockResponse, parseMessages, parseResponse, trace };
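
Taken together, the new `Transport` declarations describe a queue with synchronous enqueue, batching by size or interval, and a single deduplicated flush promise. For intuition, that contract amounts to roughly the following pattern (an illustrative sketch, not the package's private implementation):

```typescript
class BatchQueue<T> {
  private queue: T[] = [];
  private flushPromise: Promise<void> | null = null;
  private timer: ReturnType<typeof setTimeout> | null = null;

  constructor(
    private send: (batch: T[]) => Promise<void>,
    private batchSize = 10,         // cf. LelemonConfig.batchSize
    private flushIntervalMs = 1000, // cf. LelemonConfig.flushIntervalMs
  ) {}

  // Fire-and-forget: callers never await network I/O
  enqueue(item: T): void {
    this.queue.push(item);
    if (this.queue.length >= this.batchSize) {
      void this.flush(); // size threshold reached
    } else if (!this.timer) {
      // otherwise flush on the interval
      this.timer = setTimeout(() => void this.flush(), this.flushIntervalMs);
    }
  }

  // Safe to call multiple times: concurrent callers share one promise
  flush(): Promise<void> {
    if (this.flushPromise) return this.flushPromise;
    if (this.timer) {
      clearTimeout(this.timer);
      this.timer = null;
    }
    const batch = this.queue.splice(0); // take everything queued so far
    const p = (batch.length ? this.send(batch) : Promise.resolve())
      .catch(() => {}) // graceful: errors never crash the caller
      .finally(() => {
        this.flushPromise = null;
      });
    this.flushPromise = p;
    return p;
  }
}
```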