@lelemondev/sdk 0.2.1 → 0.3.0
This diff compares the publicly available contents of these two package versions as published to their registry. It is provided for informational purposes only and reflects the packages exactly as they appear in the public registry.
- package/README.md +124 -73
- package/dist/index.d.mts +50 -278
- package/dist/index.d.ts +50 -278
- package/dist/index.js +724 -524
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +723 -520
- package/dist/index.mjs.map +1 -1
- package/package.json +2 -2
package/dist/index.d.ts  CHANGED

```diff
@@ -1,316 +1,88 @@
 /**
- * Lelemon SDK
- * Minimal, low-friction API for LLM observability
+ * Core types for Lelemon SDK
 */
 interface LelemonConfig {
-  /**
-   * API key for authentication (starts with 'le_')
-   * Can also be set via LELEMON_API_KEY env var
-   */
+  /** API key (or set LELEMON_API_KEY env var) */
   apiKey?: string;
-  /**
-   * API endpoint (default: https://api.lelemon.dev)
-   */
+  /** API endpoint (default: https://api.lelemon.dev) */
   endpoint?: string;
-  /**
-   * Enable debug logging
-   */
+  /** Enable debug logging */
   debug?: boolean;
-  /**
-   * Disable tracing (useful for testing)
-   */
+  /** Disable tracing */
   disabled?: boolean;
-  /**
-   * Number of items to batch before sending (default: 10)
-   */
+  /** Batch size before flush (default: 10) */
   batchSize?: number;
-  /**
-   * Interval in ms to flush pending items (default: 1000)
-   */
+  /** Auto-flush interval in ms (default: 1000) */
   flushIntervalMs?: number;
-  /**
-   * Request timeout in ms (default: 10000)
-   */
+  /** Request timeout in ms (default: 10000) */
   requestTimeoutMs?: number;
 }
```
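The `LelemonConfig` fields themselves are unchanged between versions; only their doc comments were condensed. A minimal sketch of initializing the SDK with these options (the values shown are the documented defaults; the key is read from the environment when omitted):

```ts
import { init, isEnabled } from '@lelemondev/sdk';

init({
  apiKey: process.env.LELEMON_API_KEY, // starts with 'le_'; omit to rely on the env var
  endpoint: 'https://api.lelemon.dev', // default endpoint
  debug: false,                        // enable debug logging
  disabled: false,                     // disable tracing entirely
  batchSize: 10,                       // items batched before a flush
  flushIntervalMs: 1000,               // auto-flush cadence
  requestTimeoutMs: 10000,             // per-request timeout
});

isEnabled(); // check whether tracing is active
```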
```diff
-
-
-
- */
-  input: unknown;
-  /**
-   * Session ID to group related traces
-   */
+type ProviderName = 'openai' | 'anthropic' | 'google' | 'bedrock' | 'unknown';
+interface ObserveOptions {
+  /** Session ID to group related calls */
   sessionId?: string;
-  /**
-   * User ID for the end user
-   */
+  /** User ID for the end user */
   userId?: string;
-  /**
-   * Custom metadata
-   */
+  /** Custom metadata added to all traces */
   metadata?: Record<string, unknown>;
-  /**
-   * Tags for filtering
-   */
+  /** Tags for filtering */
   tags?: string[];
-  /**
-   * Name for this trace (e.g., 'chat-agent', 'summarizer')
-   */
-  name?: string;
-}
-interface OpenAIMessage {
-  role: 'system' | 'user' | 'assistant' | 'tool';
-  content: string | null;
-  tool_calls?: OpenAIToolCall[];
-  tool_call_id?: string;
-}
-interface OpenAIToolCall {
-  id: string;
-  type: 'function';
-  function: {
-    name: string;
-    arguments: string;
-  };
-}
-interface AnthropicMessage {
-  role: 'user' | 'assistant';
-  content: string | AnthropicContent[];
-}
-interface AnthropicContent {
-  type: 'text' | 'tool_use' | 'tool_result';
-  text?: string;
-  id?: string;
-  name?: string;
-  input?: unknown;
-  tool_use_id?: string;
-  content?: string;
-}
-type Message = OpenAIMessage | AnthropicMessage | Record<string, unknown>;
-interface ParsedTrace {
-  systemPrompt?: string;
-  userInput?: string;
-  output?: string;
-  llmCalls: ParsedLLMCall[];
-  toolCalls: ParsedToolCall[];
-  totalInputTokens: number;
-  totalOutputTokens: number;
-  models: string[];
-  provider?: 'openai' | 'anthropic' | 'gemini' | 'bedrock' | 'unknown';
-}
-interface ParsedLLMCall {
-  model?: string;
-  provider?: string;
-  inputTokens?: number;
-  outputTokens?: number;
-  input?: unknown;
-  output?: unknown;
-  toolCalls?: ParsedToolCall[];
-}
-interface ParsedToolCall {
-  name: string;
-  input: unknown;
-  output?: unknown;
-}
-interface CreateTraceRequest {
-  name?: string;
-  sessionId?: string;
-  userId?: string;
-  input?: unknown;
-  metadata?: Record<string, unknown>;
-  tags?: string[];
-}
-interface CompleteTraceRequest {
-  status: 'completed' | 'error';
-  output?: unknown;
-  errorMessage?: string;
-  errorStack?: string;
-  systemPrompt?: string;
-  llmCalls?: ParsedLLMCall[];
-  toolCalls?: ParsedToolCall[];
-  models?: string[];
-  totalInputTokens?: number;
-  totalOutputTokens?: number;
-  totalCostUsd?: number;
-  durationMs?: number;
-  metadata?: Record<string, unknown>;
-}
-
-/**
- * Transport layer with queue-based batching
- *
- * Features:
- * - Fire-and-forget API (sync enqueue)
- * - Automatic batching (by size or interval)
- * - Single flush promise (no duplicate requests)
- * - Graceful error handling (never crashes caller)
- * - Request timeout protection
- */
-
-interface TransportConfig {
-  apiKey: string;
-  endpoint: string;
-  debug: boolean;
-  disabled: boolean;
-  batchSize?: number;
-  flushIntervalMs?: number;
-  requestTimeoutMs?: number;
-}
-declare class Transport {
-  private readonly config;
-  private queue;
-  private flushPromise;
-  private flushTimer;
-  private pendingResolvers;
-  private idCounter;
-  constructor(config: TransportConfig);
-  /**
-   * Check if transport is enabled
-   */
-  isEnabled(): boolean;
-  /**
-   * Enqueue trace creation (returns promise that resolves to trace ID)
-   */
-  enqueueCreate(data: CreateTraceRequest): Promise<string | null>;
-  /**
-   * Enqueue trace completion (fire-and-forget)
-   */
-  enqueueComplete(traceId: string, data: CompleteTraceRequest): void;
-  /**
-   * Flush all pending items
-   * Safe to call multiple times (deduplicates)
-   */
-  flush(): Promise<void>;
-  /**
-   * Get pending item count (for testing/debugging)
-   */
-  getPendingCount(): number;
-  private generateTempId;
-  private enqueue;
-  private scheduleFlush;
-  private cancelScheduledFlush;
-  private sendBatch;
-  private request;
-  private log;
 }
```
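The new `ObserveOptions` (closed by the shared `}` just above) replaces the removed `TraceOptions`: context now attaches to a wrapped client instead of a manually created trace, and the parsing/transport types (`OpenAIMessage`, `ParsedTrace`, `Transport`, and friends) move out of the public surface. A sketch of passing these options to the `observe()` function declared further down in this file; the values are illustrative:

```ts
import { observe } from '@lelemondev/sdk';
import OpenAI from 'openai';

const openai = observe(new OpenAI(), {
  sessionId: 'session-123',      // group related calls
  userId: 'user-456',            // end-user attribution
  metadata: { feature: 'chat' }, // custom metadata added to all traces
  tags: ['production'],          // tags for filtering
});
```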
```diff
 
 /**
- *
- *
- * Usage:
- *   const t = trace({ input: userMessage });
- *   try {
- *     const result = await myAgent(userMessage);
- *     t.success(result.messages);
- *   } catch (error) {
- *     t.error(error);
- *     throw error;
- *   }
+ * Global Configuration
 *
- *
- *   await flush(); // Before response
+ * Manages SDK configuration and transport instance.
 */
 
 /**
- * Initialize the SDK
- *
- * @example
- * init({ apiKey: 'le_xxx' });
- * init({ apiKey: 'le_xxx', debug: true });
+ * Initialize the SDK
+ * Call once at app startup
 */
 declare function init(config?: LelemonConfig): void;
-/**
- * Start a new trace
- *
- * @example
- * const t = trace({ input: userMessage });
- * try {
- *   const result = await myAgent(userMessage);
- *   t.success(result.messages);
- * } catch (error) {
- *   t.error(error);
- *   throw error;
- * }
- */
-declare function trace(options: TraceOptions): Trace;
-/**
- * Flush all pending traces to the server
- * Call this before process exit in serverless environments
- *
- * @example
- * // In Next.js API route
- * export async function POST(req: Request) {
- *   // ... your code with traces ...
- *   await flush();
- *   return Response.json(result);
- * }
- *
- * // With Vercel waitUntil
- * import { waitUntil } from '@vercel/functions';
- * waitUntil(flush());
- */
-declare function flush(): Promise<void>;
 /**
 * Check if SDK is enabled
 */
 declare function isEnabled(): boolean;
-declare class Trace {
-  private id;
-  private idPromise;
-  private readonly transport;
-  private readonly startTime;
-  private readonly debug;
-  private readonly disabled;
-  private completed;
-  private llmCalls;
-  constructor(options: TraceOptions, transport: Transport, debug: boolean, disabled: boolean);
-  /**
-   * Log an LLM response for token tracking
-   * Optional - use if you want per-call token counts
-   */
-  log(response: unknown): this;
-  /**
-   * Complete trace successfully (fire-and-forget)
-   *
-   * @param messages - Full message history (OpenAI/Anthropic format)
-   */
-  success(messages: unknown): void;
-  /**
-   * Complete trace with error (fire-and-forget)
-   *
-   * @param error - The error that occurred
-   * @param messages - Optional message history up to failure
-   */
-  error(error: Error | unknown, messages?: unknown): void;
-  /**
-   * Get the trace ID (may be null if not yet created or failed)
-   */
-  getId(): string | null;
-  /**
-   * Wait for trace ID to be available
-   */
-  waitForId(): Promise<string | null>;
-  private aggregateCalls;
-}
-
 /**
- *
- * Auto-detects OpenAI/Anthropic/Gemini message formats and extracts relevant data
+ * Flush all pending traces
 */
+declare function flush(): Promise<void>;
 
```
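The serverless guidance was dropped from the `flush()` JSDoc in this rewrite, but the examples removed from the 0.2.x docs still describe the intended usage. A sketch based on those removed docs (`handleRequest` is a placeholder for your own traced code):

```ts
import { flush } from '@lelemondev/sdk';
import { waitUntil } from '@vercel/functions';

declare function handleRequest(req: Request): Promise<unknown>; // placeholder

// Next.js API route: deliver queued traces before the
// serverless instance is suspended.
export async function POST(req: Request) {
  const result = await handleRequest(req);
  waitUntil(flush()); // or simply: await flush();
  return Response.json(result);
}
```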
```diff
 /**
- *
+ * Observe Function
+ *
+ * Main entry point for wrapping LLM clients with automatic tracing.
 */
-
+
 /**
- *
- *
+ * Wrap an LLM client with automatic tracing
+ *
+ * @param client - OpenAI or Anthropic client instance
+ * @param options - Optional context (sessionId, userId, etc.)
+ * @returns The wrapped client with the same type
+ *
+ * @example
+ * import { observe } from '@lelemondev/sdk';
+ * import OpenAI from 'openai';
+ *
+ * const openai = observe(new OpenAI());
+ *
+ * // All calls are now automatically traced
+ * const response = await openai.chat.completions.create({...});
 */
-declare function
+declare function observe<T>(client: T, options?: ObserveOptions): T;
 /**
- *
- *
+ * Create a scoped observe function with preset context
+ *
+ * @example
+ * const observeWithSession = createObserve({
+ *   sessionId: 'session-123',
+ *   userId: 'user-456',
+ * });
+ *
+ * const openai = observeWithSession(new OpenAI());
 */
-declare function
+declare function createObserve(defaultOptions: ObserveOptions): <T>(client: T, options?: ObserveOptions) => T;
 
-export { type
+export { type LelemonConfig, type ObserveOptions, type ProviderName, createObserve, flush, init, isEnabled, observe };
```
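Read as a whole, the diff replaces the manual `trace()` lifecycle with a one-time client wrapper. A migration sketch implied by the removed and added declarations; the 0.2.x half mirrors the removed JSDoc example, and `myAgent`, `userMessage`, and the model name are placeholders:

```ts
// 0.2.1 (removed API): manual trace lifecycle
// import { trace, flush } from '@lelemondev/sdk'; // 0.2.x exports
declare const userMessage: string;
declare function myAgent(input: string): Promise<{ messages: unknown[] }>; // placeholder

const t = trace({ input: userMessage });
try {
  const result = await myAgent(userMessage);
  t.success(result.messages); // full message history
} catch (error) {
  t.error(error);
  throw error;
}
await flush();
```

```ts
// 0.3.0: wrap the client once; calls are traced automatically
import { observe, flush } from '@lelemondev/sdk';
import OpenAI from 'openai';

const openai = observe(new OpenAI(), { sessionId: 'session-123' });
const response = await openai.chat.completions.create({
  model: 'gpt-4o-mini', // placeholder request
  messages: [{ role: 'user', content: userMessage }],
});
await flush(); // still required before exit in serverless environments
```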
|