@lelemondev/sdk 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +151 -0
- package/dist/index.d.mts +248 -0
- package/dist/index.d.ts +248 -0
- package/dist/index.js +551 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +544 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +72 -0
package/README.md
ADDED
@@ -0,0 +1,151 @@

# @lelemondev/sdk

Low-friction LLM observability. **3 lines of code.**

```typescript
import { trace } from '@lelemondev/sdk';

const t = trace({ input: userMessage });
try {
  // ... your agent code builds `messages` ...
  await t.success(messages);
} catch (error) {
  await t.error(error, messages);
}
```

## Installation

```bash
npm install @lelemondev/sdk
# or
yarn add @lelemondev/sdk
# or
pnpm add @lelemondev/sdk
```

## Setup

Set your API key:

```bash
export LELEMON_API_KEY=le_your_api_key
```

Or configure programmatically:

```typescript
import { init } from '@lelemondev/sdk';

init({ apiKey: 'le_xxx' });
```

## Quick Start

```typescript
import OpenAI from 'openai';
import { trace } from '@lelemondev/sdk';

const openai = new OpenAI();

async function runAgent(userMessage: string) {
  const t = trace({ input: userMessage });

  // Declared outside the try block so it is still in scope in the catch
  const messages: OpenAI.ChatCompletionMessageParam[] = [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: userMessage },
  ];

  try {
    const response = await openai.chat.completions.create({
      model: 'gpt-4',
      messages,
    });

    // Optional: log response to capture token usage
    t.log(response);

    messages.push(response.choices[0].message);

    await t.success(messages);
    return response.choices[0].message.content;
  } catch (error) {
    await t.error(error, messages);
    throw error;
  }
}
```
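
The same pattern works with other providers; the message history you hand to `t.success` is auto-detected (see the table below). A minimal sketch with Anthropic's SDK, where the client setup and model name are illustrative assumptions rather than part of this package:

```typescript
import Anthropic from '@anthropic-ai/sdk';
import { trace } from '@lelemondev/sdk';

const anthropic = new Anthropic(); // reads ANTHROPIC_API_KEY from the environment

async function runClaudeAgent(userMessage: string) {
  const t = trace({ input: userMessage, name: 'claude-agent' });
  const messages: Anthropic.MessageParam[] = [{ role: 'user', content: userMessage }];

  try {
    const response = await anthropic.messages.create({
      model: 'claude-3-5-sonnet-latest', // illustrative model name
      max_tokens: 1024,
      messages,
    });

    t.log(response); // optional: capture token usage from this call

    messages.push({ role: 'assistant', content: response.content });
    await t.success(messages); // Anthropic format should be auto-detected

    return response.content;
  } catch (error) {
    await t.error(error, messages);
    throw error;
  }
}
```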

## Supported Providers

| Provider | Message Format | Auto-detected |
|----------|---------------|---------------|
| **OpenAI** | `role: 'user' \| 'assistant'` | Yes |
| **Anthropic** | `role: 'user' \| 'assistant'` | Yes |
| **Gemini** | `role: 'user' \| 'model'` | Yes |
| **AWS Bedrock** | Anthropic format | Yes |
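
Detection keys off the shape of the messages, so a Gemini-style history (with `role: 'model'`) can be passed to `t.success` unchanged. A hand-built sketch (no Gemini client required; per the table above, the provider should be detected as `gemini`):

```typescript
import { trace } from '@lelemondev/sdk';

const t = trace({ input: 'What is the capital of France?' });

// Gemini-style history: roles are 'user' | 'model'
const history = [
  { role: 'user', parts: [{ text: 'What is the capital of France?' }] },
  { role: 'model', parts: [{ text: 'The capital of France is Paris.' }] },
];

await t.success(history);
```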

## API Reference

### `trace(options)`

Start a new trace.

```typescript
const t = trace({
  input: userMessage,       // Required
  name: 'my-agent',         // Optional
  sessionId: 'session-123', // Optional
  userId: 'user-456',       // Optional
  metadata: { ... },        // Optional
  tags: ['prod'],           // Optional
});
```
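
`sessionId` is the handle for grouping related traces, e.g. the turns of one conversation. A minimal sketch, assuming a `conversationId` you already track elsewhere:

```typescript
import { trace } from '@lelemondev/sdk';

async function handleTurn(conversationId: string, userMessage: string) {
  // One trace per turn; the shared sessionId ties the turns together
  const t = trace({
    input: userMessage,
    name: 'chat-agent',
    sessionId: conversationId,
  });
  // ... run the agent, then t.success(...) / t.error(...) as above ...
}
```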

### `t.success(messages)`

Complete trace successfully.

```typescript
await t.success(messages);
```

### `t.error(error, messages?)`

Complete trace with error.

```typescript
await t.error(error, messages);
```

### `t.log(response)`

Log an LLM response to capture tokens (optional).

```typescript
t.log(response);
```
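
In a multi-step agent, calling `t.log` once per LLM response attributes token usage to each call rather than only in aggregate. A sketch of a simple tool-use loop with OpenAI's SDK; `tools` and `runTool` are illustrative stand-ins for your own tool definitions:

```typescript
import OpenAI from 'openai';
import { trace } from '@lelemondev/sdk';

const openai = new OpenAI();

async function runToolAgent(
  userMessage: string,
  tools: OpenAI.ChatCompletionTool[],
  runTool: (call: OpenAI.ChatCompletionMessageToolCall) => Promise<unknown>,
) {
  const t = trace({ input: userMessage, name: 'tool-agent' });
  const messages: OpenAI.ChatCompletionMessageParam[] = [
    { role: 'user', content: userMessage },
  ];

  try {
    for (let step = 0; step < 5; step++) {
      const response = await openai.chat.completions.create({ model: 'gpt-4', messages, tools });

      t.log(response); // one log per call, so tokens are tracked per call

      const message = response.choices[0].message;
      messages.push(message);

      if (!message.tool_calls) break; // model produced a final answer

      for (const call of message.tool_calls) {
        messages.push({
          role: 'tool',
          tool_call_id: call.id,
          content: JSON.stringify(await runTool(call)),
        });
      }
    }
    await t.success(messages);
  } catch (error) {
    await t.error(error, messages);
    throw error;
  }
}
```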

### `init(config)`

Initialize SDK globally (optional).

```typescript
init({
  apiKey: 'le_xxx',
  endpoint: 'https://custom.endpoint.com',
  debug: true,
  disabled: process.env.NODE_ENV === 'test',
});
```

## What Gets Captured

- System prompt
- User input
- Tool calls and results
- Final output
- Token usage
- Duration
- Errors with stack traces

## License

MIT
package/dist/index.d.mts
ADDED

@@ -0,0 +1,248 @@
```ts
/**
 * Lelemon SDK Types
 * Minimal, low-friction API for LLM observability
 */
interface LelemonConfig {
    /**
     * API key for authentication (starts with 'le_')
     * Can also be set via LELEMON_API_KEY env var
     */
    apiKey?: string;
    /**
     * API endpoint (default: https://api.lelemon.dev)
     */
    endpoint?: string;
    /**
     * Enable debug logging
     */
    debug?: boolean;
    /**
     * Disable tracing (useful for testing)
     */
    disabled?: boolean;
}
interface TraceOptions {
    /**
     * Initial input (user message, prompt, etc.)
     */
    input: unknown;
    /**
     * Session ID to group related traces
     */
    sessionId?: string;
    /**
     * User ID for the end user
     */
    userId?: string;
    /**
     * Custom metadata
     */
    metadata?: Record<string, unknown>;
    /**
     * Tags for filtering
     */
    tags?: string[];
    /**
     * Name for this trace (e.g., 'chat-agent', 'summarizer')
     */
    name?: string;
}
interface OpenAIMessage {
    role: 'system' | 'user' | 'assistant' | 'tool';
    content: string | null;
    tool_calls?: OpenAIToolCall[];
    tool_call_id?: string;
}
interface OpenAIToolCall {
    id: string;
    type: 'function';
    function: {
        name: string;
        arguments: string;
    };
}
interface AnthropicMessage {
    role: 'user' | 'assistant';
    content: string | AnthropicContent[];
}
interface AnthropicContent {
    type: 'text' | 'tool_use' | 'tool_result';
    text?: string;
    id?: string;
    name?: string;
    input?: unknown;
    tool_use_id?: string;
    content?: string;
}
type Message = OpenAIMessage | AnthropicMessage | Record<string, unknown>;
interface ParsedTrace {
    systemPrompt?: string;
    userInput?: string;
    output?: string;
    llmCalls: ParsedLLMCall[];
    toolCalls: ParsedToolCall[];
    totalInputTokens: number;
    totalOutputTokens: number;
    models: string[];
    provider?: 'openai' | 'anthropic' | 'gemini' | 'bedrock' | 'unknown';
}
interface ParsedLLMCall {
    model?: string;
    provider?: string;
    inputTokens?: number;
    outputTokens?: number;
    input?: unknown;
    output?: unknown;
    toolCalls?: ParsedToolCall[];
}
interface ParsedToolCall {
    name: string;
    input: unknown;
    output?: unknown;
}
interface CreateTraceRequest {
    name?: string;
    sessionId?: string;
    userId?: string;
    input?: unknown;
    metadata?: Record<string, unknown>;
    tags?: string[];
}
interface CompleteTraceRequest {
    status: 'completed' | 'error';
    output?: unknown;
    errorMessage?: string;
    errorStack?: string;
    systemPrompt?: string;
    llmCalls?: ParsedLLMCall[];
    toolCalls?: ParsedToolCall[];
    models?: string[];
    totalInputTokens?: number;
    totalOutputTokens?: number;
    totalCostUsd?: number;
    durationMs?: number;
    metadata?: Record<string, unknown>;
}

/**
 * Transport layer for sending data to Lelemon API
 */

interface TransportConfig {
    apiKey: string;
    endpoint: string;
    debug: boolean;
    disabled: boolean;
}
declare class Transport {
    private config;
    constructor(config: TransportConfig);
    /**
     * Check if transport is enabled
     */
    isEnabled(): boolean;
    /**
     * Create a new trace
     */
    createTrace(data: CreateTraceRequest): Promise<{
        id: string;
    }>;
    /**
     * Complete a trace (success or error)
     */
    completeTrace(traceId: string, data: CompleteTraceRequest): Promise<void>;
    /**
     * Make HTTP request to API
     */
    private request;
}

/**
 * Lelemon Tracer - Simple, low-friction API
 *
 * Usage:
 *   const t = trace({ input: userMessage });
 *   try {
 *     // your agent code
 *     t.success(messages);
 *   } catch (error) {
 *     t.error(error, messages);
 *   }
 */

/**
 * Initialize the SDK globally (optional)
 * If not called, trace() will auto-initialize with env vars
 */
declare function init(config?: LelemonConfig): void;
/**
 * Active trace handle returned by trace()
 */
declare class Trace {
    private id;
    private transport;
    private options;
    private startTime;
    private completed;
    private debug;
    private disabled;
    private llmCalls;
    constructor(options: TraceOptions, transport: Transport, debug: boolean, disabled: boolean);
    /**
     * Initialize trace on server (called internally)
     */
    init(): Promise<void>;
    /**
     * Log an LLM response (optional - for tracking individual calls)
     * Use this if you want to track tokens per call, not just at the end
     */
    log(response: unknown): void;
    /**
     * Complete trace successfully
     * @param messages - The full message history (OpenAI/Anthropic format)
     */
    success(messages: unknown): Promise<void>;
    /**
     * Complete trace with error
     * @param error - The error that occurred
     * @param messages - The message history up to the failure (optional)
     */
    error(error: Error | unknown, messages?: unknown): Promise<void>;
}
/**
 * Start a new trace
 *
 * @example
 * const t = trace({ input: userMessage });
 * try {
 *   const messages = [...];
 *   // ... your agent code ...
 *   await t.success(messages);
 * } catch (error) {
 *   await t.error(error, messages);
 *   throw error;
 * }
 */
declare function trace(options: TraceOptions): Trace;

/**
 * Message Parser
 * Auto-detects OpenAI/Anthropic/Gemini message formats and extracts relevant data
 */

/**
 * Parse messages array and extract structured data
 */
declare function parseMessages(messages: unknown): ParsedTrace;
/**
 * Extract data from an OpenAI/Anthropic/Bedrock response object
 * This handles the raw API response (not the messages array)
 */
declare function parseResponse(response: unknown): Partial<ParsedLLMCall>;
/**
 * Parse Bedrock InvokeModel response body
 * Call this with the parsed JSON body from Bedrock
 */
declare function parseBedrockResponse(body: unknown): Partial<ParsedLLMCall>;

export { type AnthropicMessage, type LelemonConfig, type Message, type OpenAIMessage, type ParsedLLMCall, type ParsedToolCall, type ParsedTrace, Trace, type TraceOptions, init, parseBedrockResponse, parseMessages, parseResponse, trace };
```
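Beyond `trace`, the parsers are exported for standalone use. A hedged sketch exercising `parseMessages`, based only on the signatures declared above; the commented values show the expected shape, not verified output:

```typescript
import { parseMessages, parseResponse } from '@lelemondev/sdk';

const parsed = parseMessages([
  { role: 'system', content: 'You are a helpful assistant.' },
  { role: 'user', content: 'Hi!' },
  { role: 'assistant', content: 'Hello! How can I help?' },
]);

console.log(parsed.provider);     // e.g. 'openai', detected from the message shape
console.log(parsed.systemPrompt); // 'You are a helpful assistant.'
console.log(parsed.output);       // text of the final assistant message

// parseResponse takes a raw API response object instead of a history:
// const call = parseResponse(openAIResponse); // -> Partial<ParsedLLMCall>
```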
package/dist/index.d.ts
ADDED

@@ -0,0 +1,248 @@
(content identical to package/dist/index.d.mts above; the package ships the same declarations for both its ESM and CJS entry points)