@lelemondev/sdk 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
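The headline change in 0.3.0 is the public API: the manual `trace()` / `Trace` class workflow from 0.2.0 is removed, and tracing now happens by wrapping a provider client with the new `observe()` function (see `src/observe.ts` and the `exports` block at the end of the diff). A minimal migration sketch, assuming an OpenAI client; the model name, session ID, and user ID are illustrative:

```ts
import OpenAI from "openai";
import { init, observe, flush } from "@lelemondev/sdk";

// Falls back to the LELEMON_API_KEY environment variable if apiKey is omitted.
init({ apiKey: process.env.LELEMON_API_KEY });

// 0.3.0: wrap the client once; chat, completions, and embeddings calls are
// then captured automatically through the Proxy wrappers in src/observe.ts.
const openai = observe(new OpenAI(), {
  sessionId: "session-123", // stored as global context, attached to every trace
  userId: "user-456",
});

const res = await openai.chat.completions.create({
  model: "gpt-4o", // illustrative
  messages: [{ role: "user", content: "Hello" }],
});

await flush(); // drain queued traces before the process exits
```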
package/dist/index.js CHANGED
@@ -5,7 +5,7 @@ var __defProp = Object.defineProperty;
  var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
  var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);

- // src/transport.ts
+ // src/core/transport.ts
  var DEFAULT_BATCH_SIZE = 10;
  var DEFAULT_FLUSH_INTERVAL_MS = 1e3;
  var DEFAULT_REQUEST_TIMEOUT_MS = 1e4;
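The three defaults above are unchanged: batches of 10 traces, a one-second flush interval (`1e3` ms), and a ten-second request timeout (`1e4` ms). As `createTransport()` further down shows, they remain overridable through `init()`; a sketch with illustrative values:

```ts
import { init } from "@lelemondev/sdk";

init({
  batchSize: 20,          // default 10  (DEFAULT_BATCH_SIZE)
  flushIntervalMs: 500,   // default 1e3 = 1000 ms  (DEFAULT_FLUSH_INTERVAL_MS)
  requestTimeoutMs: 5000, // default 1e4 = 10000 ms (DEFAULT_REQUEST_TIMEOUT_MS)
});
```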
@@ -15,8 +15,6 @@ var Transport = class {
  __publicField(this, "queue", []);
  __publicField(this, "flushPromise", null);
  __publicField(this, "flushTimer", null);
- __publicField(this, "pendingResolvers", /* @__PURE__ */ new Map());
- __publicField(this, "idCounter", 0);
  this.config = {
  apiKey: config.apiKey,
  endpoint: config.endpoint,
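Dropping `pendingResolvers` and `idCounter` retires the 0.2.0 two-phase protocol, in which a trace was first created under a client-generated `tempId` and later resolved to a server-assigned ID before it could be completed. In 0.3.0 the queue holds finished trace records only. A type-level sketch of the difference; the shapes are reconstructed from the surrounding code, not taken from the package's published typings:

```ts
// 0.2.0: two queue item kinds; "create" items were matched back to a
// server-assigned trace ID via pendingResolvers.
type QueueItem020 =
  | { type: "create"; tempId: string; data: unknown }
  | { type: "complete"; traceId: string; data: unknown };

// 0.3.0: a single kind, a complete trace record enqueued fire-and-forget
// (field list from captureTrace() later in this diff; optional field types
// are best guesses).
type QueueItem030 = {
  provider: string;
  model: string;
  input: unknown;
  output: unknown;
  inputTokens: number;
  outputTokens: number;
  durationMs: number;
  status: "success" | "error";
  streaming: boolean;
  sessionId?: string;
  userId?: string;
  metadata?: Record<string, unknown>;
  tags?: string[];
};
```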
@@ -34,30 +32,21 @@ var Transport = class {
  return !this.config.disabled && !!this.config.apiKey;
  }
  /**
- * Enqueue trace creation (returns promise that resolves to trace ID)
+ * Enqueue a trace for sending
+ * Fire-and-forget - never blocks
  */
- enqueueCreate(data) {
- if (this.config.disabled) {
- return Promise.resolve(null);
- }
- const tempId = this.generateTempId();
- return new Promise((resolve) => {
- this.pendingResolvers.set(tempId, resolve);
- this.enqueue({ type: "create", tempId, data });
- });
- }
- /**
- * Enqueue trace completion (fire-and-forget)
- */
- enqueueComplete(traceId, data) {
- if (this.config.disabled || !traceId) {
- return;
+ enqueue(trace) {
+ if (this.config.disabled) return;
+ this.queue.push(trace);
+ if (this.queue.length >= this.config.batchSize) {
+ this.flush();
+ } else {
+ this.scheduleFlush();
  }
- this.enqueue({ type: "complete", traceId, data });
  }
  /**
- * Flush all pending items
- * Safe to call multiple times (deduplicates)
+ * Flush all pending traces
+ * Safe to call multiple times
  */
  async flush() {
  if (this.flushPromise) {
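With `enqueueCreate()` gone, nothing in the send path hands back a per-trace promise, so the one remaining synchronization point is `flush()`, which this version still exports. In short-lived processes (serverless handlers, CLI scripts) it is worth calling before exit so queued traces are not dropped; a sketch, with the handler shape purely illustrative:

```ts
import { flush } from "@lelemondev/sdk";

export async function handler(event: unknown): Promise<void> {
  // ... traced provider calls enqueue fire-and-forget here ...
  await flush(); // drain anything still sitting in the batch queue
}
```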
@@ -75,29 +64,16 @@ var Transport = class {
  return this.flushPromise;
  }
  /**
- * Get pending item count (for testing/debugging)
+ * Get pending count (for debugging)
  */
  getPendingCount() {
  return this.queue.length;
  }
  // ─────────────────────────────────────────────────────────────
- // Private methods
+ // Private Methods
  // ─────────────────────────────────────────────────────────────
- generateTempId() {
- return `temp_${++this.idCounter}_${Date.now()}`;
- }
- enqueue(item) {
- this.queue.push(item);
- if (this.queue.length >= this.config.batchSize) {
- this.flush();
- } else {
- this.scheduleFlush();
- }
- }
  scheduleFlush() {
- if (this.flushTimer !== null) {
- return;
- }
+ if (this.flushTimer !== null) return;
  this.flushTimer = setTimeout(() => {
  this.flushTimer = null;
  this.flush();
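`scheduleFlush()` arms at most one timer: while a timed flush is pending, further enqueues simply join the same batch. A standalone sketch of the coalescing pattern, mirroring the code above rather than extracted from it:

```ts
let flushTimer: ReturnType<typeof setTimeout> | null = null;

function scheduleFlush(intervalMs: number, flush: () => void): void {
  if (flushTimer !== null) return; // a timed flush is already pending
  flushTimer = setTimeout(() => {
    flushTimer = null; // let the next enqueue arm a fresh timer
    flush();
  }, intervalMs);
}
```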
@@ -110,53 +86,12 @@ var Transport = class {
  }
  }
  async sendBatch(items) {
- const payload = {
- creates: [],
- completes: []
- };
- for (const item of items) {
- if (item.type === "create") {
- payload.creates.push({ tempId: item.tempId, data: item.data });
- } else {
- payload.completes.push({ traceId: item.traceId, data: item.data });
- }
- }
- if (payload.creates.length === 0 && payload.completes.length === 0) {
- return;
- }
- this.log("Sending batch", {
- creates: payload.creates.length,
- completes: payload.completes.length
- });
+ if (items.length === 0) return;
+ this.log(`Sending batch of ${items.length} traces`);
  try {
- const response = await this.request(
- "POST",
- "/api/v1/traces/batch",
- payload
- );
- if (response.created) {
- for (const [tempId, realId] of Object.entries(response.created)) {
- const resolver = this.pendingResolvers.get(tempId);
- if (resolver) {
- resolver(realId);
- this.pendingResolvers.delete(tempId);
- }
- }
- }
- if (response.errors?.length && this.config.debug) {
- console.warn("[Lelemon] Batch errors:", response.errors);
- }
+ await this.request("POST", "/api/v1/traces/batch", { traces: items });
  } catch (error) {
- for (const item of items) {
- if (item.type === "create") {
- const resolver = this.pendingResolvers.get(item.tempId);
- if (resolver) {
- resolver(null);
- this.pendingResolvers.delete(item.tempId);
- }
- }
- }
- this.log("Batch failed", error);
+ this.log("Batch send failed", error);
  }
  }
  async request(method, path, body) {
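The wire format changes accordingly: instead of a `{ creates, completes }` envelope plus a `tempId`-to-real-ID mapping read back from the response, `sendBatch()` now POSTs a single `{ traces }` array to `/api/v1/traces/batch` and ignores the response body on success. A sketch of the new payload, with field names taken from `captureTrace()` later in this diff and illustrative values:

```ts
// POST {endpoint}/api/v1/traces/batch  (endpoint defaults to https://api.lelemon.dev)
const payload = {
  traces: [
    {
      provider: "openai",
      model: "gpt-4o",                          // illustrative
      input: [{ role: "user", content: "hi" }], // sanitized before enqueue
      output: "Hello!",
      inputTokens: 8,
      outputTokens: 3,
      durationMs: 412,
      status: "success",
      streaming: false,
    },
  ],
};
```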
@@ -201,494 +136,759 @@ var Transport = class {
  }
  };

- // src/parser.ts
- function isOpenAIFormat(messages) {
- if (!messages.length) return false;
- const first = messages[0];
- return typeof first === "object" && first !== null && "role" in first && ["system", "user", "assistant", "tool"].includes(first.role);
- }
- function isAnthropicFormat(messages) {
- if (!messages.length) return false;
- const first = messages[0];
- return typeof first === "object" && first !== null && "role" in first && ["user", "assistant"].includes(first.role) && (typeof first.content === "string" || Array.isArray(first.content));
- }
- function isGeminiFormat(messages) {
- if (!messages.length) return false;
- const first = messages[0];
- return typeof first === "object" && first !== null && "role" in first && ["user", "model"].includes(first.role) && "parts" in first && Array.isArray(first.parts);
- }
- function parseOpenAI(messages) {
- const result = {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "openai"
- };
- for (const msg of messages) {
- if (msg.role === "system") {
- result.systemPrompt = msg.content ?? void 0;
- } else if (msg.role === "user" && !result.userInput) {
- result.userInput = msg.content ?? void 0;
- } else if (msg.role === "assistant") {
- const llmCall = {
- provider: "openai",
- output: msg.content
- };
- if (msg.tool_calls && msg.tool_calls.length > 0) {
- llmCall.toolCalls = msg.tool_calls.map((tc) => ({
- name: tc.function.name,
- input: safeParseJSON(tc.function.arguments)
- }));
- for (const tc of msg.tool_calls) {
- result.toolCalls.push({
- name: tc.function.name,
- input: safeParseJSON(tc.function.arguments)
- });
- }
- }
- result.llmCalls.push(llmCall);
- if (msg.content) {
- result.output = msg.content;
- }
- } else if (msg.role === "tool") {
- const lastToolCall = result.toolCalls[result.toolCalls.length - 1];
- if (lastToolCall) {
- lastToolCall.output = safeParseJSON(msg.content ?? "");
- }
- }
+ // src/core/config.ts
+ var globalConfig = {};
+ var globalTransport = null;
+ var DEFAULT_ENDPOINT = "https://api.lelemon.dev";
+ function init(config = {}) {
+ globalConfig = config;
+ globalTransport = createTransport(config);
+ }
+ function getConfig() {
+ return globalConfig;
+ }
+ function isEnabled() {
+ return getTransport().isEnabled();
+ }
+ function getTransport() {
+ if (!globalTransport) {
+ globalTransport = createTransport(globalConfig);
  }
- return result;
+ return globalTransport;
  }
- function parseAnthropic(messages) {
- const result = {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "anthropic"
- };
- for (const msg of messages) {
- if (msg.role === "user") {
- if (!result.userInput) {
- if (typeof msg.content === "string") {
- result.userInput = msg.content;
- } else if (Array.isArray(msg.content)) {
- const textContent = msg.content.find(
- (c) => c.type === "text"
- );
- if (textContent && "text" in textContent) {
- result.userInput = textContent.text;
- }
- }
- }
- if (Array.isArray(msg.content)) {
- for (const block of msg.content) {
- if (block.type === "tool_result" && block.tool_use_id) {
- const toolCall = result.toolCalls.find(
- (tc) => tc.id === block.tool_use_id
- );
- if (toolCall) {
- toolCall.output = block.content;
- }
- }
- }
- }
- } else if (msg.role === "assistant") {
- const llmCall = {
- provider: "anthropic"
- };
- if (typeof msg.content === "string") {
- llmCall.output = msg.content;
- result.output = msg.content;
- } else if (Array.isArray(msg.content)) {
- const outputs = [];
- const toolCalls = [];
- for (const block of msg.content) {
- if (block.type === "text" && block.text) {
- outputs.push(block.text);
- } else if (block.type === "tool_use" && block.name) {
- const tc = {
- name: block.name,
- input: block.input
- };
- if (block.id) {
- tc.id = block.id;
- }
- toolCalls.push(tc);
- result.toolCalls.push(tc);
- }
- }
- if (outputs.length) {
- llmCall.output = outputs.join("\n");
- result.output = outputs.join("\n");
- }
- if (toolCalls.length) {
- llmCall.toolCalls = toolCalls;
- }
- }
- result.llmCalls.push(llmCall);
- }
+ async function flush() {
+ if (globalTransport) {
+ await globalTransport.flush();
  }
- return result;
  }
- function parseGemini(messages) {
- const result = {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "gemini"
- };
- for (const msg of messages) {
- if (msg.role === "user") {
- if (!result.userInput) {
- const textPart = msg.parts.find((p) => p.text);
- if (textPart?.text) {
- result.userInput = textPart.text;
- }
- }
- for (const part of msg.parts) {
- if (part.functionResponse) {
- const toolCall = result.toolCalls.find(
- (tc) => tc.name === part.functionResponse.name
- );
- if (toolCall) {
- toolCall.output = part.functionResponse.response;
- }
- }
- }
- } else if (msg.role === "model") {
- const llmCall = {
- provider: "gemini"
- };
- const outputs = [];
- const toolCalls = [];
- for (const part of msg.parts) {
- if (part.text) {
- outputs.push(part.text);
- } else if (part.functionCall) {
- const tc = {
- name: part.functionCall.name,
- input: part.functionCall.args
- };
- toolCalls.push(tc);
- result.toolCalls.push(tc);
- }
- }
- if (outputs.length) {
- llmCall.output = outputs.join("\n");
- result.output = outputs.join("\n");
- }
- if (toolCalls.length) {
- llmCall.toolCalls = toolCalls;
+ function createTransport(config) {
+ const apiKey = config.apiKey ?? getEnvVar("LELEMON_API_KEY");
+ if (!apiKey && !config.disabled) {
+ console.warn(
+ "[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled."
+ );
+ }
+ return new Transport({
+ apiKey: apiKey ?? "",
+ endpoint: config.endpoint ?? DEFAULT_ENDPOINT,
+ debug: config.debug ?? false,
+ disabled: config.disabled ?? !apiKey,
+ batchSize: config.batchSize,
+ flushIntervalMs: config.flushIntervalMs,
+ requestTimeoutMs: config.requestTimeoutMs
+ });
+ }
+ function getEnvVar(name) {
+ if (typeof process !== "undefined" && process.env) {
+ return process.env[name];
+ }
+ return void 0;
+ }
+
+ // src/providers/base.ts
+ function safeExtract(fn, fallback) {
+ try {
+ return fn() ?? fallback;
+ } catch {
+ return fallback;
+ }
+ }
+ function getNestedValue(obj, path) {
+ try {
+ const parts = path.split(".");
+ let current = obj;
+ for (const part of parts) {
+ if (current == null || typeof current !== "object") {
+ return void 0;
  }
- result.llmCalls.push(llmCall);
+ current = current[part];
  }
+ return current;
+ } catch {
+ return void 0;
  }
- return result;
  }
- function safeParseJSON(str) {
+ function isValidNumber(value) {
+ return typeof value === "number" && !isNaN(value) && isFinite(value);
+ }
+
+ // src/core/capture.ts
+ var globalContext = {};
+ function setGlobalContext(options) {
+ globalContext = options;
+ }
+ function getGlobalContext() {
+ return globalContext;
+ }
+ function captureTrace(params) {
  try {
- return JSON.parse(str);
+ const transport = getTransport();
+ if (!transport.isEnabled()) return;
+ const context = getGlobalContext();
+ const request = {
+ provider: params.provider,
+ model: params.model,
+ input: sanitizeInput(params.input),
+ output: sanitizeOutput(params.output),
+ inputTokens: params.inputTokens,
+ outputTokens: params.outputTokens,
+ durationMs: params.durationMs,
+ status: params.status,
+ streaming: params.streaming,
+ sessionId: context.sessionId,
+ userId: context.userId,
+ metadata: { ...context.metadata, ...params.metadata },
+ tags: context.tags
+ };
+ transport.enqueue(request);
  } catch {
- return str;
  }
  }
- function parseMessages(messages) {
- if (!messages) {
- return {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "unknown"
+ function captureError(params) {
+ try {
+ const transport = getTransport();
+ if (!transport.isEnabled()) return;
+ const context = getGlobalContext();
+ const request = {
+ provider: params.provider,
+ model: params.model,
+ input: sanitizeInput(params.input),
+ output: null,
+ inputTokens: 0,
+ outputTokens: 0,
+ durationMs: params.durationMs,
+ status: "error",
+ errorMessage: params.error.message,
+ errorStack: params.error.stack,
+ streaming: params.streaming,
+ sessionId: context.sessionId,
+ userId: context.userId,
+ metadata: { ...context.metadata, ...params.metadata },
+ tags: context.tags
  };
+ transport.enqueue(request);
+ } catch {
  }
- if (!Array.isArray(messages)) {
- return {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "unknown",
- output: typeof messages === "string" ? messages : JSON.stringify(messages)
- };
+ }
+ var MAX_STRING_LENGTH = 1e5;
+ var SENSITIVE_KEYS = ["api_key", "apikey", "password", "secret", "token", "authorization"];
+ function sanitizeInput(input) {
+ return sanitize(input, 0);
+ }
+ function sanitizeOutput(output) {
+ return sanitize(output, 0);
+ }
+ function sanitize(value, depth) {
+ if (depth > 10) return "[max depth exceeded]";
+ if (value === null || value === void 0) return value;
+ if (typeof value === "string") {
+ return value.length > MAX_STRING_LENGTH ? value.slice(0, MAX_STRING_LENGTH) + "...[truncated]" : value;
+ }
+ if (typeof value === "number" || typeof value === "boolean") {
+ return value;
+ }
+ if (Array.isArray(value)) {
+ return value.map((item) => sanitize(item, depth + 1));
+ }
+ if (typeof value === "object") {
+ const sanitized = {};
+ for (const [key, val] of Object.entries(value)) {
+ if (SENSITIVE_KEYS.some((k) => key.toLowerCase().includes(k))) {
+ sanitized[key] = "[REDACTED]";
+ } else {
+ sanitized[key] = sanitize(val, depth + 1);
+ }
+ }
+ return sanitized;
  }
- if (isGeminiFormat(messages)) {
- return parseGemini(messages);
- } else if (isOpenAIFormat(messages)) {
- return parseOpenAI(messages);
- } else if (isAnthropicFormat(messages)) {
- return parseAnthropic(messages);
- }
- return {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "unknown"
+ return String(value);
+ }
+
+ // src/providers/openai.ts
+ var PROVIDER_NAME = "openai";
+ function canHandle(client) {
+ if (!client || typeof client !== "object") return false;
+ const constructorName = client.constructor?.name;
+ if (constructorName === "OpenAI") return true;
+ const c = client;
+ return !!(c.chat && c.completions);
+ }
+ function wrapChatCreate(originalFn) {
+ return async function wrappedChatCreate(...args) {
+ const startTime = Date.now();
+ const request = args[0] || {};
+ const isStreaming = request.stream === true;
+ try {
+ const response = await originalFn(...args);
+ if (isStreaming && isAsyncIterable(response)) {
+ return wrapStream(response, request, startTime);
+ }
+ const durationMs = Date.now() - startTime;
+ const extracted = extractChatCompletion(response);
+ captureTrace({
+ provider: PROVIDER_NAME,
+ model: request.model || extracted.model || "unknown",
+ input: request.messages,
+ output: extracted.output,
+ inputTokens: extracted.tokens?.inputTokens || 0,
+ outputTokens: extracted.tokens?.outputTokens || 0,
+ durationMs,
+ status: "success",
+ streaming: false
+ });
+ return response;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.messages,
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: isStreaming
+ });
+ throw error;
+ }
  };
  }
- function parseResponse(response) {
- if (!response || typeof response !== "object") {
- return {};
- }
- const res = response;
- const result = {};
- if ("model" in res) {
- result.model = res.model;
- }
- if ("modelId" in res) {
- result.model = res.modelId;
- }
- if ("usage" in res && typeof res.usage === "object" && res.usage !== null) {
- const usage = res.usage;
- result.inputTokens = usage.prompt_tokens ?? usage.input_tokens;
- result.outputTokens = usage.completion_tokens ?? usage.output_tokens;
- }
- if ("$metadata" in res && "usage" in res) {
- result.provider = "bedrock";
- }
- if ("anthropic_version" in res || "amazon-bedrock-invocationMetrics" in res) {
- result.provider = "bedrock";
- }
- if ("candidates" in res || "promptFeedback" in res) {
- result.provider = "gemini";
- if ("usageMetadata" in res && typeof res.usageMetadata === "object" && res.usageMetadata !== null) {
- const usage = res.usageMetadata;
- result.inputTokens = usage.promptTokenCount;
- result.outputTokens = usage.candidatesTokenCount;
+ function wrapCompletionCreate(originalFn) {
+ return async function wrappedCompletionCreate(...args) {
+ const startTime = Date.now();
+ const request = args[0] || {};
+ try {
+ const response = await originalFn(...args);
+ const durationMs = Date.now() - startTime;
+ const extracted = extractLegacyCompletion(response);
+ captureTrace({
+ provider: PROVIDER_NAME,
+ model: request.model || extracted.model || "unknown",
+ input: request.prompt,
+ output: extracted.output,
+ inputTokens: extracted.tokens?.inputTokens || 0,
+ outputTokens: extracted.tokens?.outputTokens || 0,
+ durationMs,
+ status: "success",
+ streaming: false
+ });
+ return response;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.prompt,
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: false
+ });
+ throw error;
+ }
+ };
+ }
+ function wrapEmbeddingsCreate(originalFn) {
+ return async function wrappedEmbeddingsCreate(...args) {
+ const startTime = Date.now();
+ const request = args[0] || {};
+ try {
+ const response = await originalFn(...args);
+ const durationMs = Date.now() - startTime;
+ const tokens = extractEmbeddingTokens(response);
+ captureTrace({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.input,
+ output: "[embedding vectors]",
+ inputTokens: tokens?.inputTokens || 0,
+ outputTokens: 0,
+ durationMs,
+ status: "success",
+ streaming: false
+ });
+ return response;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.input,
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: false
+ });
+ throw error;
+ }
+ };
+ }
+ function isAsyncIterable(value) {
+ return value != null && typeof value[Symbol.asyncIterator] === "function";
+ }
+ async function* wrapStream(stream, request, startTime) {
+ const chunks = [];
+ let tokens = null;
+ let error = null;
+ try {
+ for await (const chunk of stream) {
+ const content = extractStreamChunkContent(chunk);
+ if (content) {
+ chunks.push(content);
+ }
+ const chunkTokens = extractStreamChunkTokens(chunk);
+ if (chunkTokens) {
+ tokens = chunkTokens;
+ }
+ yield chunk;
+ }
+ } catch (err) {
+ error = err instanceof Error ? err : new Error(String(err));
+ throw err;
+ } finally {
+ const durationMs = Date.now() - startTime;
+ const output = chunks.join("");
+ if (error) {
+ captureError({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.messages,
+ error,
+ durationMs,
+ streaming: true
+ });
+ } else {
+ captureTrace({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.messages,
+ output,
+ inputTokens: tokens?.inputTokens || 0,
+ outputTokens: tokens?.outputTokens || 0,
+ durationMs,
+ status: "success",
+ streaming: true
+ });
  }
  }
- if (result.model && !result.provider) {
- if (result.model.startsWith("gpt") || result.model.startsWith("o1")) {
- result.provider = "openai";
- } else if (result.model.startsWith("claude")) {
- result.provider = "anthropic";
- } else if (result.model.startsWith("gemini")) {
- result.provider = "gemini";
- } else if (result.model.startsWith("anthropic.") || result.model.startsWith("amazon.") || result.model.startsWith("meta.") || result.model.startsWith("cohere.") || result.model.startsWith("mistral.") || result.model.includes(":")) {
- result.provider = "bedrock";
+ }
+ function extractChatCompletion(response) {
+ const model = safeExtract(() => getNestedValue(response, "model"), null);
+ const output = safeExtract(
+ () => getNestedValue(response, "choices.0.message.content"),
+ null
+ );
+ const tokens = extractTokens(response);
+ return { model, output, tokens };
+ }
+ function extractLegacyCompletion(response) {
+ const model = safeExtract(() => getNestedValue(response, "model"), null);
+ const output = safeExtract(
+ () => getNestedValue(response, "choices.0.text"),
+ null
+ );
+ const tokens = extractTokens(response);
+ return { model, output, tokens };
+ }
+ function extractTokens(response) {
+ try {
+ const usage = getNestedValue(response, "usage");
+ if (!usage || typeof usage !== "object") return null;
+ const u = usage;
+ const promptTokens = u.prompt_tokens;
+ const completionTokens = u.completion_tokens;
+ const totalTokens = u.total_tokens;
+ if (!isValidNumber(promptTokens) && !isValidNumber(completionTokens)) {
+ return null;
  }
+ return {
+ inputTokens: isValidNumber(promptTokens) ? promptTokens : 0,
+ outputTokens: isValidNumber(completionTokens) ? completionTokens : 0,
+ totalTokens: isValidNumber(totalTokens) ? totalTokens : 0
+ };
+ } catch {
+ return null;
  }
- return result;
  }
- function parseBedrockResponse(body) {
- if (!body || typeof body !== "object") {
- return {};
+ function extractEmbeddingTokens(response) {
+ try {
+ const usage = getNestedValue(response, "usage");
+ if (!usage || typeof usage !== "object") return null;
+ const u = usage;
+ const promptTokens = u.prompt_tokens;
+ const totalTokens = u.total_tokens;
+ return {
+ inputTokens: isValidNumber(promptTokens) ? promptTokens : 0,
+ outputTokens: 0,
+ totalTokens: isValidNumber(totalTokens) ? totalTokens : 0
+ };
+ } catch {
+ return null;
  }
- const res = body;
- const result = { provider: "bedrock" };
- if ("usage" in res && typeof res.usage === "object" && res.usage !== null) {
- const usage = res.usage;
- result.inputTokens = usage.input_tokens;
- result.outputTokens = usage.output_tokens;
+ }
+ function extractStreamChunkContent(chunk) {
+ try {
+ return chunk?.choices?.[0]?.delta?.content ?? null;
+ } catch {
+ return null;
  }
- if ("amazon-bedrock-invocationMetrics" in res) {
- const metrics = res["amazon-bedrock-invocationMetrics"];
- if (metrics.inputTokenCount) result.inputTokens = metrics.inputTokenCount;
- if (metrics.outputTokenCount) result.outputTokens = metrics.outputTokenCount;
+ }
+ function extractStreamChunkTokens(chunk) {
+ try {
+ const usage = chunk?.usage;
+ if (!usage) return null;
+ return {
+ inputTokens: isValidNumber(usage.prompt_tokens) ? usage.prompt_tokens : 0,
+ outputTokens: isValidNumber(usage.completion_tokens) ? usage.completion_tokens : 0,
+ totalTokens: 0
+ };
+ } catch {
+ return null;
  }
- return result;
  }

- // src/tracer.ts
- var DEFAULT_ENDPOINT = "https://api.lelemon.dev";
- var globalConfig = {};
- var globalTransport = null;
- function init(config = {}) {
- globalConfig = config;
- globalTransport = createTransport(config);
+ // src/providers/anthropic.ts
+ var PROVIDER_NAME2 = "anthropic";
+ function canHandle2(client) {
+ if (!client || typeof client !== "object") return false;
+ const constructorName = client.constructor?.name;
+ if (constructorName === "Anthropic") return true;
+ const c = client;
+ return !!(c.messages && typeof c.messages === "object");
  }
- function trace(options) {
- const transport = getTransport();
- const debug = globalConfig.debug ?? false;
- const disabled = globalConfig.disabled ?? !transport.isEnabled();
- return new Trace(options, transport, debug, disabled);
+ function wrapMessagesCreate(originalFn) {
+ return async function wrappedMessagesCreate(...args) {
+ const startTime = Date.now();
+ const request = args[0] || {};
+ const isStreaming = request.stream === true;
+ try {
+ const response = await originalFn(...args);
+ if (isStreaming && isAsyncIterable2(response)) {
+ return wrapStream2(response, request, startTime);
+ }
+ const durationMs = Date.now() - startTime;
+ const extracted = extractMessageResponse(response);
+ captureTrace({
+ provider: PROVIDER_NAME2,
+ model: request.model || extracted.model || "unknown",
+ input: { system: request.system, messages: request.messages },
+ output: extracted.output,
+ inputTokens: extracted.tokens?.inputTokens || 0,
+ outputTokens: extracted.tokens?.outputTokens || 0,
+ durationMs,
+ status: "success",
+ streaming: false
+ });
+ return response;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME2,
+ model: request.model || "unknown",
+ input: { system: request.system, messages: request.messages },
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: isStreaming
+ });
+ throw error;
+ }
+ };
  }
- async function flush() {
- if (globalTransport) {
- await globalTransport.flush();
- }
+ function wrapMessagesStream(originalFn) {
+ return function wrappedMessagesStream(...args) {
+ const startTime = Date.now();
+ const request = args[0] || {};
+ try {
+ const stream = originalFn(...args);
+ if (stream && typeof stream === "object") {
+ return wrapAnthropicStream(stream, request, startTime);
+ }
+ return stream;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME2,
+ model: request.model || "unknown",
+ input: { system: request.system, messages: request.messages },
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: true
+ });
+ throw error;
+ }
+ };
  }
- function isEnabled() {
- return getTransport().isEnabled();
+ function isAsyncIterable2(value) {
+ return value != null && typeof value[Symbol.asyncIterator] === "function";
  }
- var Trace = class {
- constructor(options, transport, debug, disabled) {
- __publicField(this, "id", null);
- __publicField(this, "idPromise");
- __publicField(this, "transport");
- __publicField(this, "startTime");
- __publicField(this, "debug");
- __publicField(this, "disabled");
- __publicField(this, "completed", false);
- __publicField(this, "llmCalls", []);
- this.transport = transport;
- this.startTime = Date.now();
- this.debug = debug;
- this.disabled = disabled;
- if (disabled) {
- this.idPromise = Promise.resolve(null);
- } else {
- this.idPromise = transport.enqueueCreate({
- name: options.name,
- sessionId: options.sessionId,
- userId: options.userId,
- input: options.input,
- metadata: options.metadata,
- tags: options.tags
- });
- this.idPromise.then((id) => {
- this.id = id;
- });
+ function wrapAnthropicStream(stream, request, startTime) {
+ const originalStream = stream;
+ if (!originalStream[Symbol.asyncIterator]) {
+ return stream;
+ }
+ const chunks = [];
+ let inputTokens = 0;
+ let outputTokens = 0;
+ let model = request.model || "unknown";
+ let captured = false;
+ const wrappedIterator = async function* () {
+ try {
+ for await (const event of originalStream) {
+ if (event.type === "message_start" && event.message) {
+ model = event.message.model || model;
+ if (event.message.usage) {
+ inputTokens = event.message.usage.input_tokens || 0;
+ }
+ }
+ if (event.type === "content_block_delta" && event.delta?.text) {
+ chunks.push(event.delta.text);
+ }
+ if (event.type === "message_delta" && event.usage) {
+ outputTokens = event.usage.output_tokens || 0;
+ }
+ yield event;
+ }
+ } catch (error) {
+ if (!captured) {
+ captured = true;
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME2,
+ model,
+ input: { system: request.system, messages: request.messages },
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: true
+ });
+ }
+ throw error;
+ } finally {
+ if (!captured) {
+ captured = true;
+ const durationMs = Date.now() - startTime;
+ captureTrace({
+ provider: PROVIDER_NAME2,
+ model,
+ input: { system: request.system, messages: request.messages },
+ output: chunks.join(""),
+ inputTokens,
+ outputTokens,
+ durationMs,
+ status: "success",
+ streaming: true
+ });
+ }
  }
- }
- /**
- * Log an LLM response for token tracking
- * Optional - use if you want per-call token counts
- */
- log(response) {
- if (this.disabled || this.completed) return this;
- const parsed = parseResponse(response);
- if (parsed.model || parsed.inputTokens || parsed.outputTokens) {
- this.llmCalls.push(parsed);
+ };
+ return new Proxy(stream, {
+ get(target, prop, receiver) {
+ if (prop === Symbol.asyncIterator) {
+ return () => wrappedIterator()[Symbol.asyncIterator]();
+ }
+ return Reflect.get(target, prop, receiver);
  }
- return this;
- }
- /**
- * Complete trace successfully (fire-and-forget)
- *
- * @param messages - Full message history (OpenAI/Anthropic format)
- */
- success(messages) {
- if (this.completed || this.disabled) return;
- this.completed = true;
- const durationMs = Date.now() - this.startTime;
- const parsed = parseMessages(messages);
- const allLLMCalls = [...this.llmCalls, ...parsed.llmCalls];
- const { totalInputTokens, totalOutputTokens, models } = this.aggregateCalls(allLLMCalls);
- this.idPromise.then((id) => {
- if (!id) return;
- this.transport.enqueueComplete(id, {
- status: "completed",
- output: parsed.output,
- systemPrompt: parsed.systemPrompt,
- llmCalls: allLLMCalls,
- toolCalls: parsed.toolCalls,
- models,
- totalInputTokens,
- totalOutputTokens,
- durationMs
+ });
+ }
+ async function* wrapStream2(stream, request, startTime) {
+ const chunks = [];
+ let inputTokens = 0;
+ let outputTokens = 0;
+ let model = request.model || "unknown";
+ let error = null;
+ try {
+ for await (const event of stream) {
+ if (event.type === "message_start" && event.message) {
+ model = event.message.model || model;
+ if (event.message.usage) {
+ inputTokens = event.message.usage.input_tokens || 0;
+ }
+ }
+ if (event.type === "content_block_delta" && event.delta?.text) {
+ chunks.push(event.delta.text);
+ }
+ if (event.type === "message_delta" && event.usage) {
+ outputTokens = event.usage.output_tokens || 0;
+ }
+ yield event;
+ }
+ } catch (err) {
+ error = err instanceof Error ? err : new Error(String(err));
+ throw err;
+ } finally {
+ const durationMs = Date.now() - startTime;
+ if (error) {
+ captureError({
+ provider: PROVIDER_NAME2,
+ model,
+ input: { system: request.system, messages: request.messages },
+ error,
+ durationMs,
+ streaming: true
  });
- });
- }
- /**
- * Complete trace with error (fire-and-forget)
- *
- * @param error - The error that occurred
- * @param messages - Optional message history up to failure
- */
- error(error, messages) {
- if (this.completed || this.disabled) return;
- this.completed = true;
- const durationMs = Date.now() - this.startTime;
- const parsed = messages ? parseMessages(messages) : null;
- const errorObj = error instanceof Error ? error : new Error(String(error));
- const allLLMCalls = parsed ? [...this.llmCalls, ...parsed.llmCalls] : this.llmCalls;
- const { totalInputTokens, totalOutputTokens, models } = this.aggregateCalls(allLLMCalls);
- this.idPromise.then((id) => {
- if (!id) return;
- this.transport.enqueueComplete(id, {
- status: "error",
- errorMessage: errorObj.message,
- errorStack: errorObj.stack,
- output: parsed?.output,
- systemPrompt: parsed?.systemPrompt,
- llmCalls: allLLMCalls.length > 0 ? allLLMCalls : void 0,
- toolCalls: parsed?.toolCalls,
- models: models.length > 0 ? models : void 0,
- totalInputTokens,
- totalOutputTokens,
- durationMs
+ } else {
+ captureTrace({
+ provider: PROVIDER_NAME2,
+ model,
+ input: { system: request.system, messages: request.messages },
+ output: chunks.join(""),
+ inputTokens,
+ outputTokens,
+ durationMs,
+ status: "success",
+ streaming: true
  });
- });
- }
- /**
- * Get the trace ID (may be null if not yet created or failed)
- */
- getId() {
- return this.id;
- }
- /**
- * Wait for trace ID to be available
- */
- async waitForId() {
- return this.idPromise;
+ }
  }
- // ─────────────────────────────────────────────────────────────
- // Private methods
- // ─────────────────────────────────────────────────────────────
- aggregateCalls(calls) {
- let totalInputTokens = 0;
- let totalOutputTokens = 0;
- const modelSet = /* @__PURE__ */ new Set();
- for (const call of calls) {
- if (call.inputTokens) totalInputTokens += call.inputTokens;
- if (call.outputTokens) totalOutputTokens += call.outputTokens;
- if (call.model) modelSet.add(call.model);
+ }
+ function extractMessageResponse(response) {
+ const model = safeExtract(() => response.model ?? null, null);
+ const output = safeExtract(() => {
+ if (!response.content || !Array.isArray(response.content)) return null;
+ const textBlocks = response.content.filter((block) => block.type === "text" && block.text).map((block) => block.text);
+ return textBlocks.join("") || null;
+ }, null);
+ const tokens = extractTokens2(response);
+ return { model, output, tokens };
+ }
+ function extractTokens2(response) {
+ try {
+ const usage = response.usage;
+ if (!usage) return null;
+ const inputTokens = usage.input_tokens;
+ const outputTokens = usage.output_tokens;
+ if (!isValidNumber(inputTokens) && !isValidNumber(outputTokens)) {
+ return null;
  }
  return {
- totalInputTokens,
- totalOutputTokens,
- models: Array.from(modelSet)
+ inputTokens: isValidNumber(inputTokens) ? inputTokens : 0,
+ outputTokens: isValidNumber(outputTokens) ? outputTokens : 0,
+ totalTokens: (isValidNumber(inputTokens) ? inputTokens : 0) + (isValidNumber(outputTokens) ? outputTokens : 0)
  };
+ } catch {
+ return null;
  }
- };
- function getTransport() {
- if (!globalTransport) {
- globalTransport = createTransport(globalConfig);
- }
- return globalTransport;
  }
- function createTransport(config) {
- const apiKey = config.apiKey ?? getEnvVar("LELEMON_API_KEY");
- if (!apiKey && !config.disabled) {
- console.warn(
- "[Lelemon] No API key provided. Set apiKey in config or LELEMON_API_KEY env var. Tracing disabled."
- );
+
+ // src/observe.ts
+ function observe(client, options) {
+ if (options) {
+ setGlobalContext(options);
+ }
+ const config = getConfig();
+ if (config.disabled) {
+ return client;
+ }
+ if (canHandle(client)) {
+ if (config.debug) {
+ console.log("[Lelemon] Wrapping OpenAI client");
+ }
+ return wrapOpenAI(client);
  }
- return new Transport({
- apiKey: apiKey ?? "",
- endpoint: config.endpoint ?? DEFAULT_ENDPOINT,
- debug: config.debug ?? false,
- disabled: config.disabled ?? !apiKey,
- batchSize: config.batchSize,
- flushIntervalMs: config.flushIntervalMs,
- requestTimeoutMs: config.requestTimeoutMs
+ if (canHandle2(client)) {
+ if (config.debug) {
+ console.log("[Lelemon] Wrapping Anthropic client");
+ }
+ return wrapAnthropic(client);
+ }
+ console.warn(
+ "[Lelemon] Unknown client type. Tracing not enabled. Supported: OpenAI, Anthropic"
+ );
+ return client;
+ }
+ function wrapOpenAI(client) {
+ const typed = client;
+ return new Proxy(typed, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "chat" && value && typeof value === "object") {
+ return wrapOpenAIChat(value);
+ }
+ if (prop === "completions" && value && typeof value === "object") {
+ return wrapOpenAICompletions(value);
+ }
+ if (prop === "embeddings" && value && typeof value === "object") {
+ return wrapOpenAIEmbeddings(value);
+ }
+ return value;
+ }
  });
  }
- function getEnvVar(name) {
- if (typeof process !== "undefined" && process.env) {
- return process.env[name];
- }
- return void 0;
+ function wrapOpenAIChat(chat) {
+ return new Proxy(chat, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "completions" && value && typeof value === "object") {
+ return wrapOpenAIChatCompletions(value);
+ }
+ return value;
+ }
+ });
+ }
+ function wrapOpenAIChatCompletions(completions) {
+ return new Proxy(completions, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "create" && typeof value === "function") {
+ return wrapChatCreate(value.bind(target));
+ }
+ return value;
+ }
+ });
+ }
+ function wrapOpenAICompletions(completions) {
+ return new Proxy(completions, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "create" && typeof value === "function") {
+ return wrapCompletionCreate(value.bind(target));
+ }
+ return value;
+ }
+ });
+ }
+ function wrapOpenAIEmbeddings(embeddings) {
+ return new Proxy(embeddings, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "create" && typeof value === "function") {
+ return wrapEmbeddingsCreate(value.bind(target));
+ }
+ return value;
+ }
+ });
+ }
+ function wrapAnthropic(client) {
+ const typed = client;
+ return new Proxy(typed, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "messages" && value && typeof value === "object") {
+ return wrapAnthropicMessages(value);
+ }
+ return value;
+ }
+ });
+ }
+ function wrapAnthropicMessages(messages) {
+ return new Proxy(messages, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "create" && typeof value === "function") {
+ return wrapMessagesCreate(value.bind(target));
+ }
+ if (prop === "stream" && typeof value === "function") {
+ return wrapMessagesStream(value.bind(target));
+ }
+ return value;
+ }
+ });
+ }
+ function createObserve(defaultOptions) {
+ return function scopedObserve(client, options) {
+ return observe(client, { ...defaultOptions, ...options });
+ };
  }

- exports.Trace = Trace;
+ exports.createObserve = createObserve;
  exports.flush = flush;
  exports.init = init;
  exports.isEnabled = isEnabled;
- exports.parseBedrockResponse = parseBedrockResponse;
- exports.parseMessages = parseMessages;
- exports.parseResponse = parseResponse;
- exports.trace = trace;
+ exports.observe = observe;
  //# sourceMappingURL=index.js.map