@lelemondev/sdk 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
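
The headline change in 0.3.0 is visible in the diff below: the manual trace()/Trace API from src/tracer.ts and the message parsers from src/parser.ts are removed, and tracing now happens by wrapping a provider client with observe() (new src/observe.ts). A minimal usage sketch based only on the exports visible in this diff (init, observe, flush); the OpenAI client and the model name are illustrative:

    import OpenAI from "openai";
    import { init, observe, flush } from "@lelemondev/sdk";

    init({ apiKey: process.env.LELEMON_API_KEY }); // or rely on the LELEMON_API_KEY env var
    const openai = observe(new OpenAI()); // returns a Proxy that captures each call

    const res = await openai.chat.completions.create({
      model: "gpt-4o-mini", // illustrative model name
      messages: [{ role: "user", content: "Hello" }],
    });

    await flush(); // drain any queued traces before a short-lived process exits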
package/dist/index.mjs CHANGED
@@ -3,7 +3,7 @@ var __defProp = Object.defineProperty;
  var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
  var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
 
- // src/transport.ts
+ // src/core/transport.ts
  var DEFAULT_BATCH_SIZE = 10;
  var DEFAULT_FLUSH_INTERVAL_MS = 1e3;
  var DEFAULT_REQUEST_TIMEOUT_MS = 1e4;
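
These defaults drive the rewritten batching transport: a batch is sent once 10 traces are queued, a 1000 ms timer flushes smaller queues, and each request times out after 10000 ms. All three are overridable through init(), whose batchSize, flushIntervalMs, and requestTimeoutMs fields are forwarded to the Transport constructor in createTransport() further down this diff; a hedged sketch:

    import { init } from "@lelemondev/sdk";

    init({
      apiKey: process.env.LELEMON_API_KEY,
      batchSize: 25,          // flush once 25 traces are queued (default 10)
      flushIntervalMs: 2000,  // otherwise flush on a 2 s timer (default 1000)
      requestTimeoutMs: 5000  // assumed to bound each batch POST (default 10000)
    });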
@@ -13,8 +13,6 @@ var Transport = class {
  __publicField(this, "queue", []);
  __publicField(this, "flushPromise", null);
  __publicField(this, "flushTimer", null);
- __publicField(this, "pendingResolvers", /* @__PURE__ */ new Map());
- __publicField(this, "idCounter", 0);
  this.config = {
  apiKey: config.apiKey,
  endpoint: config.endpoint,
@@ -32,30 +30,21 @@ var Transport = class {
  return !this.config.disabled && !!this.config.apiKey;
  }
  /**
- * Enqueue trace creation (returns promise that resolves to trace ID)
+ * Enqueue a trace for sending
+ * Fire-and-forget - never blocks
  */
- enqueueCreate(data) {
- if (this.config.disabled) {
- return Promise.resolve(null);
- }
- const tempId = this.generateTempId();
- return new Promise((resolve) => {
- this.pendingResolvers.set(tempId, resolve);
- this.enqueue({ type: "create", tempId, data });
- });
- }
- /**
- * Enqueue trace completion (fire-and-forget)
- */
- enqueueComplete(traceId, data) {
- if (this.config.disabled || !traceId) {
- return;
+ enqueue(trace) {
+ if (this.config.disabled) return;
+ this.queue.push(trace);
+ if (this.queue.length >= this.config.batchSize) {
+ this.flush();
+ } else {
+ this.scheduleFlush();
  }
- this.enqueue({ type: "complete", traceId, data });
  }
  /**
- * Flush all pending items
- * Safe to call multiple times (deduplicates)
+ * Flush all pending traces
+ * Safe to call multiple times
  */
  async flush() {
  if (this.flushPromise) {
@@ -73,29 +62,16 @@ var Transport = class {
  return this.flushPromise;
  }
  /**
- * Get pending item count (for testing/debugging)
+ * Get pending count (for debugging)
  */
  getPendingCount() {
  return this.queue.length;
  }
  // ─────────────────────────────────────────────────────────────
- // Private methods
+ // Private Methods
  // ─────────────────────────────────────────────────────────────
- generateTempId() {
- return `temp_${++this.idCounter}_${Date.now()}`;
- }
- enqueue(item) {
- this.queue.push(item);
- if (this.queue.length >= this.config.batchSize) {
- this.flush();
- } else {
- this.scheduleFlush();
- }
- }
  scheduleFlush() {
- if (this.flushTimer !== null) {
- return;
- }
+ if (this.flushTimer !== null) return;
  this.flushTimer = setTimeout(() => {
  this.flushTimer = null;
  this.flush();
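
With enqueueCreate/enqueueComplete collapsed into the fire-and-forget enqueue() above, nothing on the request path is ever awaited: delivery happens when the queue reaches batchSize or when the timer armed in scheduleFlush() fires. Short-lived scripts should therefore drain the queue explicitly via the exported flush(), which the doc comment above notes is safe to call multiple times; a sketch:

    import { flush } from "@lelemondev/sdk";

    async function main() {
      // ... traced LLM calls ...
      await flush(); // send any still-queued traces before the process exits
    }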
@@ -108,53 +84,12 @@ var Transport = class {
  }
  }
  async sendBatch(items) {
- const payload = {
- creates: [],
- completes: []
- };
- for (const item of items) {
- if (item.type === "create") {
- payload.creates.push({ tempId: item.tempId, data: item.data });
- } else {
- payload.completes.push({ traceId: item.traceId, data: item.data });
- }
- }
- if (payload.creates.length === 0 && payload.completes.length === 0) {
- return;
- }
- this.log("Sending batch", {
- creates: payload.creates.length,
- completes: payload.completes.length
- });
+ if (items.length === 0) return;
+ this.log(`Sending batch of ${items.length} traces`);
  try {
- const response = await this.request(
- "POST",
- "/api/v1/traces/batch",
- payload
- );
- if (response.created) {
- for (const [tempId, realId] of Object.entries(response.created)) {
- const resolver = this.pendingResolvers.get(tempId);
- if (resolver) {
- resolver(realId);
- this.pendingResolvers.delete(tempId);
- }
- }
- }
- if (response.errors?.length && this.config.debug) {
- console.warn("[Lelemon] Batch errors:", response.errors);
- }
+ await this.request("POST", "/api/v1/traces/batch", { traces: items });
  } catch (error) {
- for (const item of items) {
- if (item.type === "create") {
- const resolver = this.pendingResolvers.get(item.tempId);
- if (resolver) {
- resolver(null);
- this.pendingResolvers.delete(item.tempId);
- }
- }
- }
- this.log("Batch failed", error);
+ this.log("Batch send failed", error);
  }
  }
  async request(method, path, body) {
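
sendBatch() now issues a single POST per batch with the queued traces inlined as { traces: items }, replacing the creates/completes envelope and its tempId-to-realId resolution. Assuming the trace shape assembled by captureTrace() in src/core/capture.ts further down, a request body would look roughly like this (all values illustrative; sessionId, userId, metadata, and tags are added when a context is set):

    POST /api/v1/traces/batch
    {
      "traces": [
        {
          "provider": "openai",
          "model": "gpt-4o-mini",
          "input": [{ "role": "user", "content": "Hello" }],
          "output": "Hi there!",
          "inputTokens": 9,
          "outputTokens": 4,
          "durationMs": 412,
          "status": "success",
          "streaming": false
        }
      ]
    }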
@@ -199,487 +134,755 @@ var Transport = class {
  }
  };
 
- // src/parser.ts
- function isOpenAIFormat(messages) {
- if (!messages.length) return false;
- const first = messages[0];
- return typeof first === "object" && first !== null && "role" in first && ["system", "user", "assistant", "tool"].includes(first.role);
- }
- function isAnthropicFormat(messages) {
- if (!messages.length) return false;
- const first = messages[0];
- return typeof first === "object" && first !== null && "role" in first && ["user", "assistant"].includes(first.role) && (typeof first.content === "string" || Array.isArray(first.content));
- }
- function isGeminiFormat(messages) {
- if (!messages.length) return false;
- const first = messages[0];
- return typeof first === "object" && first !== null && "role" in first && ["user", "model"].includes(first.role) && "parts" in first && Array.isArray(first.parts);
- }
- function parseOpenAI(messages) {
- const result = {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "openai"
- };
- for (const msg of messages) {
- if (msg.role === "system") {
- result.systemPrompt = msg.content ?? void 0;
- } else if (msg.role === "user" && !result.userInput) {
- result.userInput = msg.content ?? void 0;
- } else if (msg.role === "assistant") {
- const llmCall = {
- provider: "openai",
- output: msg.content
- };
- if (msg.tool_calls && msg.tool_calls.length > 0) {
- llmCall.toolCalls = msg.tool_calls.map((tc) => ({
- name: tc.function.name,
- input: safeParseJSON(tc.function.arguments)
- }));
- for (const tc of msg.tool_calls) {
- result.toolCalls.push({
- name: tc.function.name,
- input: safeParseJSON(tc.function.arguments)
- });
- }
- }
- result.llmCalls.push(llmCall);
- if (msg.content) {
- result.output = msg.content;
- }
- } else if (msg.role === "tool") {
- const lastToolCall = result.toolCalls[result.toolCalls.length - 1];
- if (lastToolCall) {
- lastToolCall.output = safeParseJSON(msg.content ?? "");
- }
- }
+ // src/core/config.ts
+ var globalConfig = {};
+ var globalTransport = null;
+ var DEFAULT_ENDPOINT = "https://api.lelemon.dev";
+ function init(config = {}) {
+ globalConfig = config;
+ globalTransport = createTransport(config);
+ }
+ function getConfig() {
+ return globalConfig;
+ }
+ function isEnabled() {
+ return getTransport().isEnabled();
+ }
+ function getTransport() {
+ if (!globalTransport) {
+ globalTransport = createTransport(globalConfig);
  }
- return result;
+ return globalTransport;
  }
- function parseAnthropic(messages) {
- const result = {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "anthropic"
- };
- for (const msg of messages) {
- if (msg.role === "user") {
- if (!result.userInput) {
- if (typeof msg.content === "string") {
- result.userInput = msg.content;
- } else if (Array.isArray(msg.content)) {
- const textContent = msg.content.find(
- (c) => c.type === "text"
- );
- if (textContent && "text" in textContent) {
- result.userInput = textContent.text;
- }
- }
- }
- if (Array.isArray(msg.content)) {
- for (const block of msg.content) {
- if (block.type === "tool_result" && block.tool_use_id) {
- const toolCall = result.toolCalls.find(
- (tc) => tc.id === block.tool_use_id
- );
- if (toolCall) {
- toolCall.output = block.content;
- }
- }
- }
- }
- } else if (msg.role === "assistant") {
- const llmCall = {
- provider: "anthropic"
- };
- if (typeof msg.content === "string") {
- llmCall.output = msg.content;
- result.output = msg.content;
- } else if (Array.isArray(msg.content)) {
- const outputs = [];
- const toolCalls = [];
- for (const block of msg.content) {
- if (block.type === "text" && block.text) {
- outputs.push(block.text);
- } else if (block.type === "tool_use" && block.name) {
- const tc = {
- name: block.name,
- input: block.input
- };
- if (block.id) {
- tc.id = block.id;
- }
- toolCalls.push(tc);
- result.toolCalls.push(tc);
- }
- }
- if (outputs.length) {
- llmCall.output = outputs.join("\n");
- result.output = outputs.join("\n");
- }
- if (toolCalls.length) {
- llmCall.toolCalls = toolCalls;
- }
- }
- result.llmCalls.push(llmCall);
- }
+ async function flush() {
+ if (globalTransport) {
+ await globalTransport.flush();
  }
- return result;
  }
- function parseGemini(messages) {
- const result = {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "gemini"
- };
- for (const msg of messages) {
- if (msg.role === "user") {
- if (!result.userInput) {
- const textPart = msg.parts.find((p) => p.text);
- if (textPart?.text) {
- result.userInput = textPart.text;
- }
- }
- for (const part of msg.parts) {
- if (part.functionResponse) {
- const toolCall = result.toolCalls.find(
- (tc) => tc.name === part.functionResponse.name
- );
- if (toolCall) {
- toolCall.output = part.functionResponse.response;
- }
- }
- }
- } else if (msg.role === "model") {
- const llmCall = {
- provider: "gemini"
- };
- const outputs = [];
- const toolCalls = [];
- for (const part of msg.parts) {
- if (part.text) {
- outputs.push(part.text);
- } else if (part.functionCall) {
- const tc = {
- name: part.functionCall.name,
- input: part.functionCall.args
- };
- toolCalls.push(tc);
- result.toolCalls.push(tc);
- }
- }
- if (outputs.length) {
- llmCall.output = outputs.join("\n");
- result.output = outputs.join("\n");
- }
- if (toolCalls.length) {
- llmCall.toolCalls = toolCalls;
+ function createTransport(config) {
+ const apiKey = config.apiKey ?? getEnvVar("LELEMON_API_KEY");
+ if (!apiKey && !config.disabled) {
+ console.warn(
+ "[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled."
+ );
+ }
+ return new Transport({
+ apiKey: apiKey ?? "",
+ endpoint: config.endpoint ?? DEFAULT_ENDPOINT,
+ debug: config.debug ?? false,
+ disabled: config.disabled ?? !apiKey,
+ batchSize: config.batchSize,
+ flushIntervalMs: config.flushIntervalMs,
+ requestTimeoutMs: config.requestTimeoutMs
+ });
+ }
+ function getEnvVar(name) {
+ if (typeof process !== "undefined" && process.env) {
+ return process.env[name];
+ }
+ return void 0;
+ }
+
+ // src/providers/base.ts
+ function safeExtract(fn, fallback) {
+ try {
+ return fn() ?? fallback;
+ } catch {
+ return fallback;
+ }
+ }
+ function getNestedValue(obj, path) {
+ try {
+ const parts = path.split(".");
+ let current = obj;
+ for (const part of parts) {
+ if (current == null || typeof current !== "object") {
+ return void 0;
  }
- result.llmCalls.push(llmCall);
+ current = current[part];
  }
+ return current;
+ } catch {
+ return void 0;
  }
- return result;
  }
- function safeParseJSON(str) {
+ function isValidNumber(value) {
+ return typeof value === "number" && !isNaN(value) && isFinite(value);
+ }
+
+ // src/core/capture.ts
+ var globalContext = {};
+ function setGlobalContext(options) {
+ globalContext = options;
+ }
+ function getGlobalContext() {
+ return globalContext;
+ }
+ function captureTrace(params) {
  try {
- return JSON.parse(str);
+ const transport = getTransport();
+ if (!transport.isEnabled()) return;
+ const context = getGlobalContext();
+ const request = {
+ provider: params.provider,
+ model: params.model,
+ input: sanitizeInput(params.input),
+ output: sanitizeOutput(params.output),
+ inputTokens: params.inputTokens,
+ outputTokens: params.outputTokens,
+ durationMs: params.durationMs,
+ status: params.status,
+ streaming: params.streaming,
+ sessionId: context.sessionId,
+ userId: context.userId,
+ metadata: { ...context.metadata, ...params.metadata },
+ tags: context.tags
+ };
+ transport.enqueue(request);
  } catch {
- return str;
  }
  }
- function parseMessages(messages) {
- if (!messages) {
- return {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "unknown"
+ function captureError(params) {
+ try {
+ const transport = getTransport();
+ if (!transport.isEnabled()) return;
+ const context = getGlobalContext();
+ const request = {
+ provider: params.provider,
+ model: params.model,
+ input: sanitizeInput(params.input),
+ output: null,
+ inputTokens: 0,
+ outputTokens: 0,
+ durationMs: params.durationMs,
+ status: "error",
+ errorMessage: params.error.message,
+ errorStack: params.error.stack,
+ streaming: params.streaming,
+ sessionId: context.sessionId,
+ userId: context.userId,
+ metadata: { ...context.metadata, ...params.metadata },
+ tags: context.tags
  };
+ transport.enqueue(request);
+ } catch {
  }
- if (!Array.isArray(messages)) {
- return {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "unknown",
- output: typeof messages === "string" ? messages : JSON.stringify(messages)
- };
+ }
+ var MAX_STRING_LENGTH = 1e5;
+ var SENSITIVE_KEYS = ["api_key", "apikey", "password", "secret", "token", "authorization"];
+ function sanitizeInput(input) {
+ return sanitize(input, 0);
+ }
+ function sanitizeOutput(output) {
+ return sanitize(output, 0);
+ }
+ function sanitize(value, depth) {
+ if (depth > 10) return "[max depth exceeded]";
+ if (value === null || value === void 0) return value;
+ if (typeof value === "string") {
+ return value.length > MAX_STRING_LENGTH ? value.slice(0, MAX_STRING_LENGTH) + "...[truncated]" : value;
+ }
+ if (typeof value === "number" || typeof value === "boolean") {
+ return value;
+ }
+ if (Array.isArray(value)) {
+ return value.map((item) => sanitize(item, depth + 1));
+ }
+ if (typeof value === "object") {
+ const sanitized = {};
+ for (const [key, val] of Object.entries(value)) {
+ if (SENSITIVE_KEYS.some((k) => key.toLowerCase().includes(k))) {
+ sanitized[key] = "[REDACTED]";
+ } else {
+ sanitized[key] = sanitize(val, depth + 1);
+ }
+ }
+ return sanitized;
  }
- if (isGeminiFormat(messages)) {
- return parseGemini(messages);
- } else if (isOpenAIFormat(messages)) {
- return parseOpenAI(messages);
- } else if (isAnthropicFormat(messages)) {
- return parseAnthropic(messages);
- }
- return {
- llmCalls: [],
- toolCalls: [],
- totalInputTokens: 0,
- totalOutputTokens: 0,
- models: [],
- provider: "unknown"
+ return String(value);
+ }
+
+ // src/providers/openai.ts
+ var PROVIDER_NAME = "openai";
+ function canHandle(client) {
+ if (!client || typeof client !== "object") return false;
+ const constructorName = client.constructor?.name;
+ if (constructorName === "OpenAI") return true;
+ const c = client;
+ return !!(c.chat && c.completions);
+ }
+ function wrapChatCreate(originalFn) {
+ return async function wrappedChatCreate(...args) {
+ const startTime = Date.now();
+ const request = args[0] || {};
+ const isStreaming = request.stream === true;
+ try {
+ const response = await originalFn(...args);
+ if (isStreaming && isAsyncIterable(response)) {
+ return wrapStream(response, request, startTime);
+ }
+ const durationMs = Date.now() - startTime;
+ const extracted = extractChatCompletion(response);
+ captureTrace({
+ provider: PROVIDER_NAME,
+ model: request.model || extracted.model || "unknown",
+ input: request.messages,
+ output: extracted.output,
+ inputTokens: extracted.tokens?.inputTokens || 0,
+ outputTokens: extracted.tokens?.outputTokens || 0,
+ durationMs,
+ status: "success",
+ streaming: false
+ });
+ return response;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.messages,
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: isStreaming
+ });
+ throw error;
+ }
  };
  }
- function parseResponse(response) {
- if (!response || typeof response !== "object") {
- return {};
- }
- const res = response;
- const result = {};
- if ("model" in res) {
- result.model = res.model;
- }
- if ("modelId" in res) {
- result.model = res.modelId;
- }
- if ("usage" in res && typeof res.usage === "object" && res.usage !== null) {
- const usage = res.usage;
- result.inputTokens = usage.prompt_tokens ?? usage.input_tokens;
- result.outputTokens = usage.completion_tokens ?? usage.output_tokens;
- }
- if ("$metadata" in res && "usage" in res) {
- result.provider = "bedrock";
- }
- if ("anthropic_version" in res || "amazon-bedrock-invocationMetrics" in res) {
- result.provider = "bedrock";
- }
- if ("candidates" in res || "promptFeedback" in res) {
- result.provider = "gemini";
- if ("usageMetadata" in res && typeof res.usageMetadata === "object" && res.usageMetadata !== null) {
- const usage = res.usageMetadata;
- result.inputTokens = usage.promptTokenCount;
- result.outputTokens = usage.candidatesTokenCount;
+ function wrapCompletionCreate(originalFn) {
+ return async function wrappedCompletionCreate(...args) {
+ const startTime = Date.now();
+ const request = args[0] || {};
+ try {
+ const response = await originalFn(...args);
+ const durationMs = Date.now() - startTime;
+ const extracted = extractLegacyCompletion(response);
+ captureTrace({
+ provider: PROVIDER_NAME,
+ model: request.model || extracted.model || "unknown",
+ input: request.prompt,
+ output: extracted.output,
+ inputTokens: extracted.tokens?.inputTokens || 0,
+ outputTokens: extracted.tokens?.outputTokens || 0,
+ durationMs,
+ status: "success",
+ streaming: false
+ });
+ return response;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.prompt,
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: false
+ });
+ throw error;
+ }
+ };
+ }
+ function wrapEmbeddingsCreate(originalFn) {
+ return async function wrappedEmbeddingsCreate(...args) {
+ const startTime = Date.now();
+ const request = args[0] || {};
+ try {
+ const response = await originalFn(...args);
+ const durationMs = Date.now() - startTime;
+ const tokens = extractEmbeddingTokens(response);
+ captureTrace({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.input,
+ output: "[embedding vectors]",
+ inputTokens: tokens?.inputTokens || 0,
+ outputTokens: 0,
+ durationMs,
+ status: "success",
+ streaming: false
+ });
+ return response;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.input,
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: false
+ });
+ throw error;
+ }
+ };
+ }
+ function isAsyncIterable(value) {
+ return value != null && typeof value[Symbol.asyncIterator] === "function";
+ }
+ async function* wrapStream(stream, request, startTime) {
+ const chunks = [];
+ let tokens = null;
+ let error = null;
+ try {
+ for await (const chunk of stream) {
+ const content = extractStreamChunkContent(chunk);
+ if (content) {
+ chunks.push(content);
+ }
+ const chunkTokens = extractStreamChunkTokens(chunk);
+ if (chunkTokens) {
+ tokens = chunkTokens;
+ }
+ yield chunk;
+ }
+ } catch (err) {
+ error = err instanceof Error ? err : new Error(String(err));
+ throw err;
+ } finally {
+ const durationMs = Date.now() - startTime;
+ const output = chunks.join("");
+ if (error) {
+ captureError({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.messages,
+ error,
+ durationMs,
+ streaming: true
+ });
+ } else {
+ captureTrace({
+ provider: PROVIDER_NAME,
+ model: request.model || "unknown",
+ input: request.messages,
+ output,
+ inputTokens: tokens?.inputTokens || 0,
+ outputTokens: tokens?.outputTokens || 0,
+ durationMs,
+ status: "success",
+ streaming: true
+ });
  }
  }
- if (result.model && !result.provider) {
- if (result.model.startsWith("gpt") || result.model.startsWith("o1")) {
- result.provider = "openai";
- } else if (result.model.startsWith("claude")) {
- result.provider = "anthropic";
- } else if (result.model.startsWith("gemini")) {
- result.provider = "gemini";
- } else if (result.model.startsWith("anthropic.") || result.model.startsWith("amazon.") || result.model.startsWith("meta.") || result.model.startsWith("cohere.") || result.model.startsWith("mistral.") || result.model.includes(":")) {
- result.provider = "bedrock";
+ }
+ function extractChatCompletion(response) {
+ const model = safeExtract(() => getNestedValue(response, "model"), null);
+ const output = safeExtract(
+ () => getNestedValue(response, "choices.0.message.content"),
+ null
+ );
+ const tokens = extractTokens(response);
+ return { model, output, tokens };
+ }
+ function extractLegacyCompletion(response) {
+ const model = safeExtract(() => getNestedValue(response, "model"), null);
+ const output = safeExtract(
+ () => getNestedValue(response, "choices.0.text"),
+ null
+ );
+ const tokens = extractTokens(response);
+ return { model, output, tokens };
+ }
+ function extractTokens(response) {
+ try {
+ const usage = getNestedValue(response, "usage");
+ if (!usage || typeof usage !== "object") return null;
+ const u = usage;
+ const promptTokens = u.prompt_tokens;
+ const completionTokens = u.completion_tokens;
+ const totalTokens = u.total_tokens;
+ if (!isValidNumber(promptTokens) && !isValidNumber(completionTokens)) {
+ return null;
  }
+ return {
+ inputTokens: isValidNumber(promptTokens) ? promptTokens : 0,
+ outputTokens: isValidNumber(completionTokens) ? completionTokens : 0,
+ totalTokens: isValidNumber(totalTokens) ? totalTokens : 0
+ };
+ } catch {
+ return null;
  }
- return result;
  }
- function parseBedrockResponse(body) {
- if (!body || typeof body !== "object") {
- return {};
+ function extractEmbeddingTokens(response) {
+ try {
+ const usage = getNestedValue(response, "usage");
+ if (!usage || typeof usage !== "object") return null;
+ const u = usage;
+ const promptTokens = u.prompt_tokens;
+ const totalTokens = u.total_tokens;
+ return {
+ inputTokens: isValidNumber(promptTokens) ? promptTokens : 0,
+ outputTokens: 0,
+ totalTokens: isValidNumber(totalTokens) ? totalTokens : 0
+ };
+ } catch {
+ return null;
  }
- const res = body;
- const result = { provider: "bedrock" };
- if ("usage" in res && typeof res.usage === "object" && res.usage !== null) {
- const usage = res.usage;
- result.inputTokens = usage.input_tokens;
- result.outputTokens = usage.output_tokens;
+ }
+ function extractStreamChunkContent(chunk) {
+ try {
+ return chunk?.choices?.[0]?.delta?.content ?? null;
+ } catch {
+ return null;
  }
- if ("amazon-bedrock-invocationMetrics" in res) {
- const metrics = res["amazon-bedrock-invocationMetrics"];
- if (metrics.inputTokenCount) result.inputTokens = metrics.inputTokenCount;
- if (metrics.outputTokenCount) result.outputTokens = metrics.outputTokenCount;
+ }
+ function extractStreamChunkTokens(chunk) {
+ try {
+ const usage = chunk?.usage;
+ if (!usage) return null;
+ return {
+ inputTokens: isValidNumber(usage.prompt_tokens) ? usage.prompt_tokens : 0,
+ outputTokens: isValidNumber(usage.completion_tokens) ? usage.completion_tokens : 0,
+ totalTokens: 0
+ };
+ } catch {
+ return null;
  }
- return result;
  }
 
- // src/tracer.ts
- var DEFAULT_ENDPOINT = "https://api.lelemon.dev";
- var globalConfig = {};
- var globalTransport = null;
- function init(config = {}) {
- globalConfig = config;
- globalTransport = createTransport(config);
+ // src/providers/anthropic.ts
+ var PROVIDER_NAME2 = "anthropic";
+ function canHandle2(client) {
+ if (!client || typeof client !== "object") return false;
+ const constructorName = client.constructor?.name;
+ if (constructorName === "Anthropic") return true;
+ const c = client;
+ return !!(c.messages && typeof c.messages === "object");
  }
- function trace(options) {
- const transport = getTransport();
- const debug = globalConfig.debug ?? false;
- const disabled = globalConfig.disabled ?? !transport.isEnabled();
- return new Trace(options, transport, debug, disabled);
+ function wrapMessagesCreate(originalFn) {
+ return async function wrappedMessagesCreate(...args) {
+ const startTime = Date.now();
+ const request = args[0] || {};
+ const isStreaming = request.stream === true;
+ try {
+ const response = await originalFn(...args);
+ if (isStreaming && isAsyncIterable2(response)) {
+ return wrapStream2(response, request, startTime);
+ }
+ const durationMs = Date.now() - startTime;
+ const extracted = extractMessageResponse(response);
+ captureTrace({
+ provider: PROVIDER_NAME2,
+ model: request.model || extracted.model || "unknown",
+ input: { system: request.system, messages: request.messages },
+ output: extracted.output,
+ inputTokens: extracted.tokens?.inputTokens || 0,
+ outputTokens: extracted.tokens?.outputTokens || 0,
+ durationMs,
+ status: "success",
+ streaming: false
+ });
+ return response;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME2,
+ model: request.model || "unknown",
+ input: { system: request.system, messages: request.messages },
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: isStreaming
+ });
+ throw error;
+ }
+ };
  }
- async function flush() {
- if (globalTransport) {
- await globalTransport.flush();
- }
+ function wrapMessagesStream(originalFn) {
+ return function wrappedMessagesStream(...args) {
+ const startTime = Date.now();
+ const request = args[0] || {};
+ try {
+ const stream = originalFn(...args);
+ if (stream && typeof stream === "object") {
+ return wrapAnthropicStream(stream, request, startTime);
+ }
+ return stream;
+ } catch (error) {
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME2,
+ model: request.model || "unknown",
+ input: { system: request.system, messages: request.messages },
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: true
+ });
+ throw error;
+ }
+ };
  }
- function isEnabled() {
- return getTransport().isEnabled();
+ function isAsyncIterable2(value) {
+ return value != null && typeof value[Symbol.asyncIterator] === "function";
  }
- var Trace = class {
- constructor(options, transport, debug, disabled) {
- __publicField(this, "id", null);
- __publicField(this, "idPromise");
- __publicField(this, "transport");
- __publicField(this, "startTime");
- __publicField(this, "debug");
- __publicField(this, "disabled");
- __publicField(this, "completed", false);
- __publicField(this, "llmCalls", []);
- this.transport = transport;
- this.startTime = Date.now();
- this.debug = debug;
- this.disabled = disabled;
- if (disabled) {
- this.idPromise = Promise.resolve(null);
- } else {
- this.idPromise = transport.enqueueCreate({
- name: options.name,
- sessionId: options.sessionId,
- userId: options.userId,
- input: options.input,
- metadata: options.metadata,
- tags: options.tags
- });
- this.idPromise.then((id) => {
- this.id = id;
- });
+ function wrapAnthropicStream(stream, request, startTime) {
+ const originalStream = stream;
+ if (!originalStream[Symbol.asyncIterator]) {
+ return stream;
+ }
+ const chunks = [];
+ let inputTokens = 0;
+ let outputTokens = 0;
+ let model = request.model || "unknown";
+ let captured = false;
+ const wrappedIterator = async function* () {
+ try {
+ for await (const event of originalStream) {
+ if (event.type === "message_start" && event.message) {
+ model = event.message.model || model;
+ if (event.message.usage) {
+ inputTokens = event.message.usage.input_tokens || 0;
+ }
+ }
+ if (event.type === "content_block_delta" && event.delta?.text) {
+ chunks.push(event.delta.text);
+ }
+ if (event.type === "message_delta" && event.usage) {
+ outputTokens = event.usage.output_tokens || 0;
+ }
+ yield event;
+ }
+ } catch (error) {
+ if (!captured) {
+ captured = true;
+ const durationMs = Date.now() - startTime;
+ captureError({
+ provider: PROVIDER_NAME2,
+ model,
+ input: { system: request.system, messages: request.messages },
+ error: error instanceof Error ? error : new Error(String(error)),
+ durationMs,
+ streaming: true
+ });
+ }
+ throw error;
+ } finally {
+ if (!captured) {
+ captured = true;
+ const durationMs = Date.now() - startTime;
+ captureTrace({
+ provider: PROVIDER_NAME2,
+ model,
+ input: { system: request.system, messages: request.messages },
+ output: chunks.join(""),
+ inputTokens,
+ outputTokens,
+ durationMs,
+ status: "success",
+ streaming: true
+ });
+ }
  }
- }
- /**
- * Log an LLM response for token tracking
- * Optional - use if you want per-call token counts
- */
- log(response) {
- if (this.disabled || this.completed) return this;
- const parsed = parseResponse(response);
- if (parsed.model || parsed.inputTokens || parsed.outputTokens) {
- this.llmCalls.push(parsed);
+ };
+ return new Proxy(stream, {
+ get(target, prop, receiver) {
+ if (prop === Symbol.asyncIterator) {
+ return () => wrappedIterator()[Symbol.asyncIterator]();
+ }
+ return Reflect.get(target, prop, receiver);
  }
- return this;
- }
- /**
- * Complete trace successfully (fire-and-forget)
- *
- * @param messages - Full message history (OpenAI/Anthropic format)
- */
- success(messages) {
- if (this.completed || this.disabled) return;
- this.completed = true;
- const durationMs = Date.now() - this.startTime;
- const parsed = parseMessages(messages);
- const allLLMCalls = [...this.llmCalls, ...parsed.llmCalls];
- const { totalInputTokens, totalOutputTokens, models } = this.aggregateCalls(allLLMCalls);
- this.idPromise.then((id) => {
- if (!id) return;
- this.transport.enqueueComplete(id, {
- status: "completed",
- output: parsed.output,
- systemPrompt: parsed.systemPrompt,
- llmCalls: allLLMCalls,
- toolCalls: parsed.toolCalls,
- models,
- totalInputTokens,
- totalOutputTokens,
- durationMs
+ });
+ }
+ async function* wrapStream2(stream, request, startTime) {
+ const chunks = [];
+ let inputTokens = 0;
+ let outputTokens = 0;
+ let model = request.model || "unknown";
+ let error = null;
+ try {
+ for await (const event of stream) {
+ if (event.type === "message_start" && event.message) {
+ model = event.message.model || model;
+ if (event.message.usage) {
+ inputTokens = event.message.usage.input_tokens || 0;
+ }
+ }
+ if (event.type === "content_block_delta" && event.delta?.text) {
+ chunks.push(event.delta.text);
+ }
+ if (event.type === "message_delta" && event.usage) {
+ outputTokens = event.usage.output_tokens || 0;
+ }
+ yield event;
+ }
+ } catch (err) {
+ error = err instanceof Error ? err : new Error(String(err));
+ throw err;
+ } finally {
+ const durationMs = Date.now() - startTime;
+ if (error) {
+ captureError({
+ provider: PROVIDER_NAME2,
+ model,
+ input: { system: request.system, messages: request.messages },
+ error,
+ durationMs,
+ streaming: true
  });
- });
- }
- /**
- * Complete trace with error (fire-and-forget)
- *
- * @param error - The error that occurred
- * @param messages - Optional message history up to failure
- */
- error(error, messages) {
- if (this.completed || this.disabled) return;
- this.completed = true;
- const durationMs = Date.now() - this.startTime;
- const parsed = messages ? parseMessages(messages) : null;
- const errorObj = error instanceof Error ? error : new Error(String(error));
- const allLLMCalls = parsed ? [...this.llmCalls, ...parsed.llmCalls] : this.llmCalls;
- const { totalInputTokens, totalOutputTokens, models } = this.aggregateCalls(allLLMCalls);
- this.idPromise.then((id) => {
- if (!id) return;
- this.transport.enqueueComplete(id, {
- status: "error",
- errorMessage: errorObj.message,
- errorStack: errorObj.stack,
- output: parsed?.output,
- systemPrompt: parsed?.systemPrompt,
- llmCalls: allLLMCalls.length > 0 ? allLLMCalls : void 0,
- toolCalls: parsed?.toolCalls,
- models: models.length > 0 ? models : void 0,
- totalInputTokens,
- totalOutputTokens,
- durationMs
+ } else {
+ captureTrace({
+ provider: PROVIDER_NAME2,
+ model,
+ input: { system: request.system, messages: request.messages },
+ output: chunks.join(""),
+ inputTokens,
+ outputTokens,
+ durationMs,
+ status: "success",
+ streaming: true
  });
- });
- }
- /**
- * Get the trace ID (may be null if not yet created or failed)
- */
- getId() {
- return this.id;
- }
- /**
- * Wait for trace ID to be available
- */
- async waitForId() {
- return this.idPromise;
+ }
  }
- // ─────────────────────────────────────────────────────────────
- // Private methods
- // ─────────────────────────────────────────────────────────────
- aggregateCalls(calls) {
- let totalInputTokens = 0;
- let totalOutputTokens = 0;
- const modelSet = /* @__PURE__ */ new Set();
- for (const call of calls) {
- if (call.inputTokens) totalInputTokens += call.inputTokens;
- if (call.outputTokens) totalOutputTokens += call.outputTokens;
- if (call.model) modelSet.add(call.model);
+ }
+ function extractMessageResponse(response) {
+ const model = safeExtract(() => response.model ?? null, null);
+ const output = safeExtract(() => {
+ if (!response.content || !Array.isArray(response.content)) return null;
+ const textBlocks = response.content.filter((block) => block.type === "text" && block.text).map((block) => block.text);
+ return textBlocks.join("") || null;
+ }, null);
+ const tokens = extractTokens2(response);
+ return { model, output, tokens };
+ }
+ function extractTokens2(response) {
+ try {
+ const usage = response.usage;
+ if (!usage) return null;
+ const inputTokens = usage.input_tokens;
+ const outputTokens = usage.output_tokens;
+ if (!isValidNumber(inputTokens) && !isValidNumber(outputTokens)) {
+ return null;
  }
  return {
- totalInputTokens,
- totalOutputTokens,
- models: Array.from(modelSet)
+ inputTokens: isValidNumber(inputTokens) ? inputTokens : 0,
+ outputTokens: isValidNumber(outputTokens) ? outputTokens : 0,
+ totalTokens: (isValidNumber(inputTokens) ? inputTokens : 0) + (isValidNumber(outputTokens) ? outputTokens : 0)
  };
+ } catch {
+ return null;
  }
- };
- function getTransport() {
- if (!globalTransport) {
- globalTransport = createTransport(globalConfig);
- }
- return globalTransport;
  }
- function createTransport(config) {
- const apiKey = config.apiKey ?? getEnvVar("LELEMON_API_KEY");
- if (!apiKey && !config.disabled) {
- console.warn(
- "[Lelemon] No API key provided. Set apiKey in config or LELEMON_API_KEY env var. Tracing disabled."
- );
+
+ // src/observe.ts
+ function observe(client, options) {
+ if (options) {
+ setGlobalContext(options);
+ }
+ const config = getConfig();
+ if (config.disabled) {
+ return client;
+ }
+ if (canHandle(client)) {
+ if (config.debug) {
+ console.log("[Lelemon] Wrapping OpenAI client");
+ }
+ return wrapOpenAI(client);
  }
- return new Transport({
- apiKey: apiKey ?? "",
- endpoint: config.endpoint ?? DEFAULT_ENDPOINT,
- debug: config.debug ?? false,
- disabled: config.disabled ?? !apiKey,
- batchSize: config.batchSize,
- flushIntervalMs: config.flushIntervalMs,
- requestTimeoutMs: config.requestTimeoutMs
+ if (canHandle2(client)) {
+ if (config.debug) {
+ console.log("[Lelemon] Wrapping Anthropic client");
+ }
+ return wrapAnthropic(client);
+ }
+ console.warn(
+ "[Lelemon] Unknown client type. Tracing not enabled. Supported: OpenAI, Anthropic"
+ );
+ return client;
+ }
+ function wrapOpenAI(client) {
+ const typed = client;
+ return new Proxy(typed, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "chat" && value && typeof value === "object") {
+ return wrapOpenAIChat(value);
+ }
+ if (prop === "completions" && value && typeof value === "object") {
+ return wrapOpenAICompletions(value);
+ }
+ if (prop === "embeddings" && value && typeof value === "object") {
+ return wrapOpenAIEmbeddings(value);
+ }
+ return value;
+ }
  });
  }
- function getEnvVar(name) {
- if (typeof process !== "undefined" && process.env) {
- return process.env[name];
- }
- return void 0;
+ function wrapOpenAIChat(chat) {
+ return new Proxy(chat, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "completions" && value && typeof value === "object") {
+ return wrapOpenAIChatCompletions(value);
+ }
+ return value;
+ }
+ });
+ }
+ function wrapOpenAIChatCompletions(completions) {
+ return new Proxy(completions, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "create" && typeof value === "function") {
+ return wrapChatCreate(value.bind(target));
+ }
+ return value;
+ }
+ });
+ }
+ function wrapOpenAICompletions(completions) {
+ return new Proxy(completions, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "create" && typeof value === "function") {
+ return wrapCompletionCreate(value.bind(target));
+ }
+ return value;
+ }
+ });
+ }
+ function wrapOpenAIEmbeddings(embeddings) {
+ return new Proxy(embeddings, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "create" && typeof value === "function") {
+ return wrapEmbeddingsCreate(value.bind(target));
+ }
+ return value;
+ }
+ });
+ }
+ function wrapAnthropic(client) {
+ const typed = client;
+ return new Proxy(typed, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "messages" && value && typeof value === "object") {
+ return wrapAnthropicMessages(value);
+ }
+ return value;
+ }
+ });
+ }
+ function wrapAnthropicMessages(messages) {
+ return new Proxy(messages, {
+ get(target, prop, receiver) {
+ const value = Reflect.get(target, prop, receiver);
+ if (prop === "create" && typeof value === "function") {
+ return wrapMessagesCreate(value.bind(target));
+ }
+ if (prop === "stream" && typeof value === "function") {
+ return wrapMessagesStream(value.bind(target));
+ }
+ return value;
+ }
+ });
+ }
+ function createObserve(defaultOptions) {
+ return function scopedObserve(client, options) {
+ return observe(client, { ...defaultOptions, ...options });
+ };
  }
 
- export { Trace, flush, init, isEnabled, parseBedrockResponse, parseMessages, parseResponse, trace };
+ export { createObserve, flush, init, isEnabled, observe };
  //# sourceMappingURL=index.mjs.map
  //# sourceMappingURL=index.mjs.map
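
The new createObserve() export closes over default options and merges them into every call, which makes it easy to build a session- or user-scoped wrapper once and reuse it. A sketch with illustrative identifiers; note that observe() stores these options via setGlobalContext(), so the most recent call's context applies to subsequent captures:

    import Anthropic from "@anthropic-ai/sdk";
    import { createObserve } from "@lelemondev/sdk";

    const observeForUser = createObserve({ sessionId: "sess_123", userId: "user_42" });
    const anthropic = observeForUser(new Anthropic());

    // messages.create and messages.stream on this client are now captured.
    // Inputs and outputs pass through sanitize(), which truncates strings
    // beyond 100000 chars and redacts keys such as "api_key" or "password".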