@lelemondev/sdk 0.7.0 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. package/LICENSE +21 -0
  2. package/dist/anthropic.d.mts +25 -0
  3. package/dist/anthropic.d.ts +25 -0
  4. package/dist/anthropic.js +5 -0
  5. package/dist/anthropic.js.map +1 -0
  6. package/dist/anthropic.mjs +5 -0
  7. package/dist/anthropic.mjs.map +1 -0
  8. package/dist/bedrock.d.mts +25 -0
  9. package/dist/bedrock.d.ts +25 -0
  10. package/dist/bedrock.js +4 -0
  11. package/dist/bedrock.js.map +1 -0
  12. package/dist/bedrock.mjs +4 -0
  13. package/dist/bedrock.mjs.map +1 -0
  14. package/dist/express.js +2 -20
  15. package/dist/express.js.map +1 -1
  16. package/dist/express.mjs +1 -17
  17. package/dist/express.mjs.map +1 -1
  18. package/dist/gemini.d.mts +26 -0
  19. package/dist/gemini.d.ts +26 -0
  20. package/dist/gemini.js +4 -0
  21. package/dist/gemini.js.map +1 -0
  22. package/dist/gemini.mjs +4 -0
  23. package/dist/gemini.mjs.map +1 -0
  24. package/dist/hono.js +2 -22
  25. package/dist/hono.js.map +1 -1
  26. package/dist/hono.mjs +1 -19
  27. package/dist/hono.mjs.map +1 -1
  28. package/dist/index.d.mts +118 -26
  29. package/dist/index.d.ts +118 -26
  30. package/dist/index.js +3 -2128
  31. package/dist/index.js.map +1 -1
  32. package/dist/index.mjs +3 -2121
  33. package/dist/index.mjs.map +1 -1
  34. package/dist/integrations.js +2 -92
  35. package/dist/integrations.js.map +1 -1
  36. package/dist/integrations.mjs +1 -86
  37. package/dist/integrations.mjs.map +1 -1
  38. package/dist/lambda.js +2 -20
  39. package/dist/lambda.js.map +1 -1
  40. package/dist/lambda.mjs +1 -17
  41. package/dist/lambda.mjs.map +1 -1
  42. package/dist/next.js +2 -32
  43. package/dist/next.js.map +1 -1
  44. package/dist/next.mjs +1 -28
  45. package/dist/next.mjs.map +1 -1
  46. package/dist/openai.d.mts +25 -0
  47. package/dist/openai.d.ts +25 -0
  48. package/dist/openai.js +4 -0
  49. package/dist/openai.js.map +1 -0
  50. package/dist/openai.mjs +4 -0
  51. package/dist/openai.mjs.map +1 -0
  52. package/dist/openrouter.d.mts +33 -0
  53. package/dist/openrouter.d.ts +33 -0
  54. package/dist/openrouter.js +4 -0
  55. package/dist/openrouter.js.map +1 -0
  56. package/dist/openrouter.mjs +4 -0
  57. package/dist/openrouter.mjs.map +1 -0
  58. package/package.json +41 -1
package/dist/hono.mjs.map CHANGED
@@ -1 +1 @@
- {"version":3,"sources":["../src/core/config.ts","../src/integrations/hono.ts"],"names":[],"mappings":";;AA2FA,eAAsB,KAAA,GAAuB;AAI7C;;;ACZO,SAAS,gBAAA,GAAmC;AACjD,EAAA,OAAO,OAAO,GAAG,IAAA,KAAS;AACxB,IAAA,MAAM,IAAA,EAAK;AAGX,IAAA,IAAI,CAAA,CAAE,cAAc,SAAA,EAAW;AAC7B,MAAA,CAAA,CAAE,YAAA,CAAa,SAAA,CAAU,KAAA,EAAO,CAAA;AAAA,IAClC,CAAA,MAAO;AAEL,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAAC,CAAC,CAAA;AAAA,IACxB;AAAA,EACF,CAAA;AACF","file":"hono.mjs","sourcesContent":["/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\nimport { setDebug, info, warn, debug } from './logger';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://www.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n\n // Configure debug mode\n if (config.debug) {\n setDebug(true);\n }\n\n info('Initializing SDK', {\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? false,\n });\n\n globalTransport = createTransport(config);\n initialized = true;\n\n // Log status after transport is created\n if (globalTransport.isEnabled()) {\n info('SDK initialized - tracing enabled');\n } else {\n debug('SDK initialized - tracing disabled (no API key or explicitly disabled)');\n }\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n warn('No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.');\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? 
!apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * Hono Integration\n *\n * Middleware for Hono framework (Cloudflare Workers, Deno, Bun, Node.js).\n * Uses executionCtx.waitUntil() when available for non-blocking flush.\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring hono as dependency)\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Execution context for edge runtimes (Cloudflare Workers, Deno Deploy)\n */\nexport interface ExecutionContext {\n waitUntil(promise: Promise<unknown>): void;\n passThroughOnException(): void;\n}\n\n/**\n * Minimal Hono context type (avoids requiring hono as dependency)\n */\nexport interface HonoContext {\n req: {\n raw: Request;\n [key: string]: unknown;\n };\n res: Response | undefined;\n executionCtx?: ExecutionContext;\n [key: string]: unknown;\n}\n\n/**\n * Hono next function type\n */\nexport type HonoNextFunction = () => Promise<void>;\n\n/**\n * Hono middleware function type\n *\n * @param c - Hono context object\n * @param next - Next function to continue middleware chain\n */\nexport type HonoMiddleware = (c: HonoContext, next: HonoNextFunction) => Promise<void>;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Hono middleware for automatic trace flushing\n *\n * On Cloudflare Workers/Deno Deploy: uses executionCtx.waitUntil() for non-blocking flush\n * On Node.js/Bun: flushes after response (fire-and-forget)\n *\n * @returns Hono middleware function\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n *\n * // Global middleware\n * app.use(createMiddleware());\n *\n * app.post('/chat', async (c) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return c.json(result);\n * });\n *\n * export default app;\n */\nexport function createMiddleware(): HonoMiddleware {\n return async (c, next) => {\n await next();\n\n // Use waitUntil if available (Cloudflare Workers, Deno Deploy)\n if (c.executionCtx?.waitUntil) {\n c.executionCtx.waitUntil(flush());\n } else {\n // Fire-and-forget for Node.js/Bun\n flush().catch(() => {});\n }\n };\n}\n"]}
+ {"version":3,"sources":["../src/core/config.ts","../src/integrations/hono.ts"],"names":["flush","createMiddleware","c","next"],"mappings":";AA2FA,eAAsBA,CAAAA,EAAuB,CAI7C,CCZO,SAASC,CAAAA,EAAmC,CACjD,OAAO,MAAOC,EAAGC,CAAAA,GAAS,CACxB,MAAMA,CAAAA,GAGFD,CAAAA,CAAE,YAAA,EAAc,SAAA,CAClBA,CAAAA,CAAE,aAAa,SAAA,CAAUF,CAAAA,EAAO,CAAA,CAGhCA,GAAM,CAAE,KAAA,CAAM,IAAM,CAAC,CAAC,EAE1B,CACF","file":"hono.mjs","sourcesContent":["/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\nimport { setDebug, info, warn, debug } from './logger';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://www.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n\n // Configure debug mode\n if (config.debug) {\n setDebug(true);\n }\n\n info('Initializing SDK', {\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? false,\n });\n\n globalTransport = createTransport(config);\n initialized = true;\n\n // Log status after transport is created\n if (globalTransport.isEnabled()) {\n info('SDK initialized - tracing enabled');\n } else {\n debug('SDK initialized - tracing disabled (no API key or explicitly disabled)');\n }\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n warn('No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.');\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? 
!apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * Hono Integration\n *\n * Middleware for Hono framework (Cloudflare Workers, Deno, Bun, Node.js).\n * Uses executionCtx.waitUntil() when available for non-blocking flush.\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring hono as dependency)\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Execution context for edge runtimes (Cloudflare Workers, Deno Deploy)\n */\nexport interface ExecutionContext {\n waitUntil(promise: Promise<unknown>): void;\n passThroughOnException(): void;\n}\n\n/**\n * Minimal Hono context type (avoids requiring hono as dependency)\n */\nexport interface HonoContext {\n req: {\n raw: Request;\n [key: string]: unknown;\n };\n res: Response | undefined;\n executionCtx?: ExecutionContext;\n [key: string]: unknown;\n}\n\n/**\n * Hono next function type\n */\nexport type HonoNextFunction = () => Promise<void>;\n\n/**\n * Hono middleware function type\n *\n * @param c - Hono context object\n * @param next - Next function to continue middleware chain\n */\nexport type HonoMiddleware = (c: HonoContext, next: HonoNextFunction) => Promise<void>;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Hono middleware for automatic trace flushing\n *\n * On Cloudflare Workers/Deno Deploy: uses executionCtx.waitUntil() for non-blocking flush\n * On Node.js/Bun: flushes after response (fire-and-forget)\n *\n * @returns Hono middleware function\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n *\n * // Global middleware\n * app.use(createMiddleware());\n *\n * app.post('/chat', async (c) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return c.json(result);\n * });\n *\n * export default app;\n */\nexport function createMiddleware(): HonoMiddleware {\n return async (c, next) => {\n await next();\n\n // Use waitUntil if available (Cloudflare Workers, Deno Deploy)\n if (c.executionCtx?.waitUntil) {\n c.executionCtx.waitUntil(flush());\n } else {\n // Fire-and-forget for Node.js/Bun\n flush().catch(() => {});\n }\n };\n}\n"]}
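
The sourcesContent embedded in this map documents the Hono integration: createMiddleware() runs the handler, then flushes pending traces via executionCtx.waitUntil() on edge runtimes or fire-and-forget on Node.js/Bun. A minimal usage sketch based on that embedded JSDoc follows; the /health route is illustrative only, and init() options come from the config.ts source shown above.

```typescript
// Sketch based on the JSDoc embedded in the source map above.
import { Hono } from 'hono';
import { init } from '@lelemondev/sdk';
import { createMiddleware } from '@lelemondev/sdk/hono';

// Initialize once at startup; apiKey falls back to the LELEMON_API_KEY env var.
init({ debug: false });

const app = new Hono();

// Flushes pending traces after each response: executionCtx.waitUntil() on
// Cloudflare Workers/Deno Deploy, fire-and-forget on Node.js/Bun.
app.use(createMiddleware());

// Illustrative route, not part of the SDK.
app.get('/health', (c) => c.json({ ok: true }));

export default app;
```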
package/dist/index.d.mts CHANGED
@@ -17,7 +17,7 @@ interface LelemonConfig {
  /** Request timeout in ms (default: 10000) */
  requestTimeoutMs?: number;
  }
- type ProviderName = 'openai' | 'anthropic' | 'gemini' | 'bedrock' | 'openrouter' | 'unknown';
+ type ProviderName = 'openai' | 'anthropic' | 'gemini' | 'bedrock' | 'openrouter' | 'agent' | 'unknown';
  interface ObserveOptions {
  /** Session ID to group related calls */
  sessionId?: string;
@@ -28,7 +28,7 @@ interface ObserveOptions {
  /** Tags for filtering */
  tags?: string[];
  }
- type SpanType = 'llm' | 'tool' | 'retrieval' | 'custom';
+ type SpanType = 'llm' | 'agent' | 'tool' | 'retrieval' | 'embedding' | 'guardrail' | 'rerank' | 'custom';
  interface CaptureSpanOptions {
  /** Span type */
  type: SpanType;
@@ -71,40 +71,132 @@ declare function isEnabled(): boolean;
  declare function flush(): Promise<void>;

  /**
- * Observe Function
+ * Trace Context Module
  *
- * Main entry point for wrapping LLM clients with automatic tracing.
- */
-
- /**
- * Wrap an LLM client with automatic tracing
- *
- * @param client - OpenAI, Anthropic, or Bedrock client instance
- * @param options - Optional context (sessionId, userId, etc.)
- * @returns The wrapped client with the same type
+ * Provides AsyncLocalStorage-based context for grouping spans under a parent trace.
+ * Supports hierarchical tracing where:
+ * - trace() creates a root "agent" span
+ * - LLM calls become children of the root
+ * - Tool calls become children of the LLM that triggered them (via toolCallId linking)
  *
  * @example
- * import { observe } from '@lelemondev/sdk';
- * import OpenAI from 'openai';
+ * ```typescript
+ * import { trace, span } from '@lelemondev/sdk';
+ *
+ * await trace({ name: 'sales-agent', input: userMessage }, async () => {
+ * const response = await client.send(new ConverseCommand({...}));
+ * // Tools automatically linked to their parent LLM via toolCallId
+ * return response;
+ * });
+ * ```
+ */
+ interface TraceContext {
+ /** Unique trace ID (shared by all spans in this trace) */
+ traceId: string;
+ /** Root span ID (the agent/workflow span) */
+ rootSpanId: string;
+ /** Current span ID (for nesting - LLM calls become children of this) */
+ currentSpanId: string;
+ /** Parent span ID (for nested trace() calls) */
+ parentSpanId?: string;
+ /** Trace name */
+ name: string;
+ /** Start time in ms */
+ startTime: number;
+ /** Input data */
+ input?: unknown;
+ /** Trace metadata */
+ metadata?: Record<string, unknown>;
+ /** Trace tags */
+ tags?: string[];
+ /** Map of toolCallId → llmSpanId for linking tool spans to their parent LLM */
+ pendingToolCalls: Map<string, string>;
+ }
+ interface TraceOptions {
+ /** Name for the trace (e.g., 'sales-agent', 'rag-query') */
+ name: string;
+ /** Input data for the trace */
+ input?: unknown;
+ /** Custom metadata */
+ metadata?: Record<string, unknown>;
+ /** Tags for filtering */
+ tags?: string[];
+ }
+ interface SpanOptions {
+ /** Span type */
+ type: 'retrieval' | 'embedding' | 'tool' | 'guardrail' | 'rerank' | 'custom';
+ /** Span name (e.g., 'pinecone-search', 'cohere-rerank') */
+ name: string;
+ /** Input data */
+ input?: unknown;
+ /** Output data */
+ output?: unknown;
+ /** Duration in milliseconds (optional, will be set automatically if not provided) */
+ durationMs?: number;
+ /** Status */
+ status?: 'success' | 'error';
+ /** Error message if status is 'error' */
+ errorMessage?: string;
+ /** Tool call ID (links this tool span to the LLM that requested it) */
+ toolCallId?: string;
+ /** Custom metadata */
+ metadata?: Record<string, unknown>;
+ }
+ /**
+ * Get the current trace context, if any
+ */
+ declare function getTraceContext(): TraceContext | undefined;
+ /**
+ * Execute a function within a trace context.
+ * Creates a root "agent" span that contains all LLM calls and tool executions.
+ * The result of the function becomes the output of the root span.
  *
- * const openai = observe(new OpenAI());
+ * @example Simple usage (just name)
+ * ```typescript
+ * await trace('sales-agent', async () => {
+ * await client.send(new ConverseCommand({...}));
+ * return finalResponse;
+ * });
+ * ```
  *
- * // All calls are now automatically traced
- * const response = await openai.chat.completions.create({...});
+ * @example With options
+ * ```typescript
+ * await trace({ name: 'rag-query', input: question, tags: ['production'] }, async () => {
+ * const docs = await search(question);
+ * span({ type: 'retrieval', name: 'pinecone', output: { count: docs.length } });
+ * return client.send(new ConverseCommand({...}));
+ * });
+ * ```
  */
- declare function observe<T>(client: T, options?: ObserveOptions): T;
+ declare function trace<T>(nameOrOptions: string | TraceOptions, fn: () => Promise<T>): Promise<T>;
  /**
- * Create a scoped observe function with preset context
+ * Manually capture a span for non-LLM operations (retrieval, embedding, tool, etc.)
+ * Must be called within a trace() block.
  *
- * @example
- * const observeWithSession = createObserve({
- * sessionId: 'session-123',
- * userId: 'user-456',
+ * @example Tool with toolCallId (links to parent LLM)
+ * ```typescript
+ * span({
+ * type: 'tool',
+ * name: 'query_database',
+ * toolCallId: 'tooluse_abc123', // Links to LLM that requested this
+ * input: { sql: 'SELECT ...' },
+ * output: { rows: [...] },
+ * durationMs: 15,
  * });
+ * ```
  *
- * const openai = observeWithSession(new OpenAI());
+ * @example Retrieval without toolCallId
+ * ```typescript
+ * span({
+ * type: 'retrieval',
+ * name: 'pinecone-search',
+ * input: { topK: 5 },
+ * output: { count: 10 },
+ * durationMs: 50,
+ * });
+ * ```
  */
- declare function createObserve(defaultOptions: ObserveOptions): <T>(client: T, options?: ObserveOptions) => T;
+ declare function span(options: SpanOptions): void;

  /**
  * Capture Module
@@ -139,4 +231,4 @@ declare function createObserve(defaultOptions: ObserveOptions): <T>(client: T, o
  */
  declare function captureSpan(options: CaptureSpanOptions): void;

- export { type CaptureSpanOptions, type LelemonConfig, type ObserveOptions, type ProviderName, type SpanType, captureSpan, createObserve, flush, init, isEnabled, observe };
+ export { type CaptureSpanOptions, type LelemonConfig, type ObserveOptions, type ProviderName, type SpanOptions, type SpanType, type TraceContext, type TraceOptions, captureSpan, flush, getTraceContext, init, isEnabled, span, trace };
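
The declarations above replace the root-level observe()/createObserve() wrappers with trace() and span(). A hedged sketch of how they compose, assembled only from the signatures and JSDoc shown in this diff; searchDocs and callModel are hypothetical stand-ins for a real vector search and a real LLM call, not part of the SDK.

```typescript
// Sketch assembled from the trace()/span() declarations above.
import { init, trace, span } from '@lelemondev/sdk';

// Initialize once; apiKey falls back to the LELEMON_API_KEY env var (see config.ts source).
init();

// Hypothetical helpers standing in for retrieval and an LLM call.
async function searchDocs(question: string): Promise<string[]> {
  return [`doc related to: ${question}`];
}
async function callModel(question: string, docs: string[]): Promise<string> {
  return `answer to "${question}" using ${docs.length} doc(s)`;
}

export async function answer(question: string): Promise<string> {
  // trace() creates the root "agent" span; the callback's return value
  // becomes the root span's output.
  return trace({ name: 'rag-query', input: question, tags: ['example'] }, async () => {
    const started = Date.now();
    const docs = await searchDocs(question);

    // span() records a non-LLM step and must be called inside the trace() block.
    span({
      type: 'retrieval',
      name: 'doc-search',
      input: { question },
      output: { count: docs.length },
      durationMs: Date.now() - started,
    });

    return callModel(question, docs);
  });
}
```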
package/dist/index.d.ts CHANGED
@@ -17,7 +17,7 @@ interface LelemonConfig {
  /** Request timeout in ms (default: 10000) */
  requestTimeoutMs?: number;
  }
- type ProviderName = 'openai' | 'anthropic' | 'gemini' | 'bedrock' | 'openrouter' | 'unknown';
+ type ProviderName = 'openai' | 'anthropic' | 'gemini' | 'bedrock' | 'openrouter' | 'agent' | 'unknown';
  interface ObserveOptions {
  /** Session ID to group related calls */
  sessionId?: string;
@@ -28,7 +28,7 @@ interface ObserveOptions {
  /** Tags for filtering */
  tags?: string[];
  }
- type SpanType = 'llm' | 'tool' | 'retrieval' | 'custom';
+ type SpanType = 'llm' | 'agent' | 'tool' | 'retrieval' | 'embedding' | 'guardrail' | 'rerank' | 'custom';
  interface CaptureSpanOptions {
  /** Span type */
  type: SpanType;
@@ -71,40 +71,132 @@ declare function isEnabled(): boolean;
  declare function flush(): Promise<void>;

  /**
- * Observe Function
+ * Trace Context Module
  *
- * Main entry point for wrapping LLM clients with automatic tracing.
- */
-
- /**
- * Wrap an LLM client with automatic tracing
- *
- * @param client - OpenAI, Anthropic, or Bedrock client instance
- * @param options - Optional context (sessionId, userId, etc.)
- * @returns The wrapped client with the same type
+ * Provides AsyncLocalStorage-based context for grouping spans under a parent trace.
+ * Supports hierarchical tracing where:
+ * - trace() creates a root "agent" span
+ * - LLM calls become children of the root
+ * - Tool calls become children of the LLM that triggered them (via toolCallId linking)
  *
  * @example
- * import { observe } from '@lelemondev/sdk';
- * import OpenAI from 'openai';
+ * ```typescript
+ * import { trace, span } from '@lelemondev/sdk';
+ *
+ * await trace({ name: 'sales-agent', input: userMessage }, async () => {
+ * const response = await client.send(new ConverseCommand({...}));
+ * // Tools automatically linked to their parent LLM via toolCallId
+ * return response;
+ * });
+ * ```
+ */
+ interface TraceContext {
+ /** Unique trace ID (shared by all spans in this trace) */
+ traceId: string;
+ /** Root span ID (the agent/workflow span) */
+ rootSpanId: string;
+ /** Current span ID (for nesting - LLM calls become children of this) */
+ currentSpanId: string;
+ /** Parent span ID (for nested trace() calls) */
+ parentSpanId?: string;
+ /** Trace name */
+ name: string;
+ /** Start time in ms */
+ startTime: number;
+ /** Input data */
+ input?: unknown;
+ /** Trace metadata */
+ metadata?: Record<string, unknown>;
+ /** Trace tags */
+ tags?: string[];
+ /** Map of toolCallId → llmSpanId for linking tool spans to their parent LLM */
+ pendingToolCalls: Map<string, string>;
+ }
+ interface TraceOptions {
+ /** Name for the trace (e.g., 'sales-agent', 'rag-query') */
+ name: string;
+ /** Input data for the trace */
+ input?: unknown;
+ /** Custom metadata */
+ metadata?: Record<string, unknown>;
+ /** Tags for filtering */
+ tags?: string[];
+ }
+ interface SpanOptions {
+ /** Span type */
+ type: 'retrieval' | 'embedding' | 'tool' | 'guardrail' | 'rerank' | 'custom';
+ /** Span name (e.g., 'pinecone-search', 'cohere-rerank') */
+ name: string;
+ /** Input data */
+ input?: unknown;
+ /** Output data */
+ output?: unknown;
+ /** Duration in milliseconds (optional, will be set automatically if not provided) */
+ durationMs?: number;
+ /** Status */
+ status?: 'success' | 'error';
+ /** Error message if status is 'error' */
+ errorMessage?: string;
+ /** Tool call ID (links this tool span to the LLM that requested it) */
+ toolCallId?: string;
+ /** Custom metadata */
+ metadata?: Record<string, unknown>;
+ }
+ /**
+ * Get the current trace context, if any
+ */
+ declare function getTraceContext(): TraceContext | undefined;
+ /**
+ * Execute a function within a trace context.
+ * Creates a root "agent" span that contains all LLM calls and tool executions.
+ * The result of the function becomes the output of the root span.
  *
- * const openai = observe(new OpenAI());
+ * @example Simple usage (just name)
+ * ```typescript
+ * await trace('sales-agent', async () => {
+ * await client.send(new ConverseCommand({...}));
+ * return finalResponse;
+ * });
+ * ```
  *
- * // All calls are now automatically traced
- * const response = await openai.chat.completions.create({...});
+ * @example With options
+ * ```typescript
+ * await trace({ name: 'rag-query', input: question, tags: ['production'] }, async () => {
+ * const docs = await search(question);
+ * span({ type: 'retrieval', name: 'pinecone', output: { count: docs.length } });
+ * return client.send(new ConverseCommand({...}));
+ * });
+ * ```
  */
- declare function observe<T>(client: T, options?: ObserveOptions): T;
+ declare function trace<T>(nameOrOptions: string | TraceOptions, fn: () => Promise<T>): Promise<T>;
  /**
- * Create a scoped observe function with preset context
+ * Manually capture a span for non-LLM operations (retrieval, embedding, tool, etc.)
+ * Must be called within a trace() block.
  *
- * @example
- * const observeWithSession = createObserve({
- * sessionId: 'session-123',
- * userId: 'user-456',
+ * @example Tool with toolCallId (links to parent LLM)
+ * ```typescript
+ * span({
+ * type: 'tool',
+ * name: 'query_database',
+ * toolCallId: 'tooluse_abc123', // Links to LLM that requested this
+ * input: { sql: 'SELECT ...' },
+ * output: { rows: [...] },
+ * durationMs: 15,
  * });
+ * ```
  *
- * const openai = observeWithSession(new OpenAI());
+ * @example Retrieval without toolCallId
+ * ```typescript
+ * span({
+ * type: 'retrieval',
+ * name: 'pinecone-search',
+ * input: { topK: 5 },
+ * output: { count: 10 },
+ * durationMs: 50,
+ * });
+ * ```
  */
- declare function createObserve(defaultOptions: ObserveOptions): <T>(client: T, options?: ObserveOptions) => T;
+ declare function span(options: SpanOptions): void;

  /**
  * Capture Module
@@ -139,4 +231,4 @@ declare function createObserve(defaultOptions: ObserveOptions): <T>(client: T, o
  */
  declare function captureSpan(options: CaptureSpanOptions): void;

- export { type CaptureSpanOptions, type LelemonConfig, type ObserveOptions, type ProviderName, type SpanType, captureSpan, createObserve, flush, init, isEnabled, observe };
+ export { type CaptureSpanOptions, type LelemonConfig, type ObserveOptions, type ProviderName, type SpanOptions, type SpanType, type TraceContext, type TraceOptions, captureSpan, flush, getTraceContext, init, isEnabled, span, trace };
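
For reference, the root export list changes from { captureSpan, createObserve, flush, init, isEnabled, observe } in 0.7.0 to { captureSpan, flush, getTraceContext, init, isEnabled, span, trace } in 0.8.0, so code importing observe/createObserve from the package root needs to move to trace()/span(); the new per-provider entry points added under dist/ are not covered by this declaration diff. A hedged sketch of error reporting with the new span() fields, based only on the SpanOptions and TraceContext declarations above; runQuery and the toolCallId value are hypothetical.

```typescript
// Sketch of status/errorMessage reporting and getTraceContext(), per the declarations above.
import { init, trace, span, getTraceContext, flush } from '@lelemondev/sdk';

// Hypothetical placeholder for a real database call.
async function runQuery(sql: string): Promise<{ rows: unknown[] }> {
  return { rows: [] };
}

async function main(): Promise<void> {
  init(); // picks up LELEMON_API_KEY from the environment if set

  await trace('sales-agent', async () => {
    // The ambient context exposes the ids shared by every span in this trace.
    const ctx = getTraceContext();
    console.log('traceId:', ctx?.traceId, 'rootSpanId:', ctx?.rootSpanId);

    const started = Date.now();
    try {
      const result = await runQuery('SELECT 1');
      span({
        type: 'tool',
        name: 'query_database',
        toolCallId: 'tooluse_abc123', // hypothetical id from the model's tool-use block
        input: { sql: 'SELECT 1' },
        output: result,
        durationMs: Date.now() - started,
      });
      return result;
    } catch (err) {
      // Record the failed step using the status/errorMessage fields.
      span({
        type: 'tool',
        name: 'query_database',
        status: 'error',
        errorMessage: err instanceof Error ? err.message : String(err),
        durationMs: Date.now() - started,
      });
      throw err;
    }
  });

  await flush(); // ensure buffered spans are delivered before exit
}

main().catch(console.error);
```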