@lelemondev/sdk 0.2.1 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/README.md +164 -98
  2. package/dist/express-Cmb_A4sI.d.mts +47 -0
  3. package/dist/express-Cmb_A4sI.d.ts +47 -0
  4. package/dist/express.d.mts +1 -0
  5. package/dist/express.d.ts +1 -0
  6. package/dist/express.js +21 -0
  7. package/dist/express.js.map +1 -0
  8. package/dist/express.mjs +19 -0
  9. package/dist/express.mjs.map +1 -0
  10. package/dist/hono-ChTmQk_V.d.mts +61 -0
  11. package/dist/hono-ChTmQk_V.d.ts +61 -0
  12. package/dist/hono.d.mts +1 -0
  13. package/dist/hono.d.ts +1 -0
  14. package/dist/hono.js +23 -0
  15. package/dist/hono.js.map +1 -0
  16. package/dist/hono.mjs +21 -0
  17. package/dist/hono.mjs.map +1 -0
  18. package/dist/index.d.mts +50 -278
  19. package/dist/index.d.ts +50 -278
  20. package/dist/index.js +725 -525
  21. package/dist/index.js.map +1 -1
  22. package/dist/index.mjs +724 -521
  23. package/dist/index.mjs.map +1 -1
  24. package/dist/integrations.d.mts +4 -0
  25. package/dist/integrations.d.ts +4 -0
  26. package/dist/integrations.js +93 -0
  27. package/dist/integrations.js.map +1 -0
  28. package/dist/integrations.mjs +88 -0
  29. package/dist/integrations.mjs.map +1 -0
  30. package/dist/lambda-DQmEfWXC.d.mts +75 -0
  31. package/dist/lambda-DQmEfWXC.d.ts +75 -0
  32. package/dist/lambda.d.mts +1 -0
  33. package/dist/lambda.d.ts +1 -0
  34. package/dist/lambda.js +21 -0
  35. package/dist/lambda.js.map +1 -0
  36. package/dist/lambda.mjs +19 -0
  37. package/dist/lambda.mjs.map +1 -0
  38. package/dist/next-0nso_zEN.d.mts +94 -0
  39. package/dist/next-0nso_zEN.d.ts +94 -0
  40. package/dist/next.d.mts +1 -0
  41. package/dist/next.d.ts +1 -0
  42. package/dist/next.js +33 -0
  43. package/dist/next.js.map +1 -0
  44. package/dist/next.mjs +30 -0
  45. package/dist/next.mjs.map +1 -0
  46. package/package.json +39 -14
package/README.md CHANGED
@@ -4,15 +4,15 @@
  [![CI](https://github.com/lelemondev/lelemondev-sdk/actions/workflows/ci.yml/badge.svg)](https://github.com/lelemondev/lelemondev-sdk/actions/workflows/ci.yml)
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)

- Fire-and-forget LLM observability for Node.js. Track your AI agents with 3 lines of code.
+ Automatic LLM observability for Node.js. Wrap your client, everything is traced.

  ## Features

- - 🔥 **Fire-and-forget** - Never blocks your code
- - 📦 **Auto-batching** - Efficient network usage
- - **Zero config** - Works out of the box
- - 🛡️ **Error-safe** - Never crashes your app
- - 🌐 **Serverless-ready** - Built-in flush for Lambda/Vercel
+ - **Automatic Tracing** - Wrap your client, all calls are traced
+ - **Zero Config** - Works out of the box
+ - **Framework Integrations** - Next.js, Express, Lambda, Hono
+ - **Streaming Support** - Full support for streaming responses
+ - **Type-safe** - Preserves your client's TypeScript types

  ## Installation

@@ -23,151 +23,217 @@ npm install @lelemondev/sdk
  ## Quick Start

  ```typescript
- import { init, trace, flush } from '@lelemondev/sdk';
+ import { init, observe } from '@lelemondev/sdk';
+ import OpenAI from 'openai';

- // Initialize once at app startup
+ // 1. Initialize once
  init({ apiKey: process.env.LELEMON_API_KEY });

- // Trace your agent (fire-and-forget, no awaits needed!)
- const t = trace({ input: userMessage });
+ // 2. Wrap your client
+ const openai = observe(new OpenAI());

- try {
-   const result = await myAgent(userMessage);
-   t.success(result.messages); // Sync, doesn't block
- } catch (error) {
-   t.error(error); // Sync, doesn't block
-   throw error;
- }
-
- // For serverless: flush before response
- await flush();
+ // 3. Use normally - all calls traced automatically
+ const response = await openai.chat.completions.create({
+   model: 'gpt-4',
+   messages: [{ role: 'user', content: 'Hello!' }],
+ });
  ```

- ## API Reference
+ ## Framework Integrations

- ### `init(config)`
+ ### Next.js App Router

- Initialize the SDK. Call once at app startup.
+ ```typescript
+ // app/api/chat/route.ts
+ import { init, observe } from '@lelemondev/sdk';
+ import { withObserve } from '@lelemondev/sdk/next';
+ import { after } from 'next/server';
+ import OpenAI from 'openai';
+
+ init({ apiKey: process.env.LELEMON_API_KEY });
+ const openai = observe(new OpenAI());
+
+ export const POST = withObserve(
+   async (req) => {
+     const { message } = await req.json();
+     const result = await openai.chat.completions.create({
+       model: 'gpt-4',
+       messages: [{ role: 'user', content: message }],
+     });
+     return Response.json(result.choices[0].message);
+   },
+   { after } // Non-blocking flush (Next.js 15+)
+ );
+ ```
+
+ ### Express

  ```typescript
- init({
-   apiKey: 'le_xxx', // Required (or set LELEMON_API_KEY env var)
-   endpoint: 'https://...', // Optional, custom endpoint
-   debug: false, // Optional, enable debug logs
-   batchSize: 10, // Optional, items per batch
-   flushIntervalMs: 1000, // Optional, auto-flush interval
+ import express from 'express';
+ import { init, observe } from '@lelemondev/sdk';
+ import { createMiddleware } from '@lelemondev/sdk/express';
+ import OpenAI from 'openai';
+
+ init({ apiKey: process.env.LELEMON_API_KEY });
+ const openai = observe(new OpenAI());
+
+ const app = express();
+ app.use(createMiddleware()); // Auto-flush on response finish
+
+ app.post('/chat', async (req, res) => {
+   const result = await openai.chat.completions.create({
+     model: 'gpt-4',
+     messages: [{ role: 'user', content: req.body.message }],
+   });
+   res.json(result.choices[0].message);
  });
  ```

- ### `trace(options)`
-
- Start a new trace. Returns a `Trace` object.
+ ### AWS Lambda

  ```typescript
- const t = trace({
-   input: userMessage, // Required, the input to your agent
-   sessionId: 'session-123', // Optional, group related traces
-   userId: 'user-456', // Optional, identify the user
-   name: 'chat-agent', // Optional, name for this trace
-   metadata: { ... }, // Optional, custom metadata
-   tags: ['prod', 'v2'], // Optional, tags for filtering
+ import { init, observe } from '@lelemondev/sdk';
+ import { withObserve } from '@lelemondev/sdk/lambda';
+ import OpenAI from 'openai';
+
+ init({ apiKey: process.env.LELEMON_API_KEY });
+ const openai = observe(new OpenAI());
+
+ export const handler = withObserve(async (event) => {
+   const body = JSON.parse(event.body);
+   const result = await openai.chat.completions.create({
+     model: 'gpt-4',
+     messages: [{ role: 'user', content: body.message }],
+   });
+   return {
+     statusCode: 200,
+     body: JSON.stringify(result.choices[0].message),
+   };
  });
  ```

- ### `Trace.success(messages)`
-
- Complete the trace successfully. Fire-and-forget (no await needed).
+ ### Hono (Cloudflare Workers, Deno, Bun)

  ```typescript
- t.success(result.messages);
+ import { Hono } from 'hono';
+ import { init, observe } from '@lelemondev/sdk';
+ import { createMiddleware } from '@lelemondev/sdk/hono';
+ import OpenAI from 'openai';
+
+ init({ apiKey: process.env.LELEMON_API_KEY });
+ const openai = observe(new OpenAI());
+
+ const app = new Hono();
+ app.use(createMiddleware()); // Uses waitUntil on Workers
+
+ app.post('/chat', async (c) => {
+   const { message } = await c.req.json();
+   const result = await openai.chat.completions.create({
+     model: 'gpt-4',
+     messages: [{ role: 'user', content: message }],
+   });
+   return c.json(result.choices[0].message);
+ });
+
+ export default app;
  ```

- ### `Trace.error(error, messages?)`
+ ## Supported Providers
+
+ | Provider | Status | Methods |
+ |----------|--------|---------|
+ | OpenAI | Supported | `chat.completions.create()`, `completions.create()`, `embeddings.create()` |
+ | Anthropic | Supported | `messages.create()`, `messages.stream()` |
+
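+ For example, an Anthropic client is wrapped the same way (a minimal sketch; assumes the official `@anthropic-ai/sdk` package):
+
+ ```typescript
+ import Anthropic from '@anthropic-ai/sdk';
+ import { init, observe } from '@lelemondev/sdk';
+
+ init({ apiKey: process.env.LELEMON_API_KEY });
+ const anthropic = observe(new Anthropic());
+
+ // messages.create() is traced just like the OpenAI methods above
+ const message = await anthropic.messages.create({
+   model: 'claude-3-opus-20240229',
+   max_tokens: 1024,
+   messages: [{ role: 'user', content: 'Hello!' }],
+ });
+ ```
+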
+ ## API Reference
+
+ ### `init(config)`

- Complete the trace with an error. Fire-and-forget (no await needed).
+ Initialize the SDK. Call once at app startup.

  ```typescript
- t.error(error);
- t.error(error, partialMessages); // Include messages up to failure
+ init({
+   apiKey: 'le_xxx', // Required (or LELEMON_API_KEY env var)
+   endpoint: 'https://...', // Optional, custom endpoint
+   debug: false, // Optional, enable debug logs
+   disabled: false, // Optional, disable tracing
+   batchSize: 10, // Optional, items per batch
+   flushIntervalMs: 1000, // Optional, auto-flush interval
+ });
  ```

- ### `Trace.log(response)`
+ ### `observe(client, options?)`

- Log an LLM response for token tracking (optional).
+ Wrap an LLM client with automatic tracing.

  ```typescript
- const response = await openai.chat.completions.create(...);
- t.log(response); // Extracts model, tokens automatically
+ const openai = observe(new OpenAI(), {
+   sessionId: 'session-123',
+   userId: 'user-456',
+   metadata: { source: 'api' },
+   tags: ['production'],
+ });
  ```

  ### `flush()`

- Wait for all pending traces to be sent. Use in serverless environments.
+ Manually flush pending traces. Use in serverless without framework integration.

  ```typescript
  await flush();
  ```
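+
+ For instance, in a bare serverless handler that doesn't use one of the framework wrappers above, flushing in a `finally` block ensures traces are delivered before the runtime is frozen (a minimal sketch; `openai` is a client wrapped with `observe()`):
+
+ ```typescript
+ export const handler = async (event) => {
+   try {
+     const result = await openai.chat.completions.create({
+       model: 'gpt-4',
+       messages: [{ role: 'user', content: JSON.parse(event.body).message }],
+     });
+     return { statusCode: 200, body: JSON.stringify(result.choices[0].message) };
+   } finally {
+     await flush();
+   }
+ };
+ ```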
 
- ## Serverless Usage
+ ## Streaming

- ### Vercel (Next.js)
+ Both OpenAI and Anthropic streaming are fully supported:

  ```typescript
- import { waitUntil } from '@vercel/functions';
- import { trace, flush } from '@lelemondev/sdk';
-
- export async function POST(req: Request) {
-   const t = trace({ input: message });
-
-   try {
-     const result = await myAgent(message);
-     t.success(result);
-     return Response.json(result);
-   } catch (error) {
-     t.error(error);
-     throw error;
-   } finally {
-     waitUntil(flush()); // Flush after response
-   }
+ const stream = await openai.chat.completions.create({
+   model: 'gpt-4',
+   messages: [{ role: 'user', content: 'Hello!' }],
+   stream: true,
+ });
+
+ for await (const chunk of stream) {
+   process.stdout.write(chunk.choices[0]?.delta?.content || '');
  }
+ // Trace captured automatically when stream completes
  ```
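+
+ The same applies to Anthropic streaming (a sketch; assumes an `@anthropic-ai/sdk` client wrapped with `observe()` as shown under Supported Providers):
+
+ ```typescript
+ const stream = anthropic.messages.stream({
+   model: 'claude-3-opus-20240229',
+   max_tokens: 1024,
+   messages: [{ role: 'user', content: 'Hello!' }],
+ });
+
+ for await (const event of stream) {
+   if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') {
+     process.stdout.write(event.delta.text);
+   }
+ }
+ ```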
 
- ### AWS Lambda
+ ## What Gets Traced

- ```typescript
- import { trace, flush } from '@lelemondev/sdk';
-
- export const handler = async (event) => {
-   const t = trace({ input: event.body });
-
-   try {
-     const result = await myAgent(event.body);
-     t.success(result);
-     return { statusCode: 200, body: JSON.stringify(result) };
-   } catch (error) {
-     t.error(error);
-     throw error;
-   } finally {
-     await flush(); // Always flush before Lambda ends
-   }
- };
- ```
+ Each LLM call automatically captures:

- ## Supported Providers
+ - **Provider** - openai, anthropic
+ - **Model** - gpt-4, claude-3-opus, etc.
+ - **Input** - Messages/prompt (sanitized)
+ - **Output** - Response content
+ - **Tokens** - Input and output counts
+ - **Duration** - Request latency in ms
+ - **Status** - success or error
+ - **Streaming** - Whether streaming was used
+
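+ Conceptually, each trace is a record along these lines (an illustrative shape, not the exact wire format):
+
+ ```typescript
+ {
+   provider: 'openai',
+   model: 'gpt-4',
+   input: [{ role: 'user', content: 'Hello!' }],
+   output: 'Hi there!',
+   tokens: { input: 9, output: 4 },
+   durationMs: 412,
+   status: 'success',
+   streaming: false,
+ }
+ ```
+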
+ ## Security

- | Provider | Auto-detected |
- |----------|---------------|
- | OpenAI | ✅ |
- | Anthropic | ✅ |
- | Google Gemini | ✅ |
- | AWS Bedrock | ✅ |
+ The SDK automatically sanitizes sensitive data:
+
+ - API keys and tokens are redacted
+ - Large payloads are truncated
+ - Errors are captured safely

  ## Environment Variables

  | Variable | Description |
  |----------|-------------|
- | `LELEMON_API_KEY` | Your Lelemon API key (starts with `le_`) |
+ | `LELEMON_API_KEY` | Your API key (starts with `le_`) |

  ## License

- MIT © [Lelemon](https://lelemon.dev)
+ MIT
+
+ ## Sources
+
+ Framework integration patterns based on:
+ - [Next.js after() docs](https://nextjs.org/docs/app/api-reference/functions/after)
+ - [Vercel waitUntil](https://www.inngest.com/blog/vercel-cloudflare-wait-until)
+ - [Langfuse SDK patterns](https://langfuse.com/docs/observability/sdk/typescript/advanced-usage)
package/dist/express-Cmb_A4sI.d.mts ADDED
@@ -0,0 +1,47 @@
+ /**
+  * Express Integration
+  *
+  * Middleware that automatically flushes traces when response finishes.
+  *
+  * @example
+  * import express from 'express';
+  * import { createMiddleware } from '@lelemondev/sdk/express';
+  *
+  * const app = express();
+  * app.use(createMiddleware());
+  */
+ interface ExpressRequest {
+   [key: string]: unknown;
+ }
+ interface ExpressResponse {
+   on(event: 'finish' | 'close' | 'error', listener: () => void): this;
+   [key: string]: unknown;
+ }
+ type NextFunction = (error?: unknown) => void;
+ type ExpressMiddleware = (req: ExpressRequest, res: ExpressResponse, next: NextFunction) => void;
+ /**
+  * Create Express middleware for automatic trace flushing
+  *
+  * Flushes traces when the response finishes (after res.send/res.json).
+  * This is fire-and-forget and doesn't block the response.
+  *
+  * @returns Express middleware function
+  *
+  * @example
+  * // Global middleware
+  * app.use(createMiddleware());
+  *
+  * @example
+  * // Per-route middleware
+  * app.post('/chat', createMiddleware(), async (req, res) => {
+  *   res.json({ ok: true });
+  * });
+  */
+ declare function createMiddleware(): ExpressMiddleware;
+
+ declare const express_createMiddleware: typeof createMiddleware;
+ declare namespace express {
+   export { express_createMiddleware as createMiddleware };
+ }
+
+ export { createMiddleware as c, express as e };
package/dist/express-Cmb_A4sI.d.ts ADDED
@@ -0,0 +1,47 @@
+ /**
+  * Express Integration
+  *
+  * Middleware that automatically flushes traces when response finishes.
+  *
+  * @example
+  * import express from 'express';
+  * import { createMiddleware } from '@lelemondev/sdk/express';
+  *
+  * const app = express();
+  * app.use(createMiddleware());
+  */
+ interface ExpressRequest {
+   [key: string]: unknown;
+ }
+ interface ExpressResponse {
+   on(event: 'finish' | 'close' | 'error', listener: () => void): this;
+   [key: string]: unknown;
+ }
+ type NextFunction = (error?: unknown) => void;
+ type ExpressMiddleware = (req: ExpressRequest, res: ExpressResponse, next: NextFunction) => void;
+ /**
+  * Create Express middleware for automatic trace flushing
+  *
+  * Flushes traces when the response finishes (after res.send/res.json).
+  * This is fire-and-forget and doesn't block the response.
+  *
+  * @returns Express middleware function
+  *
+  * @example
+  * // Global middleware
+  * app.use(createMiddleware());
+  *
+  * @example
+  * // Per-route middleware
+  * app.post('/chat', createMiddleware(), async (req, res) => {
+  *   res.json({ ok: true });
+  * });
+  */
+ declare function createMiddleware(): ExpressMiddleware;
+
+ declare const express_createMiddleware: typeof createMiddleware;
+ declare namespace express {
+   export { express_createMiddleware as createMiddleware };
+ }
+
+ export { createMiddleware as c, express as e };
package/dist/express.d.mts ADDED
@@ -0,0 +1 @@
+ export { c as createMiddleware } from './express-Cmb_A4sI.mjs';
package/dist/express.d.ts ADDED
@@ -0,0 +1 @@
+ export { c as createMiddleware } from './express-Cmb_A4sI.js';
package/dist/express.js ADDED
@@ -0,0 +1,21 @@
+ 'use strict';
+
+ /* @lelemondev/sdk - LLM Observability */
+
+ async function flush() {
+ }
+
+ // src/integrations/express.ts
+ function createMiddleware() {
+   return (_req, res, next) => {
+     res.on("finish", () => {
+       flush().catch(() => {
+       });
+     });
+     next();
+   };
+ }
+
+ exports.createMiddleware = createMiddleware;
+ //# sourceMappingURL=express.js.map
+ //# sourceMappingURL=express.js.map
package/dist/express.js.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/core/config.ts","../src/integrations/express.ts"],"names":[],"mappings":";;;;AAuEA,eAAsB,KAAA,GAAuB;AAI7C;;;ACjBO,SAAS,gBAAA,GAAsC;AACpD,EAAA,OAAO,CAAC,IAAA,EAAM,GAAA,EAAK,IAAA,KAAS;AAE1B,IAAA,GAAA,CAAI,EAAA,CAAG,UAAU,MAAM;AACrB,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAEpB,CAAC,CAAA;AAAA,IACH,CAAC,CAAA;AAED,IAAA,IAAA,EAAK;AAAA,EACP,CAAA;AACF","file":"express.js","sourcesContent":["/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://api.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n globalTransport = createTransport(config);\n initialized = true;\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n console.warn(\n '[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.'\n );\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? 
!apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * Express Integration\n *\n * Middleware that automatically flushes traces when response finishes.\n *\n * @example\n * import express from 'express';\n * import { createMiddleware } from '@lelemondev/sdk/express';\n *\n * const app = express();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring express as dependency)\n// ─────────────────────────────────────────────────────────────\n\ninterface ExpressRequest {\n [key: string]: unknown;\n}\n\ninterface ExpressResponse {\n on(event: 'finish' | 'close' | 'error', listener: () => void): this;\n [key: string]: unknown;\n}\n\ntype NextFunction = (error?: unknown) => void;\n\ntype ExpressMiddleware = (\n req: ExpressRequest,\n res: ExpressResponse,\n next: NextFunction\n) => void;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Express middleware for automatic trace flushing\n *\n * Flushes traces when the response finishes (after res.send/res.json).\n * This is fire-and-forget and doesn't block the response.\n *\n * @returns Express middleware function\n *\n * @example\n * // Global middleware\n * app.use(createMiddleware());\n *\n * @example\n * // Per-route middleware\n * app.post('/chat', createMiddleware(), async (req, res) => {\n * res.json({ ok: true });\n * });\n */\nexport function createMiddleware(): ExpressMiddleware {\n return (_req, res, next) => {\n // Flush when response is finished (after headers + body sent)\n res.on('finish', () => {\n flush().catch(() => {\n // Silently ignore flush errors - fire and forget\n });\n });\n\n next();\n };\n}\n"]}
package/dist/express.mjs ADDED
@@ -0,0 +1,19 @@
+ /* @lelemondev/sdk - LLM Observability */
+
+ async function flush() {
+ }
+
+ // src/integrations/express.ts
+ function createMiddleware() {
+   return (_req, res, next) => {
+     res.on("finish", () => {
+       flush().catch(() => {
+       });
+     });
+     next();
+   };
+ }
+
+ export { createMiddleware };
+ //# sourceMappingURL=express.mjs.map
+ //# sourceMappingURL=express.mjs.map
package/dist/express.mjs.map ADDED
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/core/config.ts","../src/integrations/express.ts"],"names":[],"mappings":";;AAuEA,eAAsB,KAAA,GAAuB;AAI7C;;;ACjBO,SAAS,gBAAA,GAAsC;AACpD,EAAA,OAAO,CAAC,IAAA,EAAM,GAAA,EAAK,IAAA,KAAS;AAE1B,IAAA,GAAA,CAAI,EAAA,CAAG,UAAU,MAAM;AACrB,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAEpB,CAAC,CAAA;AAAA,IACH,CAAC,CAAA;AAED,IAAA,IAAA,EAAK;AAAA,EACP,CAAA;AACF","file":"express.mjs","sourcesContent":["/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://api.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n globalTransport = createTransport(config);\n initialized = true;\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n console.warn(\n '[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.'\n );\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? 
!apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * Express Integration\n *\n * Middleware that automatically flushes traces when response finishes.\n *\n * @example\n * import express from 'express';\n * import { createMiddleware } from '@lelemondev/sdk/express';\n *\n * const app = express();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring express as dependency)\n// ─────────────────────────────────────────────────────────────\n\ninterface ExpressRequest {\n [key: string]: unknown;\n}\n\ninterface ExpressResponse {\n on(event: 'finish' | 'close' | 'error', listener: () => void): this;\n [key: string]: unknown;\n}\n\ntype NextFunction = (error?: unknown) => void;\n\ntype ExpressMiddleware = (\n req: ExpressRequest,\n res: ExpressResponse,\n next: NextFunction\n) => void;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Express middleware for automatic trace flushing\n *\n * Flushes traces when the response finishes (after res.send/res.json).\n * This is fire-and-forget and doesn't block the response.\n *\n * @returns Express middleware function\n *\n * @example\n * // Global middleware\n * app.use(createMiddleware());\n *\n * @example\n * // Per-route middleware\n * app.post('/chat', createMiddleware(), async (req, res) => {\n * res.json({ ok: true });\n * });\n */\nexport function createMiddleware(): ExpressMiddleware {\n return (_req, res, next) => {\n // Flush when response is finished (after headers + body sent)\n res.on('finish', () => {\n flush().catch(() => {\n // Silently ignore flush errors - fire and forget\n });\n });\n\n next();\n };\n}\n"]}
package/dist/hono-ChTmQk_V.d.mts ADDED
@@ -0,0 +1,61 @@
+ /**
+  * Hono Integration
+  *
+  * Middleware for Hono framework (Cloudflare Workers, Deno, Bun, Node.js).
+  * Uses executionCtx.waitUntil() when available for non-blocking flush.
+  *
+  * @example
+  * import { Hono } from 'hono';
+  * import { createMiddleware } from '@lelemondev/sdk/hono';
+  *
+  * const app = new Hono();
+  * app.use(createMiddleware());
+  */
+ interface ExecutionContext {
+   waitUntil(promise: Promise<unknown>): void;
+   passThroughOnException(): void;
+ }
+ interface HonoContext {
+   req: {
+     raw: Request;
+     [key: string]: unknown;
+   };
+   res: Response | undefined;
+   executionCtx?: ExecutionContext;
+   [key: string]: unknown;
+ }
+ type NextFunction = () => Promise<void>;
+ type HonoMiddleware = (c: HonoContext, next: NextFunction) => Promise<void>;
+ /**
+  * Create Hono middleware for automatic trace flushing
+  *
+  * On Cloudflare Workers/Deno Deploy: uses executionCtx.waitUntil() for non-blocking flush
+  * On Node.js/Bun: flushes after response (fire-and-forget)
+  *
+  * @returns Hono middleware function
+  *
+  * @example
+  * import { Hono } from 'hono';
+  * import { createMiddleware } from '@lelemondev/sdk/hono';
+  *
+  * const app = new Hono();
+  *
+  * // Global middleware
+  * app.use(createMiddleware());
+  *
+  * app.post('/chat', async (c) => {
+  *   const openai = observe(new OpenAI());
+  *   const result = await openai.chat.completions.create({...});
+  *   return c.json(result);
+  * });
+  *
+  * export default app;
+  */
+ declare function createMiddleware(): HonoMiddleware;
+
+ declare const hono_createMiddleware: typeof createMiddleware;
+ declare namespace hono {
+   export { hono_createMiddleware as createMiddleware };
+ }
+
+ export { createMiddleware as c, hono as h };
package/dist/hono-ChTmQk_V.d.ts ADDED
@@ -0,0 +1,61 @@
+ /**
+  * Hono Integration
+  *
+  * Middleware for Hono framework (Cloudflare Workers, Deno, Bun, Node.js).
+  * Uses executionCtx.waitUntil() when available for non-blocking flush.
+  *
+  * @example
+  * import { Hono } from 'hono';
+  * import { createMiddleware } from '@lelemondev/sdk/hono';
+  *
+  * const app = new Hono();
+  * app.use(createMiddleware());
+  */
+ interface ExecutionContext {
+   waitUntil(promise: Promise<unknown>): void;
+   passThroughOnException(): void;
+ }
+ interface HonoContext {
+   req: {
+     raw: Request;
+     [key: string]: unknown;
+   };
+   res: Response | undefined;
+   executionCtx?: ExecutionContext;
+   [key: string]: unknown;
+ }
+ type NextFunction = () => Promise<void>;
+ type HonoMiddleware = (c: HonoContext, next: NextFunction) => Promise<void>;
+ /**
+  * Create Hono middleware for automatic trace flushing
+  *
+  * On Cloudflare Workers/Deno Deploy: uses executionCtx.waitUntil() for non-blocking flush
+  * On Node.js/Bun: flushes after response (fire-and-forget)
+  *
+  * @returns Hono middleware function
+  *
+  * @example
+  * import { Hono } from 'hono';
+  * import { createMiddleware } from '@lelemondev/sdk/hono';
+  *
+  * const app = new Hono();
+  *
+  * // Global middleware
+  * app.use(createMiddleware());
+  *
+  * app.post('/chat', async (c) => {
+  *   const openai = observe(new OpenAI());
+  *   const result = await openai.chat.completions.create({...});
+  *   return c.json(result);
+  * });
+  *
+  * export default app;
+  */
+ declare function createMiddleware(): HonoMiddleware;
+
+ declare const hono_createMiddleware: typeof createMiddleware;
+ declare namespace hono {
+   export { hono_createMiddleware as createMiddleware };
+ }
+
+ export { createMiddleware as c, hono as h };
package/dist/hono.d.mts ADDED
@@ -0,0 +1 @@
+ export { c as createMiddleware } from './hono-ChTmQk_V.mjs';
package/dist/hono.d.ts ADDED
@@ -0,0 +1 @@
+ export { c as createMiddleware } from './hono-ChTmQk_V.js';
package/dist/hono.js ADDED
@@ -0,0 +1,23 @@
+ 'use strict';
+
+ /* @lelemondev/sdk - LLM Observability */
+
+ async function flush() {
+ }
+
+ // src/integrations/hono.ts
+ function createMiddleware() {
+   return async (c, next) => {
+     await next();
+     if (c.executionCtx?.waitUntil) {
+       c.executionCtx.waitUntil(flush());
+     } else {
+       flush().catch(() => {
+       });
+     }
+   };
+ }
+
+ exports.createMiddleware = createMiddleware;
+ //# sourceMappingURL=hono.js.map
+ //# sourceMappingURL=hono.js.map