@lelemondev/sdk 0.3.0 → 0.4.0

package/README.md CHANGED
@@ -4,16 +4,15 @@
  [![CI](https://github.com/lelemondev/lelemondev-sdk/actions/workflows/ci.yml/badge.svg)](https://github.com/lelemondev/lelemondev-sdk/actions/workflows/ci.yml)
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)

- Automatic LLM observability for Node.js. Track your AI agents with zero code changes.
+ Automatic LLM observability for Node.js. Wrap your client, everything is traced.

  ## Features

- - **Automatic Tracing** - Wrap your client, everything is traced
- - **Fire-and-forget** - Never blocks your code
- - **Auto-batching** - Efficient network usage
+ - **Automatic Tracing** - Wrap your client, all calls are traced
+ - **Zero Config** - Works out of the box
+ - **Framework Integrations** - Next.js, Express, Lambda, Hono
  - **Streaming Support** - Full support for streaming responses
  - **Type-safe** - Preserves your client's TypeScript types
- - **Serverless-ready** - Built-in flush for Lambda/Vercel

  ## Installation

@@ -24,26 +23,120 @@ npm install @lelemondev/sdk
  ## Quick Start

  ```typescript
- import { init, observe, flush } from '@lelemondev/sdk';
+ import { init, observe } from '@lelemondev/sdk';
  import OpenAI from 'openai';

- // 1. Initialize once at app startup
+ // 1. Initialize once
  init({ apiKey: process.env.LELEMON_API_KEY });

  // 2. Wrap your client
  const openai = observe(new OpenAI());

- // 3. Use normally - all calls are traced automatically
+ // 3. Use normally - all calls traced automatically
  const response = await openai.chat.completions.create({
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Hello!' }],
  });
+ ```

- // 4. For serverless: flush before response
- await flush();
+ ## Framework Integrations
+
+ ### Next.js App Router
+
+ ```typescript
+ // app/api/chat/route.ts
+ import { init, observe } from '@lelemondev/sdk';
+ import { withObserve } from '@lelemondev/sdk/next';
+ import { after } from 'next/server';
+ import OpenAI from 'openai';
+
+ init({ apiKey: process.env.LELEMON_API_KEY });
+ const openai = observe(new OpenAI());
+
+ export const POST = withObserve(
+   async (req) => {
+     const { message } = await req.json();
+     const result = await openai.chat.completions.create({
+       model: 'gpt-4',
+       messages: [{ role: 'user', content: message }],
+     });
+     return Response.json(result.choices[0].message);
+   },
+   { after } // Non-blocking flush (Next.js 15+)
+ );
+ ```
+
+ ### Express
+
+ ```typescript
+ import express from 'express';
+ import { init, observe } from '@lelemondev/sdk';
+ import { createMiddleware } from '@lelemondev/sdk/express';
+ import OpenAI from 'openai';
+
+ init({ apiKey: process.env.LELEMON_API_KEY });
+ const openai = observe(new OpenAI());
+
+ const app = express();
+ app.use(createMiddleware()); // Auto-flush on response finish
+
+ app.post('/chat', async (req, res) => {
+   const result = await openai.chat.completions.create({
+     model: 'gpt-4',
+     messages: [{ role: 'user', content: req.body.message }],
+   });
+   res.json(result.choices[0].message);
+ });
+ ```
+
+ ### AWS Lambda
+
+ ```typescript
+ import { init, observe } from '@lelemondev/sdk';
+ import { withObserve } from '@lelemondev/sdk/lambda';
+ import OpenAI from 'openai';
+
+ init({ apiKey: process.env.LELEMON_API_KEY });
+ const openai = observe(new OpenAI());
+
+ export const handler = withObserve(async (event) => {
+   const body = JSON.parse(event.body);
+   const result = await openai.chat.completions.create({
+     model: 'gpt-4',
+     messages: [{ role: 'user', content: body.message }],
+   });
+   return {
+     statusCode: 200,
+     body: JSON.stringify(result.choices[0].message),
+   };
+ });
  ```

- That's it! No manual tracing code needed.
+ ### Hono (Cloudflare Workers, Deno, Bun)
+
+ ```typescript
+ import { Hono } from 'hono';
+ import { init, observe } from '@lelemondev/sdk';
+ import { createMiddleware } from '@lelemondev/sdk/hono';
+ import OpenAI from 'openai';
+
+ init({ apiKey: process.env.LELEMON_API_KEY });
+ const openai = observe(new OpenAI());
+
+ const app = new Hono();
+ app.use(createMiddleware()); // Uses waitUntil on Workers
+
+ app.post('/chat', async (c) => {
+   const { message } = await c.req.json();
+   const result = await openai.chat.completions.create({
+     model: 'gpt-4',
+     messages: [{ role: 'user', content: message }],
+   });
+   return c.json(result.choices[0].message);
+ });
+
+ export default app;
+ ```

  ## Supported Providers

@@ -60,69 +153,41 @@ Initialize the SDK. Call once at app startup.

  ```typescript
  init({
-   apiKey: 'le_xxx', // Required (or set LELEMON_API_KEY env var)
+   apiKey: 'le_xxx', // Required (or LELEMON_API_KEY env var)
    endpoint: 'https://...', // Optional, custom endpoint
    debug: false, // Optional, enable debug logs
-   disabled: false, // Optional, disable all tracing
+   disabled: false, // Optional, disable tracing
    batchSize: 10, // Optional, items per batch
    flushIntervalMs: 1000, // Optional, auto-flush interval
-   requestTimeoutMs: 10000, // Optional, HTTP request timeout
  });
  ```

  ### `observe(client, options?)`

- Wrap an LLM client with automatic tracing. Returns the same client type.
+ Wrap an LLM client with automatic tracing.

  ```typescript
- import Anthropic from '@anthropic-ai/sdk';
-
- const anthropic = observe(new Anthropic(), {
-   sessionId: 'session-123', // Optional, group related traces
-   userId: 'user-456', // Optional, identify the user
-   metadata: { source: 'api' }, // Optional, custom metadata
-   tags: ['production'], // Optional, tags for filtering
- });
- ```
-
- ### `createObserve(defaultOptions)`
-
- Create a scoped observe function with preset options.
-
- ```typescript
- const observeWithSession = createObserve({
+ const openai = observe(new OpenAI(), {
    sessionId: 'session-123',
    userId: 'user-456',
+   metadata: { source: 'api' },
+   tags: ['production'],
  });
-
- const openai = observeWithSession(new OpenAI());
- const anthropic = observeWithSession(new Anthropic());
  ```

  ### `flush()`

- Wait for all pending traces to be sent. Use in serverless environments.
+ Manually flush pending traces. Use in serverless without framework integration.

  ```typescript
  await flush();
  ```

- ### `isEnabled()`
-
- Check if tracing is enabled.
-
- ```typescript
- if (isEnabled()) {
-   console.log('Tracing is active');
- }
- ```
-
- ## Streaming Support
+ ## Streaming

  Both OpenAI and Anthropic streaming are fully supported:

  ```typescript
- // OpenAI streaming
  const stream = await openai.chat.completions.create({
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Hello!' }],
@@ -132,64 +197,7 @@ const stream = await openai.chat.completions.create({
  for await (const chunk of stream) {
    process.stdout.write(chunk.choices[0]?.delta?.content || '');
  }
- // Trace is captured automatically when stream completes
-
- // Anthropic streaming
- const stream = anthropic.messages.stream({
-   model: 'claude-3-opus-20240229',
-   max_tokens: 1024,
-   messages: [{ role: 'user', content: 'Hello!' }],
- });
-
- for await (const event of stream) {
-   // Process events
- }
- // Trace is captured automatically
- ```
-
- ## Serverless Usage
-
- ### Vercel (Next.js)
-
- ```typescript
- import { waitUntil } from '@vercel/functions';
- import { init, observe, flush } from '@lelemondev/sdk';
- import OpenAI from 'openai';
-
- init({ apiKey: process.env.LELEMON_API_KEY });
- const openai = observe(new OpenAI());
-
- export async function POST(req: Request) {
-   const { message } = await req.json();
-
-   const response = await openai.chat.completions.create({
-     model: 'gpt-4',
-     messages: [{ role: 'user', content: message }],
-   });
-
-   waitUntil(flush()); // Flush after response
-   return Response.json(response.choices[0].message);
- }
- ```
-
- ### AWS Lambda
-
- ```typescript
- import { init, observe, flush } from '@lelemondev/sdk';
- import OpenAI from 'openai';
-
- init({ apiKey: process.env.LELEMON_API_KEY });
- const openai = observe(new OpenAI());
-
- export const handler = async (event) => {
-   const response = await openai.chat.completions.create({
-     model: 'gpt-4',
-     messages: [{ role: 'user', content: event.body }],
-   });
-
-   await flush(); // Always flush before Lambda ends
-   return { statusCode: 200, body: JSON.stringify(response) };
- };
+ // Trace captured automatically when stream completes
  ```

  ## What Gets Traced
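The README asserts the trace is captured when the stream completes, but the SDK's stream-wrapping internals are not part of this diff. A minimal sketch of the general technique — wrapping the async iterator so completion can be recorded without consuming the stream — where `recordTrace` and its fields are hypothetical stand-ins, not the SDK's actual API:

```typescript
// Sketch only: observe stream completion by delegating iteration.
// `recordTrace` is a placeholder for whatever the SDK calls internally.
function traceStream<T>(
  stream: AsyncIterable<T>,
  recordTrace: (info: { chunks: number; durationMs: number }) => void
): AsyncIterable<T> {
  return {
    async *[Symbol.asyncIterator]() {
      const start = Date.now();
      let chunks = 0;
      try {
        for await (const chunk of stream) {
          chunks++;
          yield chunk; // pass every chunk through untouched
        }
      } finally {
        // Runs once the consumer finishes (or abandons) the stream
        recordTrace({ chunks, durationMs: Date.now() - start });
      }
    },
  };
}
```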
@@ -200,7 +208,7 @@ Each LLM call automatically captures:
  - **Model** - gpt-4, claude-3-opus, etc.
  - **Input** - Messages/prompt (sanitized)
  - **Output** - Response content
- - **Tokens** - Input and output token counts
+ - **Tokens** - Input and output counts
  - **Duration** - Request latency in ms
  - **Status** - success or error
  - **Streaming** - Whether streaming was used
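The trace schema itself is not shipped in this diff; as a sketch only, the captured fields listed above could be typed roughly like this (every name here is illustrative, not the SDK's actual wire format):

```typescript
// Illustrative shape derived from the README's list - not the real schema.
interface TraceRecord {
  model: string;                              // e.g. 'gpt-4', 'claude-3-opus'
  input: unknown;                             // messages/prompt, sanitized
  output: unknown;                            // response content
  tokens: { input: number; output: number };  // token counts
  durationMs: number;                         // request latency in ms
  status: 'success' | 'error';
  streaming: boolean;                         // whether streaming was used
}
```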
@@ -211,14 +219,21 @@ The SDK automatically sanitizes sensitive data:

  - API keys and tokens are redacted
  - Large payloads are truncated
- - Errors are captured without stack traces
+ - Errors are captured safely

  ## Environment Variables

  | Variable | Description |
  |----------|-------------|
- | `LELEMON_API_KEY` | Your Lelemon API key (starts with `le_`) |
+ | `LELEMON_API_KEY` | Your API key (starts with `le_`) |

  ## License

  MIT
+
+ ## Sources
+
+ Framework integration patterns based on:
+ - [Next.js after() docs](https://nextjs.org/docs/app/api-reference/functions/after)
+ - [Vercel waitUntil](https://www.inngest.com/blog/vercel-cloudflare-wait-until)
+ - [Langfuse SDK patterns](https://langfuse.com/docs/observability/sdk/typescript/advanced-usage)
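The new README leans on `withObserve` wrappers for Next.js and Lambda, but their implementations do not appear in this diff. A minimal sketch of what the Lambda variant plausibly does — awaiting `flush()` after the handler, mirroring the manual pattern the 0.3.0 README documented; the `Handler` type and the internals are assumptions:

```typescript
import { flush } from '@lelemondev/sdk';

type Handler<E, R> = (event: E) => Promise<R>;

// Sketch: wrap a handler so pending traces are flushed before the
// Lambda runtime freezes. The shipped withObserve may differ.
function withObserve<E, R>(handler: Handler<E, R>): Handler<E, R> {
  return async (event) => {
    try {
      return await handler(event);
    } finally {
      await flush(); // send pending traces even if the handler threw
    }
  };
}
```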
@@ -0,0 +1,47 @@
+ /**
+  * Express Integration
+  *
+  * Middleware that automatically flushes traces when response finishes.
+  *
+  * @example
+  * import express from 'express';
+  * import { createMiddleware } from '@lelemondev/sdk/express';
+  *
+  * const app = express();
+  * app.use(createMiddleware());
+  */
+ interface ExpressRequest {
+   [key: string]: unknown;
+ }
+ interface ExpressResponse {
+   on(event: 'finish' | 'close' | 'error', listener: () => void): this;
+   [key: string]: unknown;
+ }
+ type NextFunction = (error?: unknown) => void;
+ type ExpressMiddleware = (req: ExpressRequest, res: ExpressResponse, next: NextFunction) => void;
+ /**
+  * Create Express middleware for automatic trace flushing
+  *
+  * Flushes traces when the response finishes (after res.send/res.json).
+  * This is fire-and-forget and doesn't block the response.
+  *
+  * @returns Express middleware function
+  *
+  * @example
+  * // Global middleware
+  * app.use(createMiddleware());
+  *
+  * @example
+  * // Per-route middleware
+  * app.post('/chat', createMiddleware(), async (req, res) => {
+  *   res.json({ ok: true });
+  * });
+  */
+ declare function createMiddleware(): ExpressMiddleware;
+
+ declare const express_createMiddleware: typeof createMiddleware;
+ declare namespace express {
+   export { express_createMiddleware as createMiddleware };
+ }
+
+ export { createMiddleware as c, express as e };
@@ -0,0 +1,47 @@
+ /**
+  * Express Integration
+  *
+  * Middleware that automatically flushes traces when response finishes.
+  *
+  * @example
+  * import express from 'express';
+  * import { createMiddleware } from '@lelemondev/sdk/express';
+  *
+  * const app = express();
+  * app.use(createMiddleware());
+  */
+ interface ExpressRequest {
+   [key: string]: unknown;
+ }
+ interface ExpressResponse {
+   on(event: 'finish' | 'close' | 'error', listener: () => void): this;
+   [key: string]: unknown;
+ }
+ type NextFunction = (error?: unknown) => void;
+ type ExpressMiddleware = (req: ExpressRequest, res: ExpressResponse, next: NextFunction) => void;
+ /**
+  * Create Express middleware for automatic trace flushing
+  *
+  * Flushes traces when the response finishes (after res.send/res.json).
+  * This is fire-and-forget and doesn't block the response.
+  *
+  * @returns Express middleware function
+  *
+  * @example
+  * // Global middleware
+  * app.use(createMiddleware());
+  *
+  * @example
+  * // Per-route middleware
+  * app.post('/chat', createMiddleware(), async (req, res) => {
+  *   res.json({ ok: true });
+  * });
+  */
+ declare function createMiddleware(): ExpressMiddleware;
+
+ declare const express_createMiddleware: typeof createMiddleware;
+ declare namespace express {
+   export { express_createMiddleware as createMiddleware };
+ }
+
+ export { createMiddleware as c, express as e };
@@ -0,0 +1 @@
+ export { c as createMiddleware } from './express-Cmb_A4sI.mjs';
@@ -0,0 +1 @@
+ export { c as createMiddleware } from './express-Cmb_A4sI.js';
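These one-line entry files re-export the shared chunk under each module format. For the `@lelemondev/sdk/express` subpath the README uses to resolve to them, the package's `exports` map would look roughly like this — a sketch of the usual dual-format convention, not the actual package.json from this diff:

```json
{
  "exports": {
    "./express": {
      "import": "./dist/express.mjs",
      "require": "./dist/express.js"
    }
  }
}
```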
@@ -0,0 +1,21 @@
+ 'use strict';
+
+ /* @lelemondev/sdk - LLM Observability */
+
+ async function flush() {
+ }
+
+ // src/integrations/express.ts
+ function createMiddleware() {
+   return (_req, res, next) => {
+     res.on("finish", () => {
+       flush().catch(() => {
+       });
+     });
+     next();
+   };
+ }
+
+ exports.createMiddleware = createMiddleware;
+ //# sourceMappingURL=express.js.map
+ //# sourceMappingURL=express.js.map
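The bundle registers the flush on `res.on('finish')` and swallows errors, so a failed flush never affects the response. For reference, the same wiring hand-rolled against real Express types, using only the SDK's public `flush` export:

```typescript
import { flush } from '@lelemondev/sdk';
import type { Request, Response, NextFunction } from 'express';

// Hand-rolled equivalent of createMiddleware(): flush once the
// response has been fully sent, without blocking or throwing.
function flushOnFinish(_req: Request, res: Response, next: NextFunction) {
  res.on('finish', () => {
    flush().catch(() => {
      // fire-and-forget: a failed flush must not surface to the client
    });
  });
  next();
}
```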
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/core/config.ts","../src/integrations/express.ts"],"names":[],"mappings":";;;;AAuEA,eAAsB,KAAA,GAAuB;AAI7C;;;ACjBO,SAAS,gBAAA,GAAsC;AACpD,EAAA,OAAO,CAAC,IAAA,EAAM,GAAA,EAAK,IAAA,KAAS;AAE1B,IAAA,GAAA,CAAI,EAAA,CAAG,UAAU,MAAM;AACrB,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAEpB,CAAC,CAAA;AAAA,IACH,CAAC,CAAA;AAED,IAAA,IAAA,EAAK;AAAA,EACP,CAAA;AACF","file":"express.js","sourcesContent":["/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://api.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n globalTransport = createTransport(config);\n initialized = true;\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n console.warn(\n '[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.'\n );\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? 
!apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * Express Integration\n *\n * Middleware that automatically flushes traces when response finishes.\n *\n * @example\n * import express from 'express';\n * import { createMiddleware } from '@lelemondev/sdk/express';\n *\n * const app = express();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring express as dependency)\n// ─────────────────────────────────────────────────────────────\n\ninterface ExpressRequest {\n [key: string]: unknown;\n}\n\ninterface ExpressResponse {\n on(event: 'finish' | 'close' | 'error', listener: () => void): this;\n [key: string]: unknown;\n}\n\ntype NextFunction = (error?: unknown) => void;\n\ntype ExpressMiddleware = (\n req: ExpressRequest,\n res: ExpressResponse,\n next: NextFunction\n) => void;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Express middleware for automatic trace flushing\n *\n * Flushes traces when the response finishes (after res.send/res.json).\n * This is fire-and-forget and doesn't block the response.\n *\n * @returns Express middleware function\n *\n * @example\n * // Global middleware\n * app.use(createMiddleware());\n *\n * @example\n * // Per-route middleware\n * app.post('/chat', createMiddleware(), async (req, res) => {\n * res.json({ ok: true });\n * });\n */\nexport function createMiddleware(): ExpressMiddleware {\n return (_req, res, next) => {\n // Flush when response is finished (after headers + body sent)\n res.on('finish', () => {\n flush().catch(() => {\n // Silently ignore flush errors - fire and forget\n });\n });\n\n next();\n };\n}\n"]}
@@ -0,0 +1,19 @@
+ /* @lelemondev/sdk - LLM Observability */
+
+ async function flush() {
+ }
+
+ // src/integrations/express.ts
+ function createMiddleware() {
+   return (_req, res, next) => {
+     res.on("finish", () => {
+       flush().catch(() => {
+       });
+     });
+     next();
+   };
+ }
+
+ export { createMiddleware };
+ //# sourceMappingURL=express.mjs.map
+ //# sourceMappingURL=express.mjs.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/core/config.ts","../src/integrations/express.ts"],"names":[],"mappings":";;AAuEA,eAAsB,KAAA,GAAuB;AAI7C;;;ACjBO,SAAS,gBAAA,GAAsC;AACpD,EAAA,OAAO,CAAC,IAAA,EAAM,GAAA,EAAK,IAAA,KAAS;AAE1B,IAAA,GAAA,CAAI,EAAA,CAAG,UAAU,MAAM;AACrB,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAEpB,CAAC,CAAA;AAAA,IACH,CAAC,CAAA;AAED,IAAA,IAAA,EAAK;AAAA,EACP,CAAA;AACF","file":"express.mjs","sourcesContent":["/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://api.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n globalTransport = createTransport(config);\n initialized = true;\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n console.warn(\n '[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.'\n );\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? 
!apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * Express Integration\n *\n * Middleware that automatically flushes traces when response finishes.\n *\n * @example\n * import express from 'express';\n * import { createMiddleware } from '@lelemondev/sdk/express';\n *\n * const app = express();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring express as dependency)\n// ─────────────────────────────────────────────────────────────\n\ninterface ExpressRequest {\n [key: string]: unknown;\n}\n\ninterface ExpressResponse {\n on(event: 'finish' | 'close' | 'error', listener: () => void): this;\n [key: string]: unknown;\n}\n\ntype NextFunction = (error?: unknown) => void;\n\ntype ExpressMiddleware = (\n req: ExpressRequest,\n res: ExpressResponse,\n next: NextFunction\n) => void;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Express middleware for automatic trace flushing\n *\n * Flushes traces when the response finishes (after res.send/res.json).\n * This is fire-and-forget and doesn't block the response.\n *\n * @returns Express middleware function\n *\n * @example\n * // Global middleware\n * app.use(createMiddleware());\n *\n * @example\n * // Per-route middleware\n * app.post('/chat', createMiddleware(), async (req, res) => {\n * res.json({ ok: true });\n * });\n */\nexport function createMiddleware(): ExpressMiddleware {\n return (_req, res, next) => {\n // Flush when response is finished (after headers + body sent)\n res.on('finish', () => {\n flush().catch(() => {\n // Silently ignore flush errors - fire and forget\n });\n });\n\n next();\n };\n}\n"]}
@@ -0,0 +1,61 @@
+ /**
+  * Hono Integration
+  *
+  * Middleware for Hono framework (Cloudflare Workers, Deno, Bun, Node.js).
+  * Uses executionCtx.waitUntil() when available for non-blocking flush.
+  *
+  * @example
+  * import { Hono } from 'hono';
+  * import { createMiddleware } from '@lelemondev/sdk/hono';
+  *
+  * const app = new Hono();
+  * app.use(createMiddleware());
+  */
+ interface ExecutionContext {
+   waitUntil(promise: Promise<unknown>): void;
+   passThroughOnException(): void;
+ }
+ interface HonoContext {
+   req: {
+     raw: Request;
+     [key: string]: unknown;
+   };
+   res: Response | undefined;
+   executionCtx?: ExecutionContext;
+   [key: string]: unknown;
+ }
+ type NextFunction = () => Promise<void>;
+ type HonoMiddleware = (c: HonoContext, next: NextFunction) => Promise<void>;
+ /**
+  * Create Hono middleware for automatic trace flushing
+  *
+  * On Cloudflare Workers/Deno Deploy: uses executionCtx.waitUntil() for non-blocking flush
+  * On Node.js/Bun: flushes after response (fire-and-forget)
+  *
+  * @returns Hono middleware function
+  *
+  * @example
+  * import { Hono } from 'hono';
+  * import { createMiddleware } from '@lelemondev/sdk/hono';
+  *
+  * const app = new Hono();
+  *
+  * // Global middleware
+  * app.use(createMiddleware());
+  *
+  * app.post('/chat', async (c) => {
+  *   const openai = observe(new OpenAI());
+  *   const result = await openai.chat.completions.create({...});
+  *   return c.json(result);
+  * });
+  *
+  * export default app;
+  */
+ declare function createMiddleware(): HonoMiddleware;
+
+ declare const hono_createMiddleware: typeof createMiddleware;
+ declare namespace hono {
+   export { hono_createMiddleware as createMiddleware };
+ }
+
+ export { createMiddleware as c, hono as h };
@@ -0,0 +1,61 @@
+ /**
+  * Hono Integration
+  *
+  * Middleware for Hono framework (Cloudflare Workers, Deno, Bun, Node.js).
+  * Uses executionCtx.waitUntil() when available for non-blocking flush.
+  *
+  * @example
+  * import { Hono } from 'hono';
+  * import { createMiddleware } from '@lelemondev/sdk/hono';
+  *
+  * const app = new Hono();
+  * app.use(createMiddleware());
+  */
+ interface ExecutionContext {
+   waitUntil(promise: Promise<unknown>): void;
+   passThroughOnException(): void;
+ }
+ interface HonoContext {
+   req: {
+     raw: Request;
+     [key: string]: unknown;
+   };
+   res: Response | undefined;
+   executionCtx?: ExecutionContext;
+   [key: string]: unknown;
+ }
+ type NextFunction = () => Promise<void>;
+ type HonoMiddleware = (c: HonoContext, next: NextFunction) => Promise<void>;
+ /**
+  * Create Hono middleware for automatic trace flushing
+  *
+  * On Cloudflare Workers/Deno Deploy: uses executionCtx.waitUntil() for non-blocking flush
+  * On Node.js/Bun: flushes after response (fire-and-forget)
+  *
+  * @returns Hono middleware function
+  *
+  * @example
+  * import { Hono } from 'hono';
+  * import { createMiddleware } from '@lelemondev/sdk/hono';
+  *
+  * const app = new Hono();
+  *
+  * // Global middleware
+  * app.use(createMiddleware());
+  *
+  * app.post('/chat', async (c) => {
+  *   const openai = observe(new OpenAI());
+  *   const result = await openai.chat.completions.create({...});
+  *   return c.json(result);
+  * });
+  *
+  * export default app;
+  */
+ declare function createMiddleware(): HonoMiddleware;
+
+ declare const hono_createMiddleware: typeof createMiddleware;
+ declare namespace hono {
+   export { hono_createMiddleware as createMiddleware };
+ }
+
+ export { createMiddleware as c, hono as h };
@@ -0,0 +1 @@
+ export { c as createMiddleware } from './hono-ChTmQk_V.mjs';
package/dist/hono.d.ts ADDED
@@ -0,0 +1 @@
+ export { c as createMiddleware } from './hono-ChTmQk_V.js';
package/dist/hono.js ADDED
@@ -0,0 +1,23 @@
+ 'use strict';
+
+ /* @lelemondev/sdk - LLM Observability */
+
+ async function flush() {
+ }
+
+ // src/integrations/hono.ts
+ function createMiddleware() {
+   return async (c, next) => {
+     await next();
+     if (c.executionCtx?.waitUntil) {
+       c.executionCtx.waitUntil(flush());
+     } else {
+       flush().catch(() => {
+       });
+     }
+   };
+ }
+
+ exports.createMiddleware = createMiddleware;
+ //# sourceMappingURL=hono.js.map
+ //# sourceMappingURL=hono.js.map
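Because the middleware awaits `next()` before scheduling the flush, it must be registered before the routes it should cover: the matched handler runs first, then `waitUntil` (on Workers) keeps the isolate alive while the flush completes. A hand-rolled sketch of the same pattern in a real Hono app, using only the SDK's public `flush` export; note that Hono's own `c.executionCtx` getter throws outside Workers, so this version guards with try/catch rather than optional chaining:

```typescript
import { Hono } from 'hono';
import { flush } from '@lelemondev/sdk';

const app = new Hono();

// Register before routes: next() runs the matched handler first,
// then the flush is scheduled once the response is ready.
app.use(async (c, next) => {
  await next();
  try {
    // Workers/Deno Deploy: extend the request lifetime past the response
    c.executionCtx.waitUntil(flush());
  } catch {
    // Node.js/Bun: no executionCtx - fire-and-forget
    flush().catch(() => {});
  }
});
```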
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/core/config.ts","../src/integrations/hono.ts"],"names":[],"mappings":";;;;AAuEA,eAAsB,KAAA,GAAuB;AAI7C;;;ACPO,SAAS,gBAAA,GAAmC;AACjD,EAAA,OAAO,OAAO,GAAG,IAAA,KAAS;AACxB,IAAA,MAAM,IAAA,EAAK;AAGX,IAAA,IAAI,CAAA,CAAE,cAAc,SAAA,EAAW;AAC7B,MAAA,CAAA,CAAE,YAAA,CAAa,SAAA,CAAU,KAAA,EAAO,CAAA;AAAA,IAClC,CAAA,MAAO;AAEL,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAAC,CAAC,CAAA;AAAA,IACxB;AAAA,EACF,CAAA;AACF","file":"hono.js","sourcesContent":["/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://api.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n globalTransport = createTransport(config);\n initialized = true;\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n console.warn(\n '[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.'\n );\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? 
!apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * Hono Integration\n *\n * Middleware for Hono framework (Cloudflare Workers, Deno, Bun, Node.js).\n * Uses executionCtx.waitUntil() when available for non-blocking flush.\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring hono as dependency)\n// ─────────────────────────────────────────────────────────────\n\ninterface ExecutionContext {\n waitUntil(promise: Promise<unknown>): void;\n passThroughOnException(): void;\n}\n\ninterface HonoContext {\n req: {\n raw: Request;\n [key: string]: unknown;\n };\n res: Response | undefined;\n executionCtx?: ExecutionContext;\n [key: string]: unknown;\n}\n\ntype NextFunction = () => Promise<void>;\n\ntype HonoMiddleware = (c: HonoContext, next: NextFunction) => Promise<void>;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Hono middleware for automatic trace flushing\n *\n * On Cloudflare Workers/Deno Deploy: uses executionCtx.waitUntil() for non-blocking flush\n * On Node.js/Bun: flushes after response (fire-and-forget)\n *\n * @returns Hono middleware function\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n *\n * // Global middleware\n * app.use(createMiddleware());\n *\n * app.post('/chat', async (c) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return c.json(result);\n * });\n *\n * export default app;\n */\nexport function createMiddleware(): HonoMiddleware {\n return async (c, next) => {\n await next();\n\n // Use waitUntil if available (Cloudflare Workers, Deno Deploy)\n if (c.executionCtx?.waitUntil) {\n c.executionCtx.waitUntil(flush());\n } else {\n // Fire-and-forget for Node.js/Bun\n flush().catch(() => {});\n }\n };\n}\n"]}