@lelemondev/sdk 0.3.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +123 -108
- package/dist/express-Cmb_A4sI.d.mts +47 -0
- package/dist/express-Cmb_A4sI.d.ts +47 -0
- package/dist/express.d.mts +1 -0
- package/dist/express.d.ts +1 -0
- package/dist/express.js +21 -0
- package/dist/express.js.map +1 -0
- package/dist/express.mjs +19 -0
- package/dist/express.mjs.map +1 -0
- package/dist/hono-ChTmQk_V.d.mts +61 -0
- package/dist/hono-ChTmQk_V.d.ts +61 -0
- package/dist/hono.d.mts +1 -0
- package/dist/hono.d.ts +1 -0
- package/dist/hono.js +23 -0
- package/dist/hono.js.map +1 -0
- package/dist/hono.mjs +21 -0
- package/dist/hono.mjs.map +1 -0
- package/dist/index.js +1 -1
- package/dist/index.mjs +1 -1
- package/dist/integrations.d.mts +4 -0
- package/dist/integrations.d.ts +4 -0
- package/dist/integrations.js +93 -0
- package/dist/integrations.js.map +1 -0
- package/dist/integrations.mjs +88 -0
- package/dist/integrations.mjs.map +1 -0
- package/dist/lambda-DQmEfWXC.d.mts +75 -0
- package/dist/lambda-DQmEfWXC.d.ts +75 -0
- package/dist/lambda.d.mts +1 -0
- package/dist/lambda.d.ts +1 -0
- package/dist/lambda.js +21 -0
- package/dist/lambda.js.map +1 -0
- package/dist/lambda.mjs +19 -0
- package/dist/lambda.mjs.map +1 -0
- package/dist/next-0nso_zEN.d.mts +94 -0
- package/dist/next-0nso_zEN.d.ts +94 -0
- package/dist/next.d.mts +1 -0
- package/dist/next.d.ts +1 -0
- package/dist/next.js +33 -0
- package/dist/next.js.map +1 -0
- package/dist/next.mjs +30 -0
- package/dist/next.mjs.map +1 -0
- package/package.json +38 -13
package/dist/hono.mjs
ADDED
@@ -0,0 +1,21 @@
/* @lelemondev/sdk - LLM Observability */

async function flush() {
}

// src/integrations/hono.ts
function createMiddleware() {
  return async (c, next) => {
    await next();
    if (c.executionCtx?.waitUntil) {
      c.executionCtx.waitUntil(flush());
    } else {
      flush().catch(() => {
      });
    }
  };
}

export { createMiddleware };
//# sourceMappingURL=hono.mjs.map
//# sourceMappingURL=hono.mjs.map
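The JSDoc embedded in the source map below documents how this middleware is meant to be wired up. A minimal sketch based on those @example blocks (the `@lelemondev/sdk/hono` subpath comes from that JSDoc, not from anything else in this diff):

import { Hono } from 'hono';
import { createMiddleware } from '@lelemondev/sdk/hono';

const app = new Hono();
// Flush traces after each response: executionCtx.waitUntil() on
// Cloudflare Workers/Deno Deploy, fire-and-forget on Node.js/Bun.
app.use(createMiddleware());

export default app;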
package/dist/hono.mjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/core/config.ts","../src/integrations/hono.ts"],"names":[],"mappings":";;AAuEA,eAAsB,KAAA,GAAuB;AAI7C;;;ACPO,SAAS,gBAAA,GAAmC;AACjD,EAAA,OAAO,OAAO,GAAG,IAAA,KAAS;AACxB,IAAA,MAAM,IAAA,EAAK;AAGX,IAAA,IAAI,CAAA,CAAE,cAAc,SAAA,EAAW;AAC7B,MAAA,CAAA,CAAE,YAAA,CAAa,SAAA,CAAU,KAAA,EAAO,CAAA;AAAA,IAClC,CAAA,MAAO;AAEL,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAAC,CAAC,CAAA;AAAA,IACxB;AAAA,EACF,CAAA;AACF","file":"hono.mjs","sourcesContent":["/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://api.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n globalTransport = createTransport(config);\n initialized = true;\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n console.warn(\n '[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.'\n );\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? 
!apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * Hono Integration\n *\n * Middleware for Hono framework (Cloudflare Workers, Deno, Bun, Node.js).\n * Uses executionCtx.waitUntil() when available for non-blocking flush.\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring hono as dependency)\n// ─────────────────────────────────────────────────────────────\n\ninterface ExecutionContext {\n waitUntil(promise: Promise<unknown>): void;\n passThroughOnException(): void;\n}\n\ninterface HonoContext {\n req: {\n raw: Request;\n [key: string]: unknown;\n };\n res: Response | undefined;\n executionCtx?: ExecutionContext;\n [key: string]: unknown;\n}\n\ntype NextFunction = () => Promise<void>;\n\ntype HonoMiddleware = (c: HonoContext, next: NextFunction) => Promise<void>;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Hono middleware for automatic trace flushing\n *\n * On Cloudflare Workers/Deno Deploy: uses executionCtx.waitUntil() for non-blocking flush\n * On Node.js/Bun: flushes after response (fire-and-forget)\n *\n * @returns Hono middleware function\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n *\n * // Global middleware\n * app.use(createMiddleware());\n *\n * app.post('/chat', async (c) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return c.json(result);\n * });\n *\n * export default app;\n */\nexport function createMiddleware(): HonoMiddleware {\n return async (c, next) => {\n await next();\n\n // Use waitUntil if available (Cloudflare Workers, Deno Deploy)\n if (c.executionCtx?.waitUntil) {\n c.executionCtx.waitUntil(flush());\n } else {\n // Fire-and-forget for Node.js/Bun\n flush().catch(() => {});\n }\n };\n}\n"]}
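Because the map inlines `sourcesContent`, the original TypeScript can be recovered straight from the published file. A small sketch using standard source map v3 fields; the install path is a hypothetical example, and the script must run as an ES module for top-level await:

import { readFile } from 'node:fs/promises';

// Hypothetical local path; adjust to wherever the package is installed.
const map = JSON.parse(
  await readFile('node_modules/@lelemondev/sdk/dist/hono.mjs.map', 'utf8')
);
for (const [i, src] of map.sources.entries()) {
  console.log(`--- ${src} ---`);
  console.log(map.sourcesContent?.[i] ?? '(not embedded)');
}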
package/dist/index.js
CHANGED
@@ -1,6 +1,6 @@
 'use strict';
 
-/* @
+/* @lelemondev/sdk - LLM Observability */
 var __defProp = Object.defineProperty;
 var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
 var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
package/dist/index.mjs
CHANGED
@@ -1,4 +1,4 @@
-/* @
+/* @lelemondev/sdk - LLM Observability */
 var __defProp = Object.defineProperty;
 var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
 var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
package/dist/integrations.js
ADDED
@@ -0,0 +1,93 @@
'use strict';

/* @lelemondev/sdk - LLM Observability */
var __defProp = Object.defineProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};

// src/integrations/next.ts
var next_exports = {};
__export(next_exports, {
  createWrapper: () => createWrapper,
  withObserve: () => withObserve
});
async function flush() {
}

// src/integrations/next.ts
function withObserve(handler, options) {
  return async (request, context) => {
    try {
      return await handler(request, context);
    } finally {
      if (options?.after) {
        options.after(() => flush());
      } else if (options?.waitUntil) {
        options.waitUntil(flush());
      } else {
        await flush();
      }
    }
  };
}
function createWrapper(defaultOptions) {
  return function(handler, options) {
    return withObserve(handler, { ...defaultOptions, ...options });
  };
}

// src/integrations/lambda.ts
var lambda_exports = {};
__export(lambda_exports, {
  withObserve: () => withObserve2
});
function withObserve2(handler) {
  return async (event, context) => {
    try {
      return await handler(event, context);
    } finally {
      await flush();
    }
  };
}

// src/integrations/express.ts
var express_exports = {};
__export(express_exports, {
  createMiddleware: () => createMiddleware
});
function createMiddleware() {
  return (_req, res, next) => {
    res.on("finish", () => {
      flush().catch(() => {
      });
    });
    next();
  };
}

// src/integrations/hono.ts
var hono_exports = {};
__export(hono_exports, {
  createMiddleware: () => createMiddleware2
});
function createMiddleware2() {
  return async (c, next) => {
    await next();
    if (c.executionCtx?.waitUntil) {
      c.executionCtx.waitUntil(flush());
    } else {
      flush().catch(() => {
      });
    }
  };
}

exports.express = express_exports;
exports.hono = hono_exports;
exports.lambda = lambda_exports;
exports.next = next_exports;
//# sourceMappingURL=integrations.js.map
//# sourceMappingURL=integrations.js.map
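The Next.js `withObserve` in this bundle prefers `after()`, then `waitUntil()`, then a blocking flush. The JSDoc embedded in the source map below shows the recommended non-blocking form; a sketch copied from those @example blocks:

import { after } from 'next/server';
import { withObserve } from '@lelemondev/sdk/next';

// after() defers the flush until the response has been sent (Next.js 15+);
// without it the wrapper falls back to waitUntil() or a blocking flush.
export const POST = withObserve(
  async (req: Request) => Response.json({ ok: true }),
  { after }
);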
package/dist/integrations.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/integrations/next.ts","../src/core/config.ts","../src/integrations/lambda.ts","../src/integrations/express.ts","../src/integrations/hono.ts"],"names":["withObserve","createMiddleware"],"mappings":";;;;;;;;;;AAAA,IAAA,YAAA,GAAA;AAAA,QAAA,CAAA,YAAA,EAAA;AAAA,EAAA,aAAA,EAAA,MAAA,aAAA;AAAA,EAAA,WAAA,EAAA,MAAA;AAAA,CAAA,CAAA;ACuEA,eAAsB,KAAA,GAAuB;AAI7C;;;ADYO,SAAS,WAAA,CACd,SACA,OAAA,EAC4B;AAC5B,EAAA,OAAO,OAAO,SAAkB,OAAA,KAA0C;AACxE,IAAA,IAAI;AACF,MAAA,OAAO,MAAM,OAAA,CAAQ,OAAA,EAAS,OAAO,CAAA;AAAA,IACvC,CAAA,SAAE;AAEA,MAAA,IAAI,SAAS,KAAA,EAAO;AAElB,QAAA,OAAA,CAAQ,KAAA,CAAM,MAAM,KAAA,EAAO,CAAA;AAAA,MAC7B,CAAA,MAAA,IAAW,SAAS,SAAA,EAAW;AAE7B,QAAA,OAAA,CAAQ,SAAA,CAAU,OAAO,CAAA;AAAA,MAC3B,CAAA,MAAO;AAEL,QAAA,MAAM,KAAA,EAAM;AAAA,MACd;AAAA,IACF;AAAA,EACF,CAAA;AACF;AAeO,SAAS,cAAc,cAAA,EAAoC;AAChE,EAAA,OAAO,SACL,SACA,OAAA,EAC4B;AAC5B,IAAA,OAAO,YAAY,OAAA,EAAS,EAAE,GAAG,cAAA,EAAgB,GAAG,SAAS,CAAA;AAAA,EAC/D,CAAA;AACF;;;AElIA,IAAA,cAAA,GAAA;AAAA,QAAA,CAAA,cAAA,EAAA;AAAA,EAAA,WAAA,EAAA,MAAAA;AAAA,CAAA,CAAA;AAkFO,SAASA,aACd,OAAA,EACgC;AAChC,EAAA,OAAO,OAAO,OAAe,OAAA,KAA6C;AACxE,IAAA,IAAI;AACF,MAAA,OAAO,MAAM,OAAA,CAAQ,KAAA,EAAO,OAAO,CAAA;AAAA,IACrC,CAAA,SAAE;AAEA,MAAA,MAAM,KAAA,EAAM;AAAA,IACd;AAAA,EACF,CAAA;AACF;;;AC7FA,IAAA,eAAA,GAAA;AAAA,QAAA,CAAA,eAAA,EAAA;AAAA,EAAA,gBAAA,EAAA,MAAA;AAAA,CAAA,CAAA;AA0DO,SAAS,gBAAA,GAAsC;AACpD,EAAA,OAAO,CAAC,IAAA,EAAM,GAAA,EAAK,IAAA,KAAS;AAE1B,IAAA,GAAA,CAAI,EAAA,CAAG,UAAU,MAAM;AACrB,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAEpB,CAAC,CAAA;AAAA,IACH,CAAC,CAAA;AAED,IAAA,IAAA,EAAK;AAAA,EACP,CAAA;AACF;;;ACrEA,IAAA,YAAA,GAAA;AAAA,QAAA,CAAA,YAAA,EAAA;AAAA,EAAA,gBAAA,EAAA,MAAAC;AAAA,CAAA,CAAA;AAoEO,SAASA,iBAAAA,GAAmC;AACjD,EAAA,OAAO,OAAO,GAAG,IAAA,KAAS;AACxB,IAAA,MAAM,IAAA,EAAK;AAGX,IAAA,IAAI,CAAA,CAAE,cAAc,SAAA,EAAW;AAC7B,MAAA,CAAA,CAAE,YAAA,CAAa,SAAA,CAAU,KAAA,EAAO,CAAA;AAAA,IAClC,CAAA,MAAO;AAEL,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAAC,CAAC,CAAA;AAAA,IACxB;AAAA,EACF,CAAA;AACF","file":"integrations.js","sourcesContent":["/**\n * Next.js App Router Integration\n *\n * Wraps route handlers to automatically flush traces.\n * Supports Next.js 15+ `after()` and Vercel's `waitUntil()`.\n *\n * @example\n * import { withObserve } from '@lelemondev/sdk/next';\n *\n * export const POST = withObserve(async (req) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return Response.json(result);\n * });\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types\n// ─────────────────────────────────────────────────────────────\n\ntype NextRouteHandler<TContext = unknown> = (\n request: Request,\n context?: TContext\n) => Response | Promise<Response>;\n\n/**\n * Options for the Next.js wrapper\n */\nexport interface NextObserveOptions {\n /**\n * Next.js 15+ after() function from 'next/server'\n * Preferred method - runs after response without blocking\n *\n * @example\n * import { after } from 'next/server';\n * export const POST = withObserve(handler, { after });\n */\n after?: (callback: () => void | Promise<void>) => void;\n\n /**\n * Vercel's waitUntil() from '@vercel/functions'\n * Alternative for Vercel deployments\n *\n * @example\n * import { waitUntil } from '@vercel/functions';\n * export const POST = withObserve(handler, { waitUntil });\n */\n waitUntil?: (promise: Promise<unknown>) => void;\n}\n\n// ─────────────────────────────────────────────────────────────\n// Wrapper\n// 
─────────────────────────────────────────────────────────────\n\n/**\n * Wrap a Next.js App Router handler with automatic trace flushing\n *\n * @param handler - Your route handler function\n * @param options - Optional: pass `after` (Next.js 15+) or `waitUntil` (Vercel)\n * @returns Wrapped handler that auto-flushes traces\n *\n * @example\n * // Basic usage (blocking flush)\n * export const POST = withObserve(async (req) => {\n * return Response.json({ ok: true });\n * });\n *\n * @example\n * // Next.js 15+ with after() - non-blocking (recommended)\n * import { after } from 'next/server';\n *\n * export const POST = withObserve(\n * async (req) => Response.json({ ok: true }),\n * { after }\n * );\n *\n * @example\n * // Vercel with waitUntil() - non-blocking\n * import { waitUntil } from '@vercel/functions';\n *\n * export const POST = withObserve(\n * async (req) => Response.json({ ok: true }),\n * { waitUntil }\n * );\n */\nexport function withObserve<TContext = unknown>(\n handler: NextRouteHandler<TContext>,\n options?: NextObserveOptions\n): NextRouteHandler<TContext> {\n return async (request: Request, context?: TContext): Promise<Response> => {\n try {\n return await handler(request, context);\n } finally {\n // Priority: after() > waitUntil() > blocking flush\n if (options?.after) {\n // Next.js 15+ native - best option\n options.after(() => flush());\n } else if (options?.waitUntil) {\n // Vercel platform\n options.waitUntil(flush());\n } else {\n // Fallback: blocking flush\n await flush();\n }\n }\n };\n}\n\n/**\n * Create a pre-configured wrapper with default options\n *\n * @example\n * import { after } from 'next/server';\n * import { createWrapper } from '@lelemondev/sdk/next';\n *\n * const withObserve = createWrapper({ after });\n *\n * export const POST = withObserve(async (req) => {\n * return Response.json({ ok: true });\n * });\n */\nexport function createWrapper(defaultOptions: NextObserveOptions) {\n return function <TContext = unknown>(\n handler: NextRouteHandler<TContext>,\n options?: NextObserveOptions\n ): NextRouteHandler<TContext> {\n return withObserve(handler, { ...defaultOptions, ...options });\n };\n}\n","/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://api.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n globalTransport = createTransport(config);\n initialized = true;\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get 
or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n console.warn(\n '[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.'\n );\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? !apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * AWS Lambda Integration\n *\n * Wraps Lambda handlers to automatically flush traces before the function exits.\n *\n * @example\n * import { withObserve } from '@lelemondev/sdk/lambda';\n *\n * export const handler = withObserve(async (event) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return { statusCode: 200, body: JSON.stringify(result) };\n * });\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring @types/aws-lambda)\n// ─────────────────────────────────────────────────────────────\n\n/**\n * AWS Lambda Context object\n */\nexport interface LambdaContext {\n functionName: string;\n functionVersion: string;\n invokedFunctionArn: string;\n memoryLimitInMB: string;\n awsRequestId: string;\n logGroupName: string;\n logStreamName: string;\n getRemainingTimeInMillis(): number;\n [key: string]: unknown;\n}\n\n/**\n * Generic Lambda handler type\n */\ntype LambdaHandler<TEvent = unknown, TResult = unknown> = (\n event: TEvent,\n context: LambdaContext\n) => Promise<TResult>;\n\n// ─────────────────────────────────────────────────────────────\n// Wrapper\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Wrap an AWS Lambda handler with automatic trace flushing\n *\n * Always flushes before returning - Lambda freezes the container\n * immediately after the handler returns, so this is required.\n *\n * @param handler - Your Lambda handler function\n * @returns Wrapped handler that auto-flushes traces\n *\n * @example\n * // API Gateway event\n * export const handler = withObserve(async (event) => {\n * const body = JSON.parse(event.body);\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: body.message }],\n * });\n * return {\n * statusCode: 200,\n * body: JSON.stringify(result.choices[0].message),\n * };\n * });\n *\n * @example\n * // With typed events\n * import type { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda';\n *\n * export const handler = withObserve<APIGatewayProxyEvent, APIGatewayProxyResult>(\n * async (event, context) => {\n * return { statusCode: 200, body: 'OK' };\n 
* }\n * );\n */\nexport function withObserve<TEvent = unknown, TResult = unknown>(\n handler: LambdaHandler<TEvent, TResult>\n): LambdaHandler<TEvent, TResult> {\n return async (event: TEvent, context: LambdaContext): Promise<TResult> => {\n try {\n return await handler(event, context);\n } finally {\n // Always flush - Lambda freezes immediately after return\n await flush();\n }\n };\n}\n","/**\n * Express Integration\n *\n * Middleware that automatically flushes traces when response finishes.\n *\n * @example\n * import express from 'express';\n * import { createMiddleware } from '@lelemondev/sdk/express';\n *\n * const app = express();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring express as dependency)\n// ─────────────────────────────────────────────────────────────\n\ninterface ExpressRequest {\n [key: string]: unknown;\n}\n\ninterface ExpressResponse {\n on(event: 'finish' | 'close' | 'error', listener: () => void): this;\n [key: string]: unknown;\n}\n\ntype NextFunction = (error?: unknown) => void;\n\ntype ExpressMiddleware = (\n req: ExpressRequest,\n res: ExpressResponse,\n next: NextFunction\n) => void;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Express middleware for automatic trace flushing\n *\n * Flushes traces when the response finishes (after res.send/res.json).\n * This is fire-and-forget and doesn't block the response.\n *\n * @returns Express middleware function\n *\n * @example\n * // Global middleware\n * app.use(createMiddleware());\n *\n * @example\n * // Per-route middleware\n * app.post('/chat', createMiddleware(), async (req, res) => {\n * res.json({ ok: true });\n * });\n */\nexport function createMiddleware(): ExpressMiddleware {\n return (_req, res, next) => {\n // Flush when response is finished (after headers + body sent)\n res.on('finish', () => {\n flush().catch(() => {\n // Silently ignore flush errors - fire and forget\n });\n });\n\n next();\n };\n}\n","/**\n * Hono Integration\n *\n * Middleware for Hono framework (Cloudflare Workers, Deno, Bun, Node.js).\n * Uses executionCtx.waitUntil() when available for non-blocking flush.\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring hono as dependency)\n// ─────────────────────────────────────────────────────────────\n\ninterface ExecutionContext {\n waitUntil(promise: Promise<unknown>): void;\n passThroughOnException(): void;\n}\n\ninterface HonoContext {\n req: {\n raw: Request;\n [key: string]: unknown;\n };\n res: Response | undefined;\n executionCtx?: ExecutionContext;\n [key: string]: unknown;\n}\n\ntype NextFunction = () => Promise<void>;\n\ntype HonoMiddleware = (c: HonoContext, next: NextFunction) => Promise<void>;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Hono middleware for automatic trace flushing\n *\n * On Cloudflare Workers/Deno Deploy: uses executionCtx.waitUntil() for non-blocking flush\n * On Node.js/Bun: flushes after response 
(fire-and-forget)\n *\n * @returns Hono middleware function\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n *\n * // Global middleware\n * app.use(createMiddleware());\n *\n * app.post('/chat', async (c) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return c.json(result);\n * });\n *\n * export default app;\n */\nexport function createMiddleware(): HonoMiddleware {\n return async (c, next) => {\n await next();\n\n // Use waitUntil if available (Cloudflare Workers, Deno Deploy)\n if (c.executionCtx?.waitUntil) {\n c.executionCtx.waitUntil(flush());\n } else {\n // Fire-and-forget for Node.js/Bun\n flush().catch(() => {});\n }\n };\n}\n"]}
package/dist/integrations.mjs
ADDED
@@ -0,0 +1,88 @@
/* @lelemondev/sdk - LLM Observability */
var __defProp = Object.defineProperty;
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};

// src/integrations/next.ts
var next_exports = {};
__export(next_exports, {
  createWrapper: () => createWrapper,
  withObserve: () => withObserve
});
async function flush() {
}

// src/integrations/next.ts
function withObserve(handler, options) {
  return async (request, context) => {
    try {
      return await handler(request, context);
    } finally {
      if (options?.after) {
        options.after(() => flush());
      } else if (options?.waitUntil) {
        options.waitUntil(flush());
      } else {
        await flush();
      }
    }
  };
}
function createWrapper(defaultOptions) {
  return function(handler, options) {
    return withObserve(handler, { ...defaultOptions, ...options });
  };
}

// src/integrations/lambda.ts
var lambda_exports = {};
__export(lambda_exports, {
  withObserve: () => withObserve2
});
function withObserve2(handler) {
  return async (event, context) => {
    try {
      return await handler(event, context);
    } finally {
      await flush();
    }
  };
}

// src/integrations/express.ts
var express_exports = {};
__export(express_exports, {
  createMiddleware: () => createMiddleware
});
function createMiddleware() {
  return (_req, res, next) => {
    res.on("finish", () => {
      flush().catch(() => {
      });
    });
    next();
  };
}

// src/integrations/hono.ts
var hono_exports = {};
__export(hono_exports, {
  createMiddleware: () => createMiddleware2
});
function createMiddleware2() {
  return async (c, next) => {
    await next();
    if (c.executionCtx?.waitUntil) {
      c.executionCtx.waitUntil(flush());
    } else {
      flush().catch(() => {
      });
    }
  };
}

export { express_exports as express, hono_exports as hono, lambda_exports as lambda, next_exports as next };
//# sourceMappingURL=integrations.mjs.map
//# sourceMappingURL=integrations.mjs.map
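This entry re-exports each integration as a namespace (`express`, `hono`, `lambda`, `next`). A hedged consumption sketch; the `@lelemondev/sdk/integrations` specifier is inferred from the dist filename and should be checked against the package.json `exports` map:

// Subpath assumed from dist/integrations.mjs; confirm in package.json.
import { lambda } from '@lelemondev/sdk/integrations';

export const handler = lambda.withObserve(async (event, context) => {
  return { statusCode: 200, body: 'OK' };
});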
package/dist/integrations.mjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/integrations/next.ts","../src/core/config.ts","../src/integrations/lambda.ts","../src/integrations/express.ts","../src/integrations/hono.ts"],"names":["withObserve","createMiddleware"],"mappings":";;;;;;;;AAAA,IAAA,YAAA,GAAA;AAAA,QAAA,CAAA,YAAA,EAAA;AAAA,EAAA,aAAA,EAAA,MAAA,aAAA;AAAA,EAAA,WAAA,EAAA,MAAA;AAAA,CAAA,CAAA;ACuEA,eAAsB,KAAA,GAAuB;AAI7C;;;ADYO,SAAS,WAAA,CACd,SACA,OAAA,EAC4B;AAC5B,EAAA,OAAO,OAAO,SAAkB,OAAA,KAA0C;AACxE,IAAA,IAAI;AACF,MAAA,OAAO,MAAM,OAAA,CAAQ,OAAA,EAAS,OAAO,CAAA;AAAA,IACvC,CAAA,SAAE;AAEA,MAAA,IAAI,SAAS,KAAA,EAAO;AAElB,QAAA,OAAA,CAAQ,KAAA,CAAM,MAAM,KAAA,EAAO,CAAA;AAAA,MAC7B,CAAA,MAAA,IAAW,SAAS,SAAA,EAAW;AAE7B,QAAA,OAAA,CAAQ,SAAA,CAAU,OAAO,CAAA;AAAA,MAC3B,CAAA,MAAO;AAEL,QAAA,MAAM,KAAA,EAAM;AAAA,MACd;AAAA,IACF;AAAA,EACF,CAAA;AACF;AAeO,SAAS,cAAc,cAAA,EAAoC;AAChE,EAAA,OAAO,SACL,SACA,OAAA,EAC4B;AAC5B,IAAA,OAAO,YAAY,OAAA,EAAS,EAAE,GAAG,cAAA,EAAgB,GAAG,SAAS,CAAA;AAAA,EAC/D,CAAA;AACF;;;AElIA,IAAA,cAAA,GAAA;AAAA,QAAA,CAAA,cAAA,EAAA;AAAA,EAAA,WAAA,EAAA,MAAAA;AAAA,CAAA,CAAA;AAkFO,SAASA,aACd,OAAA,EACgC;AAChC,EAAA,OAAO,OAAO,OAAe,OAAA,KAA6C;AACxE,IAAA,IAAI;AACF,MAAA,OAAO,MAAM,OAAA,CAAQ,KAAA,EAAO,OAAO,CAAA;AAAA,IACrC,CAAA,SAAE;AAEA,MAAA,MAAM,KAAA,EAAM;AAAA,IACd;AAAA,EACF,CAAA;AACF;;;AC7FA,IAAA,eAAA,GAAA;AAAA,QAAA,CAAA,eAAA,EAAA;AAAA,EAAA,gBAAA,EAAA,MAAA;AAAA,CAAA,CAAA;AA0DO,SAAS,gBAAA,GAAsC;AACpD,EAAA,OAAO,CAAC,IAAA,EAAM,GAAA,EAAK,IAAA,KAAS;AAE1B,IAAA,GAAA,CAAI,EAAA,CAAG,UAAU,MAAM;AACrB,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAEpB,CAAC,CAAA;AAAA,IACH,CAAC,CAAA;AAED,IAAA,IAAA,EAAK;AAAA,EACP,CAAA;AACF;;;ACrEA,IAAA,YAAA,GAAA;AAAA,QAAA,CAAA,YAAA,EAAA;AAAA,EAAA,gBAAA,EAAA,MAAAC;AAAA,CAAA,CAAA;AAoEO,SAASA,iBAAAA,GAAmC;AACjD,EAAA,OAAO,OAAO,GAAG,IAAA,KAAS;AACxB,IAAA,MAAM,IAAA,EAAK;AAGX,IAAA,IAAI,CAAA,CAAE,cAAc,SAAA,EAAW;AAC7B,MAAA,CAAA,CAAE,YAAA,CAAa,SAAA,CAAU,KAAA,EAAO,CAAA;AAAA,IAClC,CAAA,MAAO;AAEL,MAAA,KAAA,EAAM,CAAE,MAAM,MAAM;AAAA,MAAC,CAAC,CAAA;AAAA,IACxB;AAAA,EACF,CAAA;AACF","file":"integrations.mjs","sourcesContent":["/**\n * Next.js App Router Integration\n *\n * Wraps route handlers to automatically flush traces.\n * Supports Next.js 15+ `after()` and Vercel's `waitUntil()`.\n *\n * @example\n * import { withObserve } from '@lelemondev/sdk/next';\n *\n * export const POST = withObserve(async (req) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return Response.json(result);\n * });\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types\n// ─────────────────────────────────────────────────────────────\n\ntype NextRouteHandler<TContext = unknown> = (\n request: Request,\n context?: TContext\n) => Response | Promise<Response>;\n\n/**\n * Options for the Next.js wrapper\n */\nexport interface NextObserveOptions {\n /**\n * Next.js 15+ after() function from 'next/server'\n * Preferred method - runs after response without blocking\n *\n * @example\n * import { after } from 'next/server';\n * export const POST = withObserve(handler, { after });\n */\n after?: (callback: () => void | Promise<void>) => void;\n\n /**\n * Vercel's waitUntil() from '@vercel/functions'\n * Alternative for Vercel deployments\n *\n * @example\n * import { waitUntil } from '@vercel/functions';\n * export const POST = withObserve(handler, { waitUntil });\n */\n waitUntil?: (promise: Promise<unknown>) => void;\n}\n\n// ─────────────────────────────────────────────────────────────\n// Wrapper\n// 
─────────────────────────────────────────────────────────────\n\n/**\n * Wrap a Next.js App Router handler with automatic trace flushing\n *\n * @param handler - Your route handler function\n * @param options - Optional: pass `after` (Next.js 15+) or `waitUntil` (Vercel)\n * @returns Wrapped handler that auto-flushes traces\n *\n * @example\n * // Basic usage (blocking flush)\n * export const POST = withObserve(async (req) => {\n * return Response.json({ ok: true });\n * });\n *\n * @example\n * // Next.js 15+ with after() - non-blocking (recommended)\n * import { after } from 'next/server';\n *\n * export const POST = withObserve(\n * async (req) => Response.json({ ok: true }),\n * { after }\n * );\n *\n * @example\n * // Vercel with waitUntil() - non-blocking\n * import { waitUntil } from '@vercel/functions';\n *\n * export const POST = withObserve(\n * async (req) => Response.json({ ok: true }),\n * { waitUntil }\n * );\n */\nexport function withObserve<TContext = unknown>(\n handler: NextRouteHandler<TContext>,\n options?: NextObserveOptions\n): NextRouteHandler<TContext> {\n return async (request: Request, context?: TContext): Promise<Response> => {\n try {\n return await handler(request, context);\n } finally {\n // Priority: after() > waitUntil() > blocking flush\n if (options?.after) {\n // Next.js 15+ native - best option\n options.after(() => flush());\n } else if (options?.waitUntil) {\n // Vercel platform\n options.waitUntil(flush());\n } else {\n // Fallback: blocking flush\n await flush();\n }\n }\n };\n}\n\n/**\n * Create a pre-configured wrapper with default options\n *\n * @example\n * import { after } from 'next/server';\n * import { createWrapper } from '@lelemondev/sdk/next';\n *\n * const withObserve = createWrapper({ after });\n *\n * export const POST = withObserve(async (req) => {\n * return Response.json({ ok: true });\n * });\n */\nexport function createWrapper(defaultOptions: NextObserveOptions) {\n return function <TContext = unknown>(\n handler: NextRouteHandler<TContext>,\n options?: NextObserveOptions\n ): NextRouteHandler<TContext> {\n return withObserve(handler, { ...defaultOptions, ...options });\n };\n}\n","/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://api.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n globalTransport = createTransport(config);\n initialized = true;\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get 
or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n console.warn(\n '[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.'\n );\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? !apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * AWS Lambda Integration\n *\n * Wraps Lambda handlers to automatically flush traces before the function exits.\n *\n * @example\n * import { withObserve } from '@lelemondev/sdk/lambda';\n *\n * export const handler = withObserve(async (event) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return { statusCode: 200, body: JSON.stringify(result) };\n * });\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring @types/aws-lambda)\n// ─────────────────────────────────────────────────────────────\n\n/**\n * AWS Lambda Context object\n */\nexport interface LambdaContext {\n functionName: string;\n functionVersion: string;\n invokedFunctionArn: string;\n memoryLimitInMB: string;\n awsRequestId: string;\n logGroupName: string;\n logStreamName: string;\n getRemainingTimeInMillis(): number;\n [key: string]: unknown;\n}\n\n/**\n * Generic Lambda handler type\n */\ntype LambdaHandler<TEvent = unknown, TResult = unknown> = (\n event: TEvent,\n context: LambdaContext\n) => Promise<TResult>;\n\n// ─────────────────────────────────────────────────────────────\n// Wrapper\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Wrap an AWS Lambda handler with automatic trace flushing\n *\n * Always flushes before returning - Lambda freezes the container\n * immediately after the handler returns, so this is required.\n *\n * @param handler - Your Lambda handler function\n * @returns Wrapped handler that auto-flushes traces\n *\n * @example\n * // API Gateway event\n * export const handler = withObserve(async (event) => {\n * const body = JSON.parse(event.body);\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: body.message }],\n * });\n * return {\n * statusCode: 200,\n * body: JSON.stringify(result.choices[0].message),\n * };\n * });\n *\n * @example\n * // With typed events\n * import type { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda';\n *\n * export const handler = withObserve<APIGatewayProxyEvent, APIGatewayProxyResult>(\n * async (event, context) => {\n * return { statusCode: 200, body: 'OK' };\n 
* }\n * );\n */\nexport function withObserve<TEvent = unknown, TResult = unknown>(\n handler: LambdaHandler<TEvent, TResult>\n): LambdaHandler<TEvent, TResult> {\n return async (event: TEvent, context: LambdaContext): Promise<TResult> => {\n try {\n return await handler(event, context);\n } finally {\n // Always flush - Lambda freezes immediately after return\n await flush();\n }\n };\n}\n","/**\n * Express Integration\n *\n * Middleware that automatically flushes traces when response finishes.\n *\n * @example\n * import express from 'express';\n * import { createMiddleware } from '@lelemondev/sdk/express';\n *\n * const app = express();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring express as dependency)\n// ─────────────────────────────────────────────────────────────\n\ninterface ExpressRequest {\n [key: string]: unknown;\n}\n\ninterface ExpressResponse {\n on(event: 'finish' | 'close' | 'error', listener: () => void): this;\n [key: string]: unknown;\n}\n\ntype NextFunction = (error?: unknown) => void;\n\ntype ExpressMiddleware = (\n req: ExpressRequest,\n res: ExpressResponse,\n next: NextFunction\n) => void;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Express middleware for automatic trace flushing\n *\n * Flushes traces when the response finishes (after res.send/res.json).\n * This is fire-and-forget and doesn't block the response.\n *\n * @returns Express middleware function\n *\n * @example\n * // Global middleware\n * app.use(createMiddleware());\n *\n * @example\n * // Per-route middleware\n * app.post('/chat', createMiddleware(), async (req, res) => {\n * res.json({ ok: true });\n * });\n */\nexport function createMiddleware(): ExpressMiddleware {\n return (_req, res, next) => {\n // Flush when response is finished (after headers + body sent)\n res.on('finish', () => {\n flush().catch(() => {\n // Silently ignore flush errors - fire and forget\n });\n });\n\n next();\n };\n}\n","/**\n * Hono Integration\n *\n * Middleware for Hono framework (Cloudflare Workers, Deno, Bun, Node.js).\n * Uses executionCtx.waitUntil() when available for non-blocking flush.\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n * app.use(createMiddleware());\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring hono as dependency)\n// ─────────────────────────────────────────────────────────────\n\ninterface ExecutionContext {\n waitUntil(promise: Promise<unknown>): void;\n passThroughOnException(): void;\n}\n\ninterface HonoContext {\n req: {\n raw: Request;\n [key: string]: unknown;\n };\n res: Response | undefined;\n executionCtx?: ExecutionContext;\n [key: string]: unknown;\n}\n\ntype NextFunction = () => Promise<void>;\n\ntype HonoMiddleware = (c: HonoContext, next: NextFunction) => Promise<void>;\n\n// ─────────────────────────────────────────────────────────────\n// Middleware\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Create Hono middleware for automatic trace flushing\n *\n * On Cloudflare Workers/Deno Deploy: uses executionCtx.waitUntil() for non-blocking flush\n * On Node.js/Bun: flushes after response 
(fire-and-forget)\n *\n * @returns Hono middleware function\n *\n * @example\n * import { Hono } from 'hono';\n * import { createMiddleware } from '@lelemondev/sdk/hono';\n *\n * const app = new Hono();\n *\n * // Global middleware\n * app.use(createMiddleware());\n *\n * app.post('/chat', async (c) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return c.json(result);\n * });\n *\n * export default app;\n */\nexport function createMiddleware(): HonoMiddleware {\n return async (c, next) => {\n await next();\n\n // Use waitUntil if available (Cloudflare Workers, Deno Deploy)\n if (c.executionCtx?.waitUntil) {\n c.executionCtx.waitUntil(flush());\n } else {\n // Fire-and-forget for Node.js/Bun\n flush().catch(() => {});\n }\n };\n}\n"]}
package/dist/lambda-DQmEfWXC.d.mts
ADDED
@@ -0,0 +1,75 @@
/**
 * AWS Lambda Integration
 *
 * Wraps Lambda handlers to automatically flush traces before the function exits.
 *
 * @example
 * import { withObserve } from '@lelemondev/sdk/lambda';
 *
 * export const handler = withObserve(async (event) => {
 *   const openai = observe(new OpenAI());
 *   const result = await openai.chat.completions.create({...});
 *   return { statusCode: 200, body: JSON.stringify(result) };
 * });
 */
/**
 * AWS Lambda Context object
 */
interface LambdaContext {
  functionName: string;
  functionVersion: string;
  invokedFunctionArn: string;
  memoryLimitInMB: string;
  awsRequestId: string;
  logGroupName: string;
  logStreamName: string;
  getRemainingTimeInMillis(): number;
  [key: string]: unknown;
}
/**
 * Generic Lambda handler type
 */
type LambdaHandler<TEvent = unknown, TResult = unknown> = (event: TEvent, context: LambdaContext) => Promise<TResult>;
/**
 * Wrap an AWS Lambda handler with automatic trace flushing
 *
 * Always flushes before returning - Lambda freezes the container
 * immediately after the handler returns, so this is required.
 *
 * @param handler - Your Lambda handler function
 * @returns Wrapped handler that auto-flushes traces
 *
 * @example
 * // API Gateway event
 * export const handler = withObserve(async (event) => {
 *   const body = JSON.parse(event.body);
 *   const openai = observe(new OpenAI());
 *   const result = await openai.chat.completions.create({
 *     model: 'gpt-4',
 *     messages: [{ role: 'user', content: body.message }],
 *   });
 *   return {
 *     statusCode: 200,
 *     body: JSON.stringify(result.choices[0].message),
 *   };
 * });
 *
 * @example
 * // With typed events
 * import type { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda';
 *
 * export const handler = withObserve<APIGatewayProxyEvent, APIGatewayProxyResult>(
 *   async (event, context) => {
 *     return { statusCode: 200, body: 'OK' };
 *   }
 * );
 */
declare function withObserve<TEvent = unknown, TResult = unknown>(handler: LambdaHandler<TEvent, TResult>): LambdaHandler<TEvent, TResult>;

type lambda_LambdaContext = LambdaContext;
declare const lambda_withObserve: typeof withObserve;
declare namespace lambda {
  export { type lambda_LambdaContext as LambdaContext, lambda_withObserve as withObserve };
}

export { type LambdaContext as L, lambda as l, withObserve as w };
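The declarations are generic over event and result types. The typed-event JSDoc example above, completed into a standalone module (requires `@types/aws-lambda` for the event types):

import type { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda';
import { withObserve } from '@lelemondev/sdk/lambda';

export const handler = withObserve<APIGatewayProxyEvent, APIGatewayProxyResult>(
  async (event, context) => {
    // The wrapper flushes in a finally block before Lambda freezes the container.
    return { statusCode: 200, body: 'OK' };
  }
);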
package/dist/lambda-DQmEfWXC.d.ts
ADDED
@@ -0,0 +1,75 @@
/**
 * AWS Lambda Integration
 *
 * Wraps Lambda handlers to automatically flush traces before the function exits.
 *
 * @example
 * import { withObserve } from '@lelemondev/sdk/lambda';
 *
 * export const handler = withObserve(async (event) => {
 *   const openai = observe(new OpenAI());
 *   const result = await openai.chat.completions.create({...});
 *   return { statusCode: 200, body: JSON.stringify(result) };
 * });
 */
/**
 * AWS Lambda Context object
 */
interface LambdaContext {
  functionName: string;
  functionVersion: string;
  invokedFunctionArn: string;
  memoryLimitInMB: string;
  awsRequestId: string;
  logGroupName: string;
  logStreamName: string;
  getRemainingTimeInMillis(): number;
  [key: string]: unknown;
}
/**
 * Generic Lambda handler type
 */
type LambdaHandler<TEvent = unknown, TResult = unknown> = (event: TEvent, context: LambdaContext) => Promise<TResult>;
/**
 * Wrap an AWS Lambda handler with automatic trace flushing
 *
 * Always flushes before returning - Lambda freezes the container
 * immediately after the handler returns, so this is required.
 *
 * @param handler - Your Lambda handler function
 * @returns Wrapped handler that auto-flushes traces
 *
 * @example
 * // API Gateway event
 * export const handler = withObserve(async (event) => {
 *   const body = JSON.parse(event.body);
 *   const openai = observe(new OpenAI());
 *   const result = await openai.chat.completions.create({
 *     model: 'gpt-4',
 *     messages: [{ role: 'user', content: body.message }],
 *   });
 *   return {
 *     statusCode: 200,
 *     body: JSON.stringify(result.choices[0].message),
 *   };
 * });
 *
 * @example
 * // With typed events
 * import type { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda';
 *
 * export const handler = withObserve<APIGatewayProxyEvent, APIGatewayProxyResult>(
 *   async (event, context) => {
 *     return { statusCode: 200, body: 'OK' };
 *   }
 * );
 */
declare function withObserve<TEvent = unknown, TResult = unknown>(handler: LambdaHandler<TEvent, TResult>): LambdaHandler<TEvent, TResult>;

type lambda_LambdaContext = LambdaContext;
declare const lambda_withObserve: typeof withObserve;
declare namespace lambda {
  export { type lambda_LambdaContext as LambdaContext, lambda_withObserve as withObserve };
}

export { type LambdaContext as L, lambda as l, withObserve as w };
package/dist/lambda.d.mts
ADDED
@@ -0,0 +1 @@
export { L as LambdaContext, w as withObserve } from './lambda-DQmEfWXC.mjs';
package/dist/lambda.d.ts
ADDED
@@ -0,0 +1 @@
export { L as LambdaContext, w as withObserve } from './lambda-DQmEfWXC.js';
package/dist/lambda.js
ADDED
@@ -0,0 +1,21 @@
'use strict';

/* @lelemondev/sdk - LLM Observability */

async function flush() {
}

// src/integrations/lambda.ts
function withObserve(handler) {
  return async (event, context) => {
    try {
      return await handler(event, context);
    } finally {
      await flush();
    }
  };
}

exports.withObserve = withObserve;
//# sourceMappingURL=lambda.js.map
//# sourceMappingURL=lambda.js.map
package/dist/lambda.js.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/core/config.ts","../src/integrations/lambda.ts"],"names":[],"mappings":";;;;AAuEA,eAAsB,KAAA,GAAuB;AAI7C;;;ACOO,SAAS,YACd,OAAA,EACgC;AAChC,EAAA,OAAO,OAAO,OAAe,OAAA,KAA6C;AACxE,IAAA,IAAI;AACF,MAAA,OAAO,MAAM,OAAA,CAAQ,KAAA,EAAO,OAAO,CAAA;AAAA,IACrC,CAAA,SAAE;AAEA,MAAA,MAAM,KAAA,EAAM;AAAA,IACd;AAAA,EACF,CAAA;AACF","file":"lambda.js","sourcesContent":["/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://api.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n globalTransport = createTransport(config);\n initialized = true;\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n console.warn(\n '[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.'\n );\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? 
!apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * AWS Lambda Integration\n *\n * Wraps Lambda handlers to automatically flush traces before the function exits.\n *\n * @example\n * import { withObserve } from '@lelemondev/sdk/lambda';\n *\n * export const handler = withObserve(async (event) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return { statusCode: 200, body: JSON.stringify(result) };\n * });\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring @types/aws-lambda)\n// ─────────────────────────────────────────────────────────────\n\n/**\n * AWS Lambda Context object\n */\nexport interface LambdaContext {\n functionName: string;\n functionVersion: string;\n invokedFunctionArn: string;\n memoryLimitInMB: string;\n awsRequestId: string;\n logGroupName: string;\n logStreamName: string;\n getRemainingTimeInMillis(): number;\n [key: string]: unknown;\n}\n\n/**\n * Generic Lambda handler type\n */\ntype LambdaHandler<TEvent = unknown, TResult = unknown> = (\n event: TEvent,\n context: LambdaContext\n) => Promise<TResult>;\n\n// ─────────────────────────────────────────────────────────────\n// Wrapper\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Wrap an AWS Lambda handler with automatic trace flushing\n *\n * Always flushes before returning - Lambda freezes the container\n * immediately after the handler returns, so this is required.\n *\n * @param handler - Your Lambda handler function\n * @returns Wrapped handler that auto-flushes traces\n *\n * @example\n * // API Gateway event\n * export const handler = withObserve(async (event) => {\n * const body = JSON.parse(event.body);\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: body.message }],\n * });\n * return {\n * statusCode: 200,\n * body: JSON.stringify(result.choices[0].message),\n * };\n * });\n *\n * @example\n * // With typed events\n * import type { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda';\n *\n * export const handler = withObserve<APIGatewayProxyEvent, APIGatewayProxyResult>(\n * async (event, context) => {\n * return { statusCode: 200, body: 'OK' };\n * }\n * );\n */\nexport function withObserve<TEvent = unknown, TResult = unknown>(\n handler: LambdaHandler<TEvent, TResult>\n): LambdaHandler<TEvent, TResult> {\n return async (event: TEvent, context: LambdaContext): Promise<TResult> => {\n try {\n return await handler(event, context);\n } finally {\n // Always flush - Lambda freezes immediately after return\n await flush();\n }\n };\n}\n"]}
package/dist/lambda.mjs
ADDED
@@ -0,0 +1,19 @@
/* @lelemondev/sdk - LLM Observability */

async function flush() {
}

// src/integrations/lambda.ts
function withObserve(handler) {
  return async (event, context) => {
    try {
      return await handler(event, context);
    } finally {
      await flush();
    }
  };
}

export { withObserve };
//# sourceMappingURL=lambda.mjs.map
//# sourceMappingURL=lambda.mjs.map
package/dist/lambda.mjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"sources":["../src/core/config.ts","../src/integrations/lambda.ts"],"names":[],"mappings":";;AAuEA,eAAsB,KAAA,GAAuB;AAI7C;;;ACOO,SAAS,YACd,OAAA,EACgC;AAChC,EAAA,OAAO,OAAO,OAAe,OAAA,KAA6C;AACxE,IAAA,IAAI;AACF,MAAA,OAAO,MAAM,OAAA,CAAQ,KAAA,EAAO,OAAO,CAAA;AAAA,IACrC,CAAA,SAAE;AAEA,MAAA,MAAM,KAAA,EAAM;AAAA,IACd;AAAA,EACF,CAAA;AACF","file":"lambda.mjs","sourcesContent":["/**\n * Global Configuration\n *\n * Manages SDK configuration and transport instance.\n */\n\nimport type { LelemonConfig } from './types';\nimport { Transport } from './transport';\n\n// ─────────────────────────────────────────────────────────────\n// Global State\n// ─────────────────────────────────────────────────────────────\n\nlet globalConfig: LelemonConfig = {};\nlet globalTransport: Transport | null = null;\nlet initialized = false;\n\n// ─────────────────────────────────────────────────────────────\n// Configuration\n// ─────────────────────────────────────────────────────────────\n\nconst DEFAULT_ENDPOINT = 'https://api.lelemon.dev';\n\n/**\n * Initialize the SDK\n * Call once at app startup\n */\nexport function init(config: LelemonConfig = {}): void {\n globalConfig = config;\n globalTransport = createTransport(config);\n initialized = true;\n}\n\n/**\n * Get current config\n */\nexport function getConfig(): LelemonConfig {\n return globalConfig;\n}\n\n/**\n * Check if SDK is initialized\n */\nexport function isInitialized(): boolean {\n return initialized;\n}\n\n/**\n * Check if SDK is enabled\n */\nexport function isEnabled(): boolean {\n return getTransport().isEnabled();\n}\n\n// ─────────────────────────────────────────────────────────────\n// Transport\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Get or create transport instance\n */\nexport function getTransport(): Transport {\n if (!globalTransport) {\n globalTransport = createTransport(globalConfig);\n }\n return globalTransport;\n}\n\n/**\n * Flush all pending traces\n */\nexport async function flush(): Promise<void> {\n if (globalTransport) {\n await globalTransport.flush();\n }\n}\n\n/**\n * Create transport instance\n */\nfunction createTransport(config: LelemonConfig): Transport {\n const apiKey = config.apiKey ?? getEnvVar('LELEMON_API_KEY');\n\n if (!apiKey && !config.disabled) {\n console.warn(\n '[Lelemon] No API key provided. Set apiKey in init() or LELEMON_API_KEY env var. Tracing disabled.'\n );\n }\n\n return new Transport({\n apiKey: apiKey ?? '',\n endpoint: config.endpoint ?? DEFAULT_ENDPOINT,\n debug: config.debug ?? false,\n disabled: config.disabled ?? 
!apiKey,\n batchSize: config.batchSize,\n flushIntervalMs: config.flushIntervalMs,\n requestTimeoutMs: config.requestTimeoutMs,\n });\n}\n\n/**\n * Get environment variable (works in Node and edge)\n */\nfunction getEnvVar(name: string): string | undefined {\n if (typeof process !== 'undefined' && process.env) {\n return process.env[name];\n }\n return undefined;\n}\n","/**\n * AWS Lambda Integration\n *\n * Wraps Lambda handlers to automatically flush traces before the function exits.\n *\n * @example\n * import { withObserve } from '@lelemondev/sdk/lambda';\n *\n * export const handler = withObserve(async (event) => {\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({...});\n * return { statusCode: 200, body: JSON.stringify(result) };\n * });\n */\n\nimport { flush } from '../core/config';\n\n// ─────────────────────────────────────────────────────────────\n// Types (minimal to avoid requiring @types/aws-lambda)\n// ─────────────────────────────────────────────────────────────\n\n/**\n * AWS Lambda Context object\n */\nexport interface LambdaContext {\n functionName: string;\n functionVersion: string;\n invokedFunctionArn: string;\n memoryLimitInMB: string;\n awsRequestId: string;\n logGroupName: string;\n logStreamName: string;\n getRemainingTimeInMillis(): number;\n [key: string]: unknown;\n}\n\n/**\n * Generic Lambda handler type\n */\ntype LambdaHandler<TEvent = unknown, TResult = unknown> = (\n event: TEvent,\n context: LambdaContext\n) => Promise<TResult>;\n\n// ─────────────────────────────────────────────────────────────\n// Wrapper\n// ─────────────────────────────────────────────────────────────\n\n/**\n * Wrap an AWS Lambda handler with automatic trace flushing\n *\n * Always flushes before returning - Lambda freezes the container\n * immediately after the handler returns, so this is required.\n *\n * @param handler - Your Lambda handler function\n * @returns Wrapped handler that auto-flushes traces\n *\n * @example\n * // API Gateway event\n * export const handler = withObserve(async (event) => {\n * const body = JSON.parse(event.body);\n * const openai = observe(new OpenAI());\n * const result = await openai.chat.completions.create({\n * model: 'gpt-4',\n * messages: [{ role: 'user', content: body.message }],\n * });\n * return {\n * statusCode: 200,\n * body: JSON.stringify(result.choices[0].message),\n * };\n * });\n *\n * @example\n * // With typed events\n * import type { APIGatewayProxyEvent, APIGatewayProxyResult } from 'aws-lambda';\n *\n * export const handler = withObserve<APIGatewayProxyEvent, APIGatewayProxyResult>(\n * async (event, context) => {\n * return { statusCode: 200, body: 'OK' };\n * }\n * );\n */\nexport function withObserve<TEvent = unknown, TResult = unknown>(\n handler: LambdaHandler<TEvent, TResult>\n): LambdaHandler<TEvent, TResult> {\n return async (event: TEvent, context: LambdaContext): Promise<TResult> => {\n try {\n return await handler(event, context);\n } finally {\n // Always flush - Lambda freezes immediately after return\n await flush();\n }\n };\n}\n"]}