@usetransactional/llm-node 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/integrations/vercel-ai.js ADDED
@@ -0,0 +1,419 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/integrations/vercel-ai.ts
+ var vercel_ai_exports = {};
+ __export(vercel_ai_exports, {
+   getLlmOps: () => getLlmOps,
+   initLlmOps: () => initLlmOps,
+   wrapAiSdk: () => wrapAiSdk,
+   wrapStreamText: () => wrapStreamText
+ });
+ module.exports = __toCommonJS(vercel_ai_exports);
+
+ // src/client.ts
+ var import_nanoid = require("nanoid");
+ var currentTraceId;
+ var currentObservationId;
+ function setTraceContext(traceId, observationId) {
+   currentTraceId = traceId;
+   currentObservationId = observationId;
+ }
+ function getTraceContext() {
+   return { traceId: currentTraceId, observationId: currentObservationId };
+ }
+ function clearTraceContext() {
+   currentTraceId = void 0;
+   currentObservationId = void 0;
+ }
+ var LlmOpsClient = class {
+   constructor(config) {
+     this.queue = [];
+     this.config = this.parseConfig(config);
+     if (this.config.enabled) {
+       this.startFlushTimer();
+     }
+   }
+   parseConfig(config) {
+     let publicKey = config.publicKey;
+     let projectId = config.projectId;
+     let baseUrl = config.baseUrl || "https://api.transactional.dev";
+     if (config.dsn) {
+       try {
+         const url = new URL(config.dsn);
+         publicKey = url.username;
+         const pathParts = url.pathname.split("/").filter(Boolean);
+         projectId = parseInt(pathParts[pathParts.length - 1] || "0");
+         baseUrl = `${url.protocol}//${url.host}`;
+       } catch {
+         throw new Error(`Invalid DSN format: ${config.dsn}`);
+       }
+     }
+     if (!publicKey || !projectId) {
+       throw new Error("LlmOps requires either a DSN or publicKey + projectId");
+     }
+     return {
+       publicKey,
+       projectId,
+       baseUrl,
+       enabled: config.enabled ?? true,
+       batchSize: config.batchSize ?? 100,
+       flushInterval: config.flushInterval ?? 5e3,
+       debug: config.debug ?? false
+     };
+   }
+   startFlushTimer() {
+     this.flushTimer = setInterval(() => {
+       this.flush().catch((err) => {
+         if (this.config.debug) {
+           console.error("[LlmOps] Flush error:", err);
+         }
+       });
+     }, this.config.flushInterval);
+   }
+   log(message, ...args) {
+     if (this.config.debug) {
+       console.log(`[LlmOps] ${message}`, ...args);
+     }
+   }
+   enqueue(item) {
+     if (!this.config.enabled) return;
+     this.queue.push(item);
+     this.log("Enqueued:", item.type, item.id);
+     if (this.queue.length >= this.config.batchSize) {
+       this.flush().catch((err) => {
+         if (this.config.debug) {
+           console.error("[LlmOps] Flush error:", err);
+         }
+       });
+     }
+   }
+   /**
+    * Create a new trace
+    */
+   trace(params) {
+     const traceId = (0, import_nanoid.nanoid)();
+     const startTime = (/* @__PURE__ */ new Date()).toISOString();
+     this.enqueue({
+       type: "trace",
+       id: traceId,
+       projectId: this.config.projectId,
+       ...params,
+       status: "RUNNING",
+       startTime
+     });
+     setTraceContext(traceId);
+     return {
+       id: traceId,
+       end: async (endParams) => {
+         await this.updateTrace(traceId, {
+           status: "COMPLETED",
+           output: endParams?.output,
+           endTime: (/* @__PURE__ */ new Date()).toISOString()
+         });
+         clearTraceContext();
+       },
+       error: async (error) => {
+         await this.updateTrace(traceId, {
+           status: "ERROR",
+           metadata: { error: error.message, stack: error.stack },
+           endTime: (/* @__PURE__ */ new Date()).toISOString()
+         });
+         clearTraceContext();
+       }
+     };
+   }
+   /**
+    * Update an existing trace
+    */
+   async updateTrace(traceId, params) {
+     this.enqueue({
+       type: "trace",
+       id: traceId,
+       ...params
+     });
+   }
+   /**
+    * Create a new observation (span, generation, or event)
+    */
+   observation(params) {
+     const observationId = (0, import_nanoid.nanoid)();
+     const startTime = (/* @__PURE__ */ new Date()).toISOString();
+     const context = getTraceContext();
+     const traceId = params.traceId || context.traceId;
+     if (!traceId) {
+       throw new Error("No trace context found. Create a trace first.");
+     }
+     const { type: observationType, ...restParams } = params;
+     this.enqueue({
+       type: "observation",
+       id: observationId,
+       traceId,
+       parentObservationId: restParams.parentObservationId || context.observationId,
+       ...restParams,
+       observationType,
+       status: "RUNNING",
+       startTime
+     });
+     setTraceContext(traceId, observationId);
+     return {
+       id: observationId,
+       end: async (endParams) => {
+         await this.updateObservation(observationId, {
+           status: "COMPLETED",
+           output: endParams?.output,
+           promptTokens: endParams?.promptTokens,
+           completionTokens: endParams?.completionTokens,
+           endTime: (/* @__PURE__ */ new Date()).toISOString()
+         });
+         setTraceContext(traceId, params.parentObservationId || context.observationId);
+       },
+       error: async (error) => {
+         await this.updateObservation(observationId, {
+           status: "ERROR",
+           metadata: { error: error.message, stack: error.stack },
+           endTime: (/* @__PURE__ */ new Date()).toISOString()
+         });
+         setTraceContext(traceId, params.parentObservationId || context.observationId);
+       }
+     };
+   }
+   /**
+    * Create a generation observation (LLM call)
+    */
+   generation(params) {
+     return this.observation({
+       ...params,
+       type: "GENERATION"
+     });
+   }
+   /**
+    * Create a span observation
+    */
+   span(params) {
+     return this.observation({
+       ...params,
+       type: "SPAN"
+     });
+   }
+   /**
+    * Create an event observation
+    */
+   event(params) {
+     return this.observation({
+       ...params,
+       type: "EVENT"
+     });
+   }
+   /**
+    * Update an existing observation
+    */
+   async updateObservation(observationId, params) {
+     this.enqueue({
+       type: "observation",
+       id: observationId,
+       ...params
+     });
+   }
+   /**
+    * Flush queued events to the API
+    */
+   async flush() {
+     if (this.queue.length === 0) return;
+     if (this.pendingFlush) {
+       await this.pendingFlush;
+     }
+     const batch = this.queue.splice(0, this.config.batchSize);
+     this.log("Flushing", batch.length, "items");
+     this.pendingFlush = this.sendBatch(batch);
+     await this.pendingFlush;
+     this.pendingFlush = void 0;
+   }
+   async sendBatch(batch) {
+     try {
+       const response = await fetch(
+         `${this.config.baseUrl}/observability/ingest/batch`,
+         {
+           method: "POST",
+           headers: {
+             "Authorization": `Bearer ${this.config.publicKey}`,
+             "Content-Type": "application/json"
+           },
+           body: JSON.stringify({
+             projectId: this.config.projectId,
+             batch
+           })
+         }
+       );
+       if (!response.ok) {
+         const text = await response.text();
+         throw new Error(`Failed to send batch: ${response.status} ${text}`);
+       }
+       this.log("Batch sent successfully");
+     } catch (error) {
+       this.queue.unshift(...batch);
+       throw error;
+     }
+   }
+   /**
+    * Shutdown the client and flush remaining events
+    */
+   async shutdown() {
+     if (this.flushTimer) {
+       clearInterval(this.flushTimer);
+     }
+     await this.flush();
+     this.log("Shutdown complete");
+   }
+ };
+
+ // src/index.ts
+ var defaultClient = null;
+ function initLlmOps(config) {
+   if (defaultClient) {
+     console.warn("[LlmOps] SDK already initialized. Ignoring duplicate initialization.");
+     return defaultClient;
+   }
+   defaultClient = new LlmOpsClient(config);
+   return defaultClient;
+ }
+ function getLlmOps() {
+   if (!defaultClient) {
+     throw new Error(
+       "LLM Ops SDK not initialized. Call initLlmOps() first."
+     );
+   }
+   return defaultClient;
+ }
+ function isInitialized() {
+   return defaultClient !== null;
+ }
+
+ // src/integrations/vercel-ai.ts
+ function wrapAiSdk(fn) {
+   return async (options) => {
+     if (!isInitialized()) {
+       console.warn(
+         "[LlmOps] SDK not initialized. Call initLlmOps() before using the Vercel AI wrapper."
+       );
+       return fn(options);
+     }
+     const client = getLlmOps();
+     const metadata = options.experimental_telemetry?.metadata || {};
+     const modelId = options.model?.modelId || "unknown";
+     const provider = options.model?.provider || "unknown";
+     const trace = client.trace({
+       name: `${fn.name || "ai-call"}`,
+       sessionId: metadata.sessionId,
+       userId: metadata.userId,
+       input: {
+         prompt: options.prompt,
+         messages: options.messages
+       },
+       metadata: {
+         provider,
+         modelId,
+         ...metadata
+       }
+     });
+     const generation = client.generation({
+       name: `${provider}/${modelId}`,
+       modelName: modelId,
+       input: {
+         prompt: options.prompt,
+         messages: options.messages
+       }
+     });
+     try {
+       const result = await fn(options);
+       await generation.end({
+         output: { text: result.text },
+         promptTokens: result.usage?.promptTokens,
+         completionTokens: result.usage?.completionTokens
+       });
+       await trace.end({
+         output: { text: result.text }
+       });
+       return result;
+     } catch (error) {
+       await generation.error(error);
+       await trace.error(error);
+       throw error;
+     }
+   };
+ }
+ function wrapStreamText(fn) {
+   return async (options) => {
+     if (!isInitialized()) {
+       console.warn(
+         "[LlmOps] SDK not initialized. Call initLlmOps() before using the Vercel AI wrapper."
+       );
+       return fn(options);
+     }
+     const client = getLlmOps();
+     const metadata = options.experimental_telemetry?.metadata || {};
+     const modelId = options.model?.modelId || "unknown";
+     const provider = options.model?.provider || "unknown";
+     const trace = client.trace({
+       name: "streamText",
+       sessionId: metadata.sessionId,
+       userId: metadata.userId,
+       input: {
+         prompt: options.prompt,
+         messages: options.messages
+       },
+       metadata: {
+         provider,
+         modelId,
+         streaming: true,
+         ...metadata
+       }
+     });
+     const generation = client.generation({
+       name: `${provider}/${modelId}`,
+       modelName: modelId,
+       input: {
+         prompt: options.prompt,
+         messages: options.messages
+       },
+       metadata: { streaming: true }
+     });
+     try {
+       const result = await fn(options);
+       await generation.end({
+         output: { streaming: true }
+       });
+       await trace.end({
+         output: { streaming: true }
+       });
+       return result;
+     } catch (error) {
+       await generation.error(error);
+       await trace.error(error);
+       throw error;
+     }
+   };
+ }
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   getLlmOps,
+   initLlmOps,
+   wrapAiSdk,
+   wrapStreamText
+ });
package/dist/integrations/vercel-ai.mjs ADDED
@@ -0,0 +1,117 @@
+ import {
+   getLlmOps,
+   initLlmOps,
+   isInitialized
+ } from "../chunk-IR6P3PV4.mjs";
+
+ // src/integrations/vercel-ai.ts
+ function wrapAiSdk(fn) {
+   return async (options) => {
+     if (!isInitialized()) {
+       console.warn(
+         "[LlmOps] SDK not initialized. Call initLlmOps() before using the Vercel AI wrapper."
+       );
+       return fn(options);
+     }
+     const client = getLlmOps();
+     const metadata = options.experimental_telemetry?.metadata || {};
+     const modelId = options.model?.modelId || "unknown";
+     const provider = options.model?.provider || "unknown";
+     const trace = client.trace({
+       name: `${fn.name || "ai-call"}`,
+       sessionId: metadata.sessionId,
+       userId: metadata.userId,
+       input: {
+         prompt: options.prompt,
+         messages: options.messages
+       },
+       metadata: {
+         provider,
+         modelId,
+         ...metadata
+       }
+     });
+     const generation = client.generation({
+       name: `${provider}/${modelId}`,
+       modelName: modelId,
+       input: {
+         prompt: options.prompt,
+         messages: options.messages
+       }
+     });
+     try {
+       const result = await fn(options);
+       await generation.end({
+         output: { text: result.text },
+         promptTokens: result.usage?.promptTokens,
+         completionTokens: result.usage?.completionTokens
+       });
+       await trace.end({
+         output: { text: result.text }
+       });
+       return result;
+     } catch (error) {
+       await generation.error(error);
+       await trace.error(error);
+       throw error;
+     }
+   };
+ }
+ function wrapStreamText(fn) {
+   return async (options) => {
+     if (!isInitialized()) {
+       console.warn(
+         "[LlmOps] SDK not initialized. Call initLlmOps() before using the Vercel AI wrapper."
+       );
+       return fn(options);
+     }
+     const client = getLlmOps();
+     const metadata = options.experimental_telemetry?.metadata || {};
+     const modelId = options.model?.modelId || "unknown";
+     const provider = options.model?.provider || "unknown";
+     const trace = client.trace({
+       name: "streamText",
+       sessionId: metadata.sessionId,
+       userId: metadata.userId,
+       input: {
+         prompt: options.prompt,
+         messages: options.messages
+       },
+       metadata: {
+         provider,
+         modelId,
+         streaming: true,
+         ...metadata
+       }
+     });
+     const generation = client.generation({
+       name: `${provider}/${modelId}`,
+       modelName: modelId,
+       input: {
+         prompt: options.prompt,
+         messages: options.messages
+       },
+       metadata: { streaming: true }
+     });
+     try {
+       const result = await fn(options);
+       await generation.end({
+         output: { streaming: true }
+       });
+       await trace.end({
+         output: { streaming: true }
+       });
+       return result;
+     } catch (error) {
+       await generation.error(error);
+       await trace.error(error);
+       throw error;
+     }
+   };
+ }
+ export {
+   getLlmOps,
+   initLlmOps,
+   wrapAiSdk,
+   wrapStreamText
+ };
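
Taken together, the two bundles above publish the same wrapper API in CJS and ESM form. A minimal usage sketch, assuming Node 18+, the Vercel AI SDK's `generateText`, the `@ai-sdk/openai` provider, and a hypothetical DSN value (the DSN shape follows `parseConfig` above: the username slot carries the public key, the last path segment the project id, and the host becomes the ingest base URL):

  import { generateText } from "ai";
  import { openai } from "@ai-sdk/openai";
  import { initLlmOps, getLlmOps, wrapAiSdk } from "@usetransactional/llm-node/vercel-ai";

  // Hypothetical DSN, for illustration only.
  initLlmOps({ dsn: "https://pk_example@api.transactional.dev/42" });

  // Wrap once; every call then records a trace plus a GENERATION
  // observation, batched (defaults: 100 items or a 5s flush interval).
  const tracedGenerateText = wrapAiSdk(generateText);
  const result = await tracedGenerateText({
    model: openai("gpt-4o-mini"),
    prompt: "Summarize the release notes."
  });

  // Drain any queued events before the process exits.
  await getLlmOps().shutdown();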
package/package.json ADDED
@@ -0,0 +1,83 @@
+ {
+   "name": "@usetransactional/llm-node",
+   "version": "0.1.0",
+   "description": "LLM Ops SDK for Transactional - AI observability with cost tracking and trace analysis",
+   "main": "./dist/index.js",
+   "module": "./dist/index.mjs",
+   "types": "./dist/index.d.ts",
+   "exports": {
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.mjs",
+       "require": "./dist/index.js"
+     },
+     "./langchain": {
+       "types": "./dist/integrations/langchain.d.ts",
+       "import": "./dist/integrations/langchain.mjs",
+       "require": "./dist/integrations/langchain.js"
+     },
+     "./vercel-ai": {
+       "types": "./dist/integrations/vercel-ai.d.ts",
+       "import": "./dist/integrations/vercel-ai.mjs",
+       "require": "./dist/integrations/vercel-ai.js"
+     }
+   },
+   "files": [
+     "dist"
+   ],
+   "scripts": {
+     "build": "tsup src/index.ts src/integrations/langchain.ts src/integrations/vercel-ai.ts --format esm,cjs --dts",
+     "dev": "tsup src/index.ts src/integrations/langchain.ts src/integrations/vercel-ai.ts --format esm,cjs --dts --watch",
+     "lint": "eslint src/",
+     "typecheck": "tsc --noEmit",
+     "clean": "rm -rf dist"
+   },
+   "keywords": [
+     "llm",
+     "observability",
+     "langchain",
+     "openai",
+     "anthropic",
+     "tracing",
+     "monitoring",
+     "cost-tracking"
+   ],
+   "author": "Transactional",
+   "license": "MIT",
+   "publishConfig": {
+     "access": "public",
+     "registry": "https://registry.npmjs.org"
+   },
+   "repository": {
+     "type": "git",
+     "url": "https://github.com/TransactionalHQ/llm-node.git"
+   },
+   "homepage": "https://usetransactional.com/docs/llm-ops",
+   "dependencies": {
+     "nanoid": "^5.0.4"
+   },
+   "devDependencies": {
+     "@eslint/js": "^9.0.0",
+     "@langchain/core": "^0.3.0",
+     "@types/node": "^20.10.0",
+     "eslint": "^9.0.0",
+     "tsup": "^8.0.1",
+     "typescript": "^5.3.3",
+     "typescript-eslint": "^8.0.0"
+   },
+   "peerDependencies": {
+     "@langchain/core": ">=0.2.0",
+     "ai": ">=3.0.0"
+   },
+   "peerDependenciesMeta": {
+     "@langchain/core": {
+       "optional": true
+     },
+     "ai": {
+       "optional": true
+     }
+   },
+   "engines": {
+     "node": ">=18.0.0"
+   }
+ }
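
The `exports` map above is what routes consumers to the bundles shown earlier: the `./vercel-ai` subpath serves the `.mjs` or `.js` file depending on how it is loaded. A sketch of assumed consumer code (not package contents), showing the two alternatives:

  // In an ESM module, this resolves to ./dist/integrations/vercel-ai.mjs:
  import { wrapStreamText } from "@usetransactional/llm-node/vercel-ai";

  // In a CommonJS module, this resolves to ./dist/integrations/vercel-ai.js:
  const { wrapAiSdk } = require("@usetransactional/llm-node/vercel-ai");

Since `ai` and `@langchain/core` are declared as optional peer dependencies, a plain install pulls in only `nanoid`; each integration activates only when its peer is actually present.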