@usetransactional/llm-node 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,103 @@
1
+ # @usetransactional/llm-node
2
+
3
+ LLM Ops SDK for Transactional - AI observability with cost tracking, trace analysis, and performance monitoring.
4
+
5
+ ## Installation
6
+
7
+ ```bash
8
+ npm install @usetransactional/llm-node
9
+ ```
10
+
11
+ ## Quick Start
12
+
13
+ ```typescript
14
+ import { initLlmOps, getLlmOps } from '@usetransactional/llm-node';
15
+
16
+ // Initialize once at startup
17
+ initLlmOps({
18
+ dsn: process.env.TRANSACTIONAL_LLM_OPS_DSN!,
19
+ });
20
+
21
+ // Create traces
22
+ const llmOps = getLlmOps();
23
+
24
+ const trace = llmOps.trace({
25
+ name: 'chat-completion',
26
+ input: { prompt: 'Hello!' },
27
+ userId: 'user-123',
28
+ });
29
+
30
+ const generation = llmOps.generation({
31
+ name: 'gpt-4o',
32
+ modelName: 'gpt-4o',
33
+ input: { messages: [...] },
34
+ });
35
+
36
+ await generation.end({
37
+ output: { content: 'Hi there!' },
38
+ promptTokens: 10,
39
+ completionTokens: 5,
40
+ });
41
+
42
+ await trace.end({ output: { response: 'Hi there!' } });
43
+ ```
44
+
45
+ ## LangChain Integration
46
+
47
+ ```typescript
48
+ import { TransactionalCallbackHandler } from '@usetransactional/llm-node/langchain';
49
+ import { ChatOpenAI } from '@langchain/openai';
50
+
51
+ const handler = new TransactionalCallbackHandler({
52
+ sessionId: 'conversation-123',
53
+ userId: 'user-456',
54
+ });
55
+
56
+ const model = new ChatOpenAI({ modelName: 'gpt-4o' });
57
+
58
+ const response = await model.invoke('Hello!', {
59
+ callbacks: [handler],
60
+ });
61
+ ```
62
+
63
+ ## Vercel AI SDK Integration
64
+
65
+ ```typescript
66
+ import { wrapAiSdk } from '@usetransactional/llm-node/vercel-ai';
67
+ import { generateText } from 'ai';
68
+ import { openai } from '@ai-sdk/openai';
69
+
70
+ const wrappedGenerateText = wrapAiSdk(generateText);
71
+
72
+ const { text } = await wrappedGenerateText({
73
+ model: openai('gpt-4o'),
74
+ prompt: 'Hello!',
75
+ });
76
+ ```
77
+
78
+ ## Configuration
79
+
80
+ ```typescript
81
+ initLlmOps({
82
+ // Required: Your project DSN
83
+ dsn: 'https://pk_...@api.transactional.dev/observability/42',
84
+
85
+ // Optional: Disable tracing
86
+ enabled: process.env.NODE_ENV === 'production',
87
+
88
+ // Optional: Batch settings
89
+ batchSize: 100,
90
+ flushInterval: 5000,
91
+
92
+ // Optional: Debug mode
93
+ debug: false,
94
+ });
95
+ ```
96
+
97
+ ## Documentation
98
+
99
+ Full documentation available at [transactional.dev/docs/llm-ops](https://transactional.dev/docs/llm-ops)
100
+
101
+ ## License
102
+
103
+ MIT
@@ -0,0 +1,319 @@
1
+ // src/client.ts
2
+ import { nanoid } from "nanoid";
// Module-level trace context shared across the SDK: tracks which trace
// (and, optionally, which observation) is currently "active" so nested
// observations can attach themselves without explicit IDs.
var currentTraceId;
var currentObservationId;

/**
 * Record the active trace (and optional observation) for subsequent calls.
 */
function setTraceContext(traceId, observationId) {
  currentTraceId = traceId;
  currentObservationId = observationId;
}

/**
 * Read the active trace context. Either field may be undefined.
 */
function getTraceContext() {
  return { traceId: currentTraceId, observationId: currentObservationId };
}

/**
 * Drop the active trace context entirely.
 */
function clearTraceContext() {
  currentTraceId = undefined;
  currentObservationId = undefined;
}
var LlmOpsClient = class {
  /**
   * @param {object} config - LlmOpsConfig: either a `dsn`, or `publicKey` +
   *   `projectId`, plus optional `baseUrl`, `enabled`, `batchSize`,
   *   `flushInterval`, `debug`.
   */
  constructor(config) {
    // Items (traces/observations) buffered until the next batch flush.
    this.queue = [];
    this.config = this.parseConfig(config);
    if (this.config.enabled) {
      this.startFlushTimer();
    }
  }
  /**
   * Resolve user-supplied config into a complete internal config.
   * A DSN of the form https://{publicKey}@host/observability/{projectId}
   * takes precedence over individually supplied publicKey/projectId/baseUrl.
   * @throws {Error} on a malformed DSN or missing credentials.
   */
  parseConfig(config) {
    let publicKey = config.publicKey;
    let projectId = config.projectId;
    let baseUrl = config.baseUrl || "https://api.transactional.dev";
    if (config.dsn) {
      try {
        const url = new URL(config.dsn);
        publicKey = url.username;
        const pathParts = url.pathname.split("/").filter(Boolean);
        // The last path segment is the numeric project id.
        projectId = parseInt(pathParts[pathParts.length - 1] || "0", 10);
        baseUrl = `${url.protocol}//${url.host}`;
      } catch {
        throw new Error(`Invalid DSN format: ${config.dsn}`);
      }
      if (Number.isNaN(projectId)) {
        // DSN path ended in a non-numeric segment, e.g. .../observability/abc
        throw new Error(`Invalid DSN format: ${config.dsn}`);
      }
    }
    if (!publicKey || !projectId) {
      throw new Error("LlmOps requires either a DSN or publicKey + projectId");
    }
    return {
      publicKey,
      projectId,
      baseUrl,
      enabled: config.enabled ?? true,
      batchSize: config.batchSize ?? 100,
      flushInterval: config.flushInterval ?? 5e3,
      debug: config.debug ?? false
    };
  }
  /**
   * Start the periodic background flush. The timer is unref'd where
   * supported (Node.js) so it never keeps the process alive on its own.
   */
  startFlushTimer() {
    this.flushTimer = setInterval(() => {
      this.flush().catch((err) => {
        if (this.config.debug) {
          console.error("[LlmOps] Flush error:", err);
        }
      });
    }, this.config.flushInterval);
    // Node returns a Timeout with unref(); browsers return a number, so
    // guard with optional chaining.
    this.flushTimer.unref?.();
  }
  // Debug-only logger; silent unless config.debug is true.
  log(message, ...args) {
    if (this.config.debug) {
      console.log(`[LlmOps] ${message}`, ...args);
    }
  }
  /**
   * Buffer an item for the next batch; kicks off an immediate
   * fire-and-forget flush once the buffer reaches batchSize.
   * No-op when the client is disabled.
   */
  enqueue(item) {
    if (!this.config.enabled) return;
    this.queue.push(item);
    this.log("Enqueued:", item.type, item.id);
    if (this.queue.length >= this.config.batchSize) {
      this.flush().catch((err) => {
        if (this.config.debug) {
          console.error("[LlmOps] Flush error:", err);
        }
      });
    }
  }
  /**
   * Create a new trace and make it the current trace context.
   * @returns a handle with `end()` / `error()` to finish the trace.
   */
  trace(params) {
    const traceId = nanoid();
    const startTime = new Date().toISOString();
    this.enqueue({
      type: "trace",
      id: traceId,
      projectId: this.config.projectId,
      ...params,
      status: "RUNNING",
      startTime
    });
    setTraceContext(traceId);
    return {
      id: traceId,
      end: async (endParams) => {
        await this.updateTrace(traceId, {
          status: "COMPLETED",
          output: endParams?.output,
          endTime: new Date().toISOString()
        });
        clearTraceContext();
      },
      error: async (error) => {
        await this.updateTrace(traceId, {
          status: "ERROR",
          metadata: { error: error.message, stack: error.stack },
          endTime: new Date().toISOString()
        });
        clearTraceContext();
      }
    };
  }
  /**
   * Update an existing trace (enqueued like any other item).
   */
  async updateTrace(traceId, params) {
    this.enqueue({
      type: "trace",
      id: traceId,
      ...params
    });
  }
  /**
   * Create a new observation (span, generation, or event) under the
   * current — or explicitly provided — trace, and push it onto the context.
   * @throws {Error} when no trace context exists and none is supplied.
   */
  observation(params) {
    const observationId = nanoid();
    const startTime = new Date().toISOString();
    const context = getTraceContext();
    const traceId = params.traceId || context.traceId;
    if (!traceId) {
      throw new Error("No trace context found. Create a trace first.");
    }
    // Rename `type` -> `observationType` so it doesn't clash with the
    // queue item's own `type: "observation"` discriminator.
    const { type: observationType, ...restParams } = params;
    this.enqueue({
      type: "observation",
      id: observationId,
      traceId,
      parentObservationId: restParams.parentObservationId || context.observationId,
      ...restParams,
      observationType,
      status: "RUNNING",
      startTime
    });
    setTraceContext(traceId, observationId);
    return {
      id: observationId,
      end: async (endParams) => {
        await this.updateObservation(observationId, {
          status: "COMPLETED",
          output: endParams?.output,
          promptTokens: endParams?.promptTokens,
          completionTokens: endParams?.completionTokens,
          endTime: new Date().toISOString()
        });
        // Pop back to this observation's parent in the context stack.
        setTraceContext(traceId, params.parentObservationId || context.observationId);
      },
      error: async (error) => {
        await this.updateObservation(observationId, {
          status: "ERROR",
          metadata: { error: error.message, stack: error.stack },
          endTime: new Date().toISOString()
        });
        setTraceContext(traceId, params.parentObservationId || context.observationId);
      }
    };
  }
  /**
   * Create a generation observation (LLM call).
   */
  generation(params) {
    return this.observation({
      ...params,
      type: "GENERATION"
    });
  }
  /**
   * Create a span observation.
   */
  span(params) {
    return this.observation({
      ...params,
      type: "SPAN"
    });
  }
  /**
   * Create an event observation.
   */
  event(params) {
    return this.observation({
      ...params,
      type: "EVENT"
    });
  }
  /**
   * Update an existing observation (enqueued like any other item).
   */
  async updateObservation(observationId, params) {
    this.enqueue({
      type: "observation",
      id: observationId,
      ...params
    });
  }
  /**
   * Flush queued events to the API. At most one network batch is in
   * flight at a time; a flush already in progress is awaited first.
   * On failure the batch is re-queued by sendBatch and the error rethrown.
   */
  async flush() {
    if (this.queue.length === 0) return;
    if (this.pendingFlush) {
      // Ignore the previous flush's outcome so its failure can't poison
      // this call; any re-queued items are picked up below.
      await this.pendingFlush.catch(() => {});
    }
    // Re-check: the awaited flush may already have drained the queue.
    if (this.queue.length === 0) return;
    const batch = this.queue.splice(0, this.config.batchSize);
    this.log("Flushing", batch.length, "items");
    this.pendingFlush = this.sendBatch(batch);
    try {
      await this.pendingFlush;
    } finally {
      // Always clear the marker — previously a rejected sendBatch left
      // pendingFlush set to the rejected promise forever, so every later
      // flush() re-awaited (and rethrew) the same stale rejection and no
      // batch was ever sent again.
      this.pendingFlush = void 0;
    }
  }
  /**
   * POST one batch to the ingest endpoint. On any failure the batch is
   * put back at the front of the queue (for retry on the next flush) and
   * the error is rethrown.
   */
  async sendBatch(batch) {
    try {
      const response = await fetch(
        `${this.config.baseUrl}/observability/ingest/batch`,
        {
          method: "POST",
          headers: {
            "Authorization": `Bearer ${this.config.publicKey}`,
            "Content-Type": "application/json"
          },
          body: JSON.stringify({
            projectId: this.config.projectId,
            batch
          })
        }
      );
      if (!response.ok) {
        const text = await response.text();
        throw new Error(`Failed to send batch: ${response.status} ${text}`);
      }
      this.log("Batch sent successfully");
    } catch (error) {
      // Re-queue so the items are retried later rather than dropped.
      this.queue.unshift(...batch);
      throw error;
    }
  }
  /**
   * Shutdown the client: stop the flush timer and flush remaining events.
   */
  async shutdown() {
    if (this.flushTimer) {
      clearInterval(this.flushTimer);
    }
    await this.flush();
    this.log("Shutdown complete");
  }
};
// src/types/index.ts
// Runtime enum objects (string-valued, key === value) mirroring the
// TypeScript enum declarations in the type definitions.
var TraceStatus = /* @__PURE__ */ ((obj) => {
  for (const key of ["RUNNING", "COMPLETED", "ERROR"]) obj[key] = key;
  return obj;
})(TraceStatus || {});
var ObservationType = /* @__PURE__ */ ((obj) => {
  for (const key of ["SPAN", "GENERATION", "EVENT"]) obj[key] = key;
  return obj;
})(ObservationType || {});
var ObservationLevel = /* @__PURE__ */ ((obj) => {
  for (const key of ["DEBUG", "INFO", "WARNING", "ERROR"]) obj[key] = key;
  return obj;
})(ObservationLevel || {});
// src/index.ts
// Module-level singleton used by the convenience helpers below.
var defaultClient = null;

/**
 * Initialize the LLM Ops SDK. Call once at startup; duplicate calls are
 * ignored with a warning and the already-created client is returned.
 */
function initLlmOps(config) {
  if (defaultClient === null) {
    defaultClient = new LlmOpsClient(config);
  } else {
    console.warn("[LlmOps] SDK already initialized. Ignoring duplicate initialization.");
  }
  return defaultClient;
}

/**
 * Fetch the singleton client.
 * @throws {Error} when the SDK has not been initialized yet.
 */
function getLlmOps() {
  if (defaultClient) {
    return defaultClient;
  }
  throw new Error(
    "LLM Ops SDK not initialized. Call initLlmOps() first."
  );
}

/** Whether initLlmOps() has been called (and not reset since). */
function isInitialized() {
  return defaultClient !== null;
}

/**
 * Reset the SDK (mainly for testing). Shuts the client down best-effort;
 * shutdown failures are deliberately swallowed so reset never throws.
 */
function resetLlmOps() {
  if (!defaultClient) return;
  defaultClient.shutdown().catch(() => {
  });
  defaultClient = null;
}
306
+
307
+ export {
308
+ setTraceContext,
309
+ getTraceContext,
310
+ clearTraceContext,
311
+ LlmOpsClient,
312
+ TraceStatus,
313
+ ObservationType,
314
+ ObservationLevel,
315
+ initLlmOps,
316
+ getLlmOps,
317
+ isInitialized,
318
+ resetLlmOps
319
+ };
@@ -0,0 +1,289 @@
/**
 * LLM Ops Type Definitions
 *
 * Core types for the LLM Ops SDK.
 */
/** Lifecycle state shared by traces and observations. */
declare enum TraceStatus {
    RUNNING = "RUNNING",
    COMPLETED = "COMPLETED",
    ERROR = "ERROR"
}
/** Kind of observation recorded inside a trace. */
declare enum ObservationType {
    SPAN = "SPAN",
    GENERATION = "GENERATION",
    EVENT = "EVENT"
}
/** Severity level that may be attached to an observation. */
declare enum ObservationLevel {
    DEBUG = "DEBUG",
    INFO = "INFO",
    WARNING = "WARNING",
    ERROR = "ERROR"
}
+ interface Trace {
23
+ id: string;
24
+ projectId: number;
25
+ sessionId?: string;
26
+ name: string;
27
+ status: TraceStatus;
28
+ input?: Record<string, unknown>;
29
+ output?: Record<string, unknown>;
30
+ metadata?: Record<string, unknown>;
31
+ tags?: string[];
32
+ userId?: string;
33
+ startTime: string;
34
+ endTime?: string;
35
+ totalTokens: number;
36
+ totalCost: number;
37
+ latencyMs?: number;
38
+ }
39
+ interface CreateTraceParams {
40
+ name: string;
41
+ sessionId?: string;
42
+ input?: Record<string, unknown>;
43
+ metadata?: Record<string, unknown>;
44
+ tags?: string[];
45
+ userId?: string;
46
+ }
47
+ interface UpdateTraceParams {
48
+ status?: TraceStatus;
49
+ output?: Record<string, unknown>;
50
+ metadata?: Record<string, unknown>;
51
+ endTime?: string;
52
+ }
53
+ interface Observation {
54
+ id: string;
55
+ traceId: string;
56
+ parentObservationId?: string;
57
+ type: ObservationType;
58
+ name: string;
59
+ status: TraceStatus;
60
+ modelName?: string;
61
+ input?: Record<string, unknown>;
62
+ output?: Record<string, unknown>;
63
+ promptTokens?: number;
64
+ completionTokens?: number;
65
+ totalTokens?: number;
66
+ cost?: number;
67
+ startTime: string;
68
+ endTime?: string;
69
+ latencyMs?: number;
70
+ metadata?: Record<string, unknown>;
71
+ level?: ObservationLevel;
72
+ }
73
+ interface CreateObservationParams {
74
+ traceId?: string;
75
+ parentObservationId?: string;
76
+ type: ObservationType;
77
+ name: string;
78
+ modelName?: string;
79
+ input?: Record<string, unknown>;
80
+ metadata?: Record<string, unknown>;
81
+ level?: ObservationLevel;
82
+ }
83
+ interface UpdateObservationParams {
84
+ status?: TraceStatus;
85
+ output?: Record<string, unknown>;
86
+ promptTokens?: number;
87
+ completionTokens?: number;
88
+ metadata?: Record<string, unknown>;
89
+ endTime?: string;
90
+ }
91
+ interface Session {
92
+ id: string;
93
+ projectId: number;
94
+ externalId?: string;
95
+ userId?: string;
96
+ metadata?: Record<string, unknown>;
97
+ startTime: string;
98
+ endTime?: string;
99
+ traceCount: number;
100
+ totalTokens: number;
101
+ totalCost: number;
102
+ }
103
+ interface UpsertSessionParams {
104
+ id?: string;
105
+ userId?: string;
106
+ metadata?: Record<string, unknown>;
107
+ }
108
+ interface BatchIngestParams {
109
+ traces?: CreateTraceParams[];
110
+ observations?: CreateObservationParams[];
111
+ sessions?: UpsertSessionParams[];
112
+ }
113
+ interface BatchIngestResult {
114
+ success: boolean;
115
+ tracesCreated: number;
116
+ observationsCreated: number;
117
+ sessionsCreated: number;
118
+ errors?: string[];
119
+ }
120
+ interface LlmOpsConfig {
121
+ /** DSN format: https://{publicKey}@api.transactional.dev/observability/{projectId} */
122
+ dsn?: string;
123
+ /** Public key (alternative to DSN) */
124
+ publicKey?: string;
125
+ /** Project ID (alternative to DSN) */
126
+ projectId?: number;
127
+ /** Base URL (alternative to DSN) */
128
+ baseUrl?: string;
129
+ /** Enable/disable tracing (default: true) */
130
+ enabled?: boolean;
131
+ /** Batch size before flushing (default: 100) */
132
+ batchSize?: number;
133
+ /** Flush interval in ms (default: 5000) */
134
+ flushInterval?: number;
135
+ /** Enable debug logging (default: false) */
136
+ debug?: boolean;
137
+ }
138
+ interface TraceHandle {
139
+ id: string;
140
+ end: (params?: {
141
+ output?: Record<string, unknown>;
142
+ }) => Promise<void>;
143
+ error: (error: Error) => Promise<void>;
144
+ }
145
+ interface ObservationHandle {
146
+ id: string;
147
+ end: (params?: {
148
+ output?: Record<string, unknown>;
149
+ promptTokens?: number;
150
+ completionTokens?: number;
151
+ }) => Promise<void>;
152
+ error: (error: Error) => Promise<void>;
153
+ }
/**
 * LLM Ops Client
 *
 * Main client for sending traces and observations to the LLM Ops API.
 */

/** Record the active trace (and optional observation) for subsequent calls. */
declare function setTraceContext(traceId: string, observationId?: string): void;
/** Read the active trace context; either field may be undefined. */
declare function getTraceContext(): {
    traceId?: string;
    observationId?: string;
};
/** Drop the active trace context entirely. */
declare function clearTraceContext(): void;
declare class LlmOpsClient {
    private config;
    private queue;
    private flushTimer?;
    private pendingFlush?;
    constructor(config: LlmOpsConfig);
    private parseConfig;
    private startFlushTimer;
    private log;
    private enqueue;
    /**
     * Create a new trace
     */
    trace(params: CreateTraceParams): TraceHandle;
    /**
     * Update an existing trace
     */
    updateTrace(traceId: string, params: UpdateTraceParams): Promise<void>;
    /**
     * Create a new observation (span, generation, or event)
     */
    observation(params: CreateObservationParams): ObservationHandle;
    /**
     * Create a generation observation (LLM call)
     */
    generation(params: Omit<CreateObservationParams, 'type'>): ObservationHandle;
    /**
     * Create a span observation
     */
    span(params: Omit<CreateObservationParams, 'type'>): ObservationHandle;
    /**
     * Create an event observation
     */
    event(params: Omit<CreateObservationParams, 'type'>): ObservationHandle;
    /**
     * Update an existing observation
     */
    updateObservation(observationId: string, params: UpdateObservationParams): Promise<void>;
    /**
     * Flush queued events to the API
     */
    flush(): Promise<void>;
    private sendBatch;
    /**
     * Shutdown the client and flush remaining events
     */
    shutdown(): Promise<void>;
}

/**
 * Transactional LLM Ops SDK
 *
 * AI observability with cost tracking, trace analysis, and performance monitoring.
 *
 * @example
 * ```typescript
 * import { initLlmOps, getLlmOps } from '@usetransactional/llm-node';
 *
 * initLlmOps({
 *   dsn: process.env.TRANSACTIONAL_LLM_OPS_DSN!,
 * });
 *
 * const llmOps = getLlmOps();
 *
 * const trace = llmOps.trace({
 *   name: 'chat-completion',
 *   input: { prompt: 'Hello!' },
 * });
 *
 * const generation = llmOps.generation({
 *   name: 'gpt-4o',
 *   modelName: 'gpt-4o',
 * });
 *
 * await generation.end({
 *   output: { content: 'Hi there!' },
 *   promptTokens: 10,
 *   completionTokens: 5,
 * });
 *
 * await trace.end({ output: { response: 'Hi there!' } });
 * ```
 */

/**
 * Initialize the LLM Ops SDK
 *
 * Call this once at application startup before using any tracing functions.
 *
 * @param config - Configuration options including DSN
 * @returns The initialized client instance
 *
 * @example
 * ```typescript
 * initLlmOps({
 *   dsn: 'https://pk_...@api.transactional.dev/observability/42',
 * });
 * ```
 */
declare function initLlmOps(config: LlmOpsConfig): LlmOpsClient;
/**
 * Get the LLM Ops client instance
 *
 * @throws Error if SDK has not been initialized
 * @returns The client instance
 *
 * @example
 * ```typescript
 * const llmOps = getLlmOps();
 * const trace = llmOps.trace({ name: 'my-trace' });
 * ```
 */
declare function getLlmOps(): LlmOpsClient;
/**
 * Check if the SDK is initialized
 */
declare function isInitialized(): boolean;
/**
 * Reset the SDK (mainly for testing)
 */
declare function resetLlmOps(): void;
288
+
289
+ export { type BatchIngestParams, type BatchIngestResult, type CreateObservationParams, type CreateTraceParams, LlmOpsClient, type LlmOpsConfig, type Observation, type ObservationHandle, ObservationLevel, ObservationType, type Session, type Trace, type TraceHandle, TraceStatus, type UpdateObservationParams, type UpdateTraceParams, clearTraceContext, getLlmOps, getTraceContext, initLlmOps, isInitialized, resetLlmOps, setTraceContext };