moda-ai 0.1.3 → 0.1.8

package/README.md CHANGED
@@ -100,9 +100,9 @@ await withContext('conv_123', 'user_456', async () => {
  });
  ```

- ## Automatic Fallback
+ ## Automatic Fallback (Simple Chatbots Only)

- If you don't set a conversation ID, the SDK automatically computes one from the first user message and system prompt. This works well for simple use cases but explicit IDs are recommended for production:
+ If you don't set a conversation ID, the SDK automatically computes one by hashing the first user message and system prompt. **This only works for simple chatbots where you pass the full message history with each API call:**

  ```typescript
  // Turn 1
@@ -114,7 +114,44 @@ messages.push({ role: 'assistant', content: r1.choices[0].message.content });
  messages.push({ role: 'user', content: 'How do I read a file?' });
  const r2 = await openai.chat.completions.create({ model: 'gpt-4', messages });

- // Both turns have the SAME conversation_id in your Moda dashboard
+ // Both turns have the SAME conversation_id because "Hi, help with TypeScript"
+ // is still the first user message in both calls
+ ```
+
+ ### Why This Works
+
+ LLM APIs are stateless. Each API call must include the full conversation history. The SDK extracts the first user message from the `messages` array and hashes it to create a stable conversation ID across turns.
+
+ ### When Automatic Detection Does NOT Work
+
+ **Agent frameworks (LangChain, Claude Agent SDK, CrewAI, AutoGPT, etc.) do NOT pass full message history.** Each agent iteration typically passes only:
+ - System prompt (with context baked in)
+ - Tool results from the previous step
+ - A continuation prompt
+
+ This means each iteration has a **different** first user message, resulting in **different** conversation IDs:
+
+ ```typescript
+ // Agent iteration 1
+ messages = [{ role: 'user', content: 'What are my top clusters?' }] // conv_abc123
+
+ // Agent iteration 2 (tool result)
+ messages = [{ role: 'user', content: 'Tool returned: ...' }] // conv_xyz789 - DIFFERENT!
+
+ // Agent iteration 3
+ messages = [{ role: 'user', content: 'Based on the data...' }] // conv_def456 - DIFFERENT!
+ ```
+
+ **For agent-based applications, you MUST use explicit conversation IDs:**
+
+ ```typescript
+ // Wrap your entire agent execution
+ Moda.conversationId = 'agent_session_' + sessionId;
+
+ const agent = new LangChainAgent();
+ await agent.run('What are my top clusters?'); // All internal LLM calls share the same ID
+
+ Moda.conversationId = null;
  ```
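The fallback hashing described above can be pictured as follows. This is a minimal sketch, not the SDK's actual implementation: it assumes a SHA-256 digest over the system prompt plus the first user message, truncated into a `conv_`-prefixed ID; the exported `computeConversationId` may use different inputs and encoding.

```typescript
import { createHash } from 'crypto';

type Message = { role: 'system' | 'user' | 'assistant'; content: string };

// Hypothetical fallback: hash the system prompt + first user message so the
// same opening turn always maps to the same conversation ID.
function fallbackConversationId(messages: Message[]): string {
  const system = messages.find((m) => m.role === 'system')?.content ?? '';
  const firstUser = messages.find((m) => m.role === 'user')?.content ?? '';
  const digest = createHash('sha256')
    .update(system)
    .update('\u0000') // separator so ('ab', 'c') !== ('a', 'bc')
    .update(firstUser)
    .digest('hex');
  return `conv_${digest.slice(0, 12)}`; // truncation length is an assumption
}
```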

  ## Anthropic Support
@@ -153,6 +190,71 @@ for await (const chunk of stream) {
  // Streaming responses are automatically tracked
  ```

+ ## Using with Sentry (or other OpenTelemetry SDKs)
+
+ The Moda SDK automatically detects and coexists with other OpenTelemetry-based SDKs like Sentry. When an existing TracerProvider is detected, Moda adds its SpanProcessor to the existing provider instead of creating a new one.
+
+ ### Sentry v8+ Integration
+
+ Sentry v8+ uses OpenTelemetry internally for tracing. Initialize Sentry first, then Moda:
+
+ ```typescript
+ import * as Sentry from '@sentry/node';
+ import { Moda } from 'moda-ai';
+ import OpenAI from 'openai';
+
+ // 1. Initialize Sentry FIRST (sets up OpenTelemetry TracerProvider)
+ Sentry.init({
+   dsn: 'https://xxx@xxx.ingest.sentry.io/xxx',
+   tracesSampleRate: 1.0,
+ });
+
+ // 2. Initialize Moda SECOND (detects Sentry's provider automatically)
+ await Moda.init('moda_your_api_key', {
+   debug: true, // Shows: "[Moda] Detected existing TracerProvider, adding Moda SpanProcessor to it"
+ });
+
+ // 3. Use OpenAI normally - spans go to BOTH Sentry and Moda
+ const openai = new OpenAI();
+ const response = await openai.chat.completions.create({
+   model: 'gpt-4o-mini',
+   messages: [{ role: 'user', content: 'Hello!' }],
+ });
+
+ // 4. Cleanup - Moda shutdown preserves Sentry
+ await Moda.flush();
+ await Moda.shutdown(); // Only shuts down Moda's processor; Sentry continues working
+ ```
+
+ ### How It Works
+
+ When Moda detects an existing TracerProvider (e.g., from Sentry):
+ - Moda adds its SpanProcessor to the existing provider
+ - Both SDKs receive the same spans with identical trace IDs
+ - `Moda.shutdown()` only removes Moda's processor, preserving the other SDK
+ - You can re-initialize Moda after shutdown
+
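The last two bullets can be exercised directly. A minimal sketch, continuing from the Sentry snippet above (so `Sentry` and `Moda` are already imported) and assuming Sentry v8's `Sentry.startSpan` API:

```typescript
// After Moda.shutdown(), only Moda's SpanProcessor is detached;
// Sentry's provider and its own processors keep working.
await Moda.flush();
await Moda.shutdown();

Sentry.startSpan({ name: 'post-moda-shutdown' }, () => {
  // This span still reaches Sentry.
});

// Re-initializing later re-attaches Moda's processor to the same provider.
await Moda.init('moda_your_api_key');
```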
+ ### Expected Behavior
+
+ With `debug: true`, you should see:
+ ```
+ [Moda] Detected existing TracerProvider, adding Moda SpanProcessor to it
+ ```
+
+ You should NOT see:
+ ```
+ Error: Attempted duplicate registration of tracer provider
+ ```
+
+ ### Supported SDKs
+
+ This coexistence works with any SDK that uses OpenTelemetry's TracerProvider:
+ - Sentry v8+
+ - Datadog APM
+ - New Relic
+ - Honeycomb
+ - Custom OpenTelemetry setups
+
  ## Configuration Options

  ```typescript
package/dist/index.cjs CHANGED
@@ -7,11 +7,30 @@ var sdkTraceNode = require('@opentelemetry/sdk-trace-node');
  var sdkTraceBase = require('@opentelemetry/sdk-trace-base');
  var exporterTraceOtlpProto = require('@opentelemetry/exporter-trace-otlp-proto');
  var resources = require('@opentelemetry/resources');
- var semanticConventions = require('@opentelemetry/semantic-conventions');
+ var semconv = require('@opentelemetry/semantic-conventions');
  var module$1 = require('module');
  var crypto = require('crypto');
  var async_hooks = require('async_hooks');

+ function _interopNamespaceDefault(e) {
+   var n = Object.create(null);
+   if (e) {
+     Object.keys(e).forEach(function (k) {
+       if (k !== 'default') {
+         var d = Object.getOwnPropertyDescriptor(e, k);
+         Object.defineProperty(n, k, d.get ? d : {
+           enumerable: true,
+           get: function () { return e[k]; }
+         });
+       }
+     });
+   }
+   n.default = e;
+   return Object.freeze(n);
+ }
+
+ var semconv__namespace = /*#__PURE__*/_interopNamespaceDefault(semconv);
+
  /**
   * Default configuration values
   */
@@ -1021,6 +1040,9 @@ async function registerInstrumentations() {

  let provider = null;
  let exporter = null;
+ let modaProcessor = null;
+ // Track if we're using an external provider (e.g., Sentry)
+ let usingExternalProvider = false;
  /**
   * Check if the SDK is initialized
   */
@@ -1074,12 +1096,6 @@ async function init(apiKey, options = {}) {
  if (mergedOptions.debug) {
    api.diag.setLogger(new api.DiagConsoleLogger(), api.DiagLogLevel.DEBUG);
  }
- // Create resource with service info
- const resource = new resources.Resource({
-   [semanticConventions.ATTR_SERVICE_NAME]: 'moda-sdk',
-   [semanticConventions.ATTR_SERVICE_VERSION]: '0.1.0',
-   'moda.environment': mergedOptions.environment,
- });
  // Create OTLP exporter with Moda API key in headers
  exporter = new exporterTraceOtlpProto.OTLPTraceExporter({
    url: mergedOptions.baseUrl,
@@ -1088,20 +1104,54 @@ async function init(apiKey, options = {}) {
    'Content-Type': 'application/x-protobuf',
    },
  });
- // Create tracer provider
- provider = new sdkTraceNode.NodeTracerProvider({
-   resource,
- });
- // Use BatchSpanProcessor for production, SimpleSpanProcessor for debug
- const processor = mergedOptions.debug
+ // Create Moda's span processor
+ modaProcessor = mergedOptions.debug
    ? new sdkTraceBase.SimpleSpanProcessor(exporter)
    : new sdkTraceBase.BatchSpanProcessor(exporter, {
        maxQueueSize: mergedOptions.batchSize * 2,
        maxExportBatchSize: mergedOptions.batchSize,
        scheduledDelayMillis: mergedOptions.flushInterval,
      });
- provider.addSpanProcessor(processor);
- provider.register();
+ // Check if there's already a registered TracerProvider (e.g., from Sentry)
+ // Sentry (and other SDKs) may wrap their provider in a ProxyTracerProvider,
+ // so we need to check both the proxy and its delegate
+ const existingProvider = api.trace.getTracerProvider();
+ const isProxyProvider = existingProvider?.constructor?.name === 'ProxyTracerProvider';
+ // Try to get the underlying provider if it's a proxy
+ let targetProvider = existingProvider;
+ if (isProxyProvider) {
+   // ProxyTracerProvider wraps the real provider - get the delegate
+   const delegate = existingProvider.getDelegate?.() || existingProvider._delegate;
+   if (delegate && delegate.constructor?.name !== 'ProxyTracerProvider') {
+     targetProvider = delegate;
+   }
+ }
+ // Check if the target provider has addSpanProcessor (indicates a real SDK-managed provider)
+ const hasAddSpanProcessor = targetProvider &&
+   typeof targetProvider.addSpanProcessor === 'function';
+ // Detect if this is a real external provider (not just the default noop proxy)
+ const isExternalProvider = hasAddSpanProcessor &&
+   targetProvider?.constructor?.name !== 'ProxyTracerProvider';
+ if (isExternalProvider && targetProvider) {
+   // Another SDK (like Sentry) already set up OTEL - add our processor to their provider
+   targetProvider.addSpanProcessor(modaProcessor);
+   usingExternalProvider = true;
+   if (mergedOptions.debug) {
+     console.log('[Moda] Detected existing TracerProvider, adding Moda SpanProcessor to it');
+   }
+ }
+ else {
+   // No existing provider - create and register our own
+   const resource = new resources.Resource({
+     [semconv__namespace.ATTR_SERVICE_NAME]: 'moda-sdk',
+     [semconv__namespace.ATTR_SERVICE_VERSION]: '0.1.0',
+     'moda.environment': mergedOptions.environment,
+   });
+   provider = new sdkTraceNode.NodeTracerProvider({ resource });
+   provider.addSpanProcessor(modaProcessor);
+   provider.register();
+   usingExternalProvider = false;
+ }
  // Register LLM instrumentations (async - uses dynamic imports for ESM compatibility)
  await registerInstrumentations();
  state.initialized = true;
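Stripped of bundler noise, the detection added in this hunk reduces to roughly the following TypeScript. This is a condensed restatement for readers of the compiled output, not a separate API; the `getDelegate`/`_delegate` access mirrors the bundle above.

```typescript
import { trace } from '@opentelemetry/api';

// The globally registered provider is often a ProxyTracerProvider;
// a real SDK-managed provider (e.g., Sentry's) sits behind its delegate.
function findExternalProvider(): unknown | null {
  const globalProvider: any = trace.getTracerProvider();
  let target: any = globalProvider;
  if (globalProvider?.constructor?.name === 'ProxyTracerProvider') {
    const delegate = globalProvider.getDelegate?.() ?? globalProvider._delegate;
    if (delegate && delegate.constructor?.name !== 'ProxyTracerProvider') {
      target = delegate;
    }
  }
  // Only a real SDK provider exposes addSpanProcessor; the default no-op
  // proxy does not, so this distinguishes "another SDK already registered
  // a provider" from "nobody registered anything yet".
  const isExternal =
    typeof target?.addSpanProcessor === 'function' &&
    target?.constructor?.name !== 'ProxyTracerProvider';
  return isExternal ? target : null;
}
```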
@@ -1123,11 +1173,20 @@ async function init(apiKey, options = {}) {
   * ```
   */
  async function flush() {
- if (!state.initialized || !provider) {
+ if (!state.initialized) {
    return;
  }
  try {
- await provider.forceFlush();
+ if (usingExternalProvider) {
+   // External provider: flush our processor directly
+   if (modaProcessor) {
+     await modaProcessor.forceFlush();
+   }
+ }
+ else if (provider) {
+   // Our own provider: flush the whole provider
+   await provider.forceFlush();
+ }
  if (state.options.debug) {
    console.log('[Moda] Flushed all pending spans');
  }
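With flush now working in both modes, a typical graceful-shutdown hook for a long-running service looks like the following. A sketch; the signal handling is an assumption about your deployment, not part of the SDK.

```typescript
import { Moda } from 'moda-ai';

// Drain buffered spans before the process exits. flush() picks the right
// path internally: modaProcessor.forceFlush() when sharing an external
// provider, provider.forceFlush() when Moda owns the provider.
process.on('SIGTERM', async () => {
  await Moda.flush();
  await Moda.shutdown();
  process.exit(0);
});
```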
@@ -1156,11 +1215,21 @@ async function shutdown() {
    return;
  }
  try {
- if (provider) {
-   await provider.shutdown();
+ if (usingExternalProvider) {
+   // External provider: shutdown our processor only, preserve their provider
+   if (modaProcessor) {
+     await modaProcessor.shutdown();
+   }
+   if (state.options.debug) {
+     console.log('[Moda] Moda processor shutdown complete (external provider preserved)');
+   }
  }
- if (state.options.debug) {
-   console.log('[Moda] SDK shutdown complete');
+ else if (provider) {
+   // Our own provider: shutdown everything
+   await provider.shutdown();
+   if (state.options.debug) {
+     console.log('[Moda] SDK shutdown complete');
+   }
  }
  }
  catch (error) {
@@ -1173,6 +1242,8 @@ async function shutdown() {
  resetState();
  provider = null;
  exporter = null;
+ modaProcessor = null;
+ usingExternalProvider = false;
  }
  }
  /**
@@ -1182,6 +1253,289 @@ async function shutdown() {
  function getTracer() {
    return api.trace.getTracer('moda-sdk', '0.1.0');
  }
+ /**
+  * Create a standalone Moda SpanProcessor for advanced OTEL setups.
+  * Use when you need full control over your OpenTelemetry configuration.
+  *
+  * @example
+  * ```typescript
+  * import { createModaSpanProcessor } from 'moda-ai';
+  * import { trace } from '@opentelemetry/api';
+  *
+  * const processor = createModaSpanProcessor({ apiKey: 'moda_xxx' });
+  * (trace.getTracerProvider() as any).addSpanProcessor(processor);
+  * ```
+  */
+ function createModaSpanProcessor(options) {
+   const { apiKey, baseUrl = DEFAULT_OPTIONS.baseUrl, debug = false, batchSize = DEFAULT_OPTIONS.batchSize, flushInterval = DEFAULT_OPTIONS.flushInterval, } = options;
+   if (!apiKey || typeof apiKey !== 'string') {
+     throw new Error('[Moda] API key is required');
+   }
+   const processorExporter = new exporterTraceOtlpProto.OTLPTraceExporter({
+     url: baseUrl,
+     headers: {
+       'Authorization': `Bearer ${apiKey}`,
+       'Content-Type': 'application/x-protobuf',
+     },
+   });
+   return debug
+     ? new sdkTraceBase.SimpleSpanProcessor(processorExporter)
+     : new sdkTraceBase.BatchSpanProcessor(processorExporter, {
+         maxQueueSize: batchSize * 2,
+         maxExportBatchSize: batchSize,
+         scheduledDelayMillis: flushInterval,
+       });
+ }
+
+ /**
+  * Manual LLM tracing API for instrumenting arbitrary LLM calls.
+  * Use this when you can't use auto-instrumented SDKs (OpenAI/Anthropic).
+  *
+  * @example
+  * ```typescript
+  * const result = await withLLMCall(
+  *   { vendor: 'openrouter', type: 'chat' },
+  *   async ({ span }) => {
+  *     span.reportRequest({ model: 'gpt-4', messages });
+  *     const response = await fetch('https://api.example.com/chat', {...});
+  *     const data = await response.json();
+  *     span.reportResponse({ model: data.model, usage: data.usage, completions: data.choices });
+  *     return data;
+  *   }
+  * );
+  * ```
+  */
+ /**
+  * Create an LLMSpanHelper that wraps an OpenTelemetry span
+  */
+ function createSpanHelper(span) {
+   return {
+     reportRequest(options) {
+       const { model, messages, conversationId, userId } = options;
+       // Set model
+       span.setAttribute('llm.request.model', model);
+       // Get effective context (global + local overrides)
+       const globalContext = getEffectiveContext();
+       // Determine conversation ID: local override > global > computed
+       let effectiveConversationId = conversationId;
+       if (!effectiveConversationId && globalContext.conversationId) {
+         effectiveConversationId = globalContext.conversationId;
+       }
+       if (!effectiveConversationId) {
+         effectiveConversationId = computeConversationId(messages);
+       }
+       span.setAttribute('moda.conversation_id', effectiveConversationId);
+       // Set user ID if provided or from global context
+       const effectiveUserId = userId ?? globalContext.userId;
+       if (effectiveUserId) {
+         span.setAttribute('moda.user_id', effectiveUserId);
+       }
+       // Format and set message attributes
+       const messageAttrs = formatMessagesForSpan(messages);
+       for (const [key, value] of Object.entries(messageAttrs)) {
+         span.setAttribute(key, value);
+       }
+     },
+     reportResponse(options) {
+       const { model, usage, completions } = options;
+       // Set response model if provided
+       if (model) {
+         span.setAttribute('llm.response.model', model);
+       }
+       // Set usage metrics
+       if (usage) {
+         const promptTokens = usage.prompt_tokens ?? usage.input_tokens;
+         const completionTokens = usage.completion_tokens ?? usage.output_tokens;
+         const totalTokens = usage.total_tokens ??
+           (promptTokens !== undefined && completionTokens !== undefined
+             ? promptTokens + completionTokens
+             : undefined);
+         if (promptTokens !== undefined) {
+           span.setAttribute('llm.usage.prompt_tokens', promptTokens);
+         }
+         if (completionTokens !== undefined) {
+           span.setAttribute('llm.usage.completion_tokens', completionTokens);
+         }
+         if (totalTokens !== undefined) {
+           span.setAttribute('llm.usage.total_tokens', totalTokens);
+         }
+       }
+       // Set completion attributes
+       if (completions && completions.length > 0) {
+         completions.forEach((completion, index) => {
+           // Handle OpenAI-style nested message or direct properties
+           const role = completion.role ?? completion.message?.role ?? 'assistant';
+           const content = completion.content ?? completion.message?.content ?? '';
+           const attrs = formatCompletionForSpan(role, content, index);
+           for (const [key, value] of Object.entries(attrs)) {
+             span.setAttribute(key, value);
+           }
+           // Set finish reason from first completion
+           if (index === 0 && completion.finish_reason) {
+             span.setAttribute('llm.response.finish_reason', completion.finish_reason);
+           }
+         });
+       }
+     },
+     get rawSpan() {
+       return span;
+     },
+   };
+ }
+ /**
+  * Wrap an arbitrary LLM call with OpenTelemetry tracing.
+  *
+  * Use this when you can't use auto-instrumented SDKs (OpenAI/Anthropic)
+  * and need to manually instrument LLM calls (e.g., direct fetch to OpenRouter,
+  * custom LLM providers, proxied requests).
+  *
+  * @param options - Vendor and request type configuration
+  * @param callback - Async function that makes the LLM call
+  * @returns The return value of the callback
+  *
+  * @example
+  * ```typescript
+  * const result = await withLLMCall(
+  *   { vendor: 'openrouter', type: 'chat' },
+  *   async ({ span }) => {
+  *     span.reportRequest({ model: 'anthropic/claude-3-sonnet', messages });
+  *
+  *     const response = await fetch('https://openrouter.ai/api/v1/chat/completions', {
+  *       method: 'POST',
+  *       headers: { Authorization: `Bearer ${apiKey}` },
+  *       body: JSON.stringify({ model, messages }),
+  *     });
+  *     const data = await response.json();
+  *
+  *     span.reportResponse({
+  *       model: data.model,
+  *       usage: data.usage,
+  *       completions: data.choices,
+  *     });
+  *
+  *     return data;
+  *   }
+  * );
+  * ```
+  */
+ async function withLLMCall(options, callback) {
+   const { vendor, type } = options;
+   const tracer = api.trace.getTracer('moda-sdk', '0.1.0');
+   const span = tracer.startSpan(`${vendor}.${type}`, {
+     attributes: {
+       'llm.vendor': vendor,
+       'llm.request.type': type,
+     },
+   });
+   const spanHelper = createSpanHelper(span);
+   try {
+     const result = await callback({ span: spanHelper });
+     span.setStatus({ code: api.SpanStatusCode.OK });
+     return result;
+   }
+   catch (error) {
+     span.setStatus({
+       code: api.SpanStatusCode.ERROR,
+       message: error instanceof Error ? error.message : String(error),
+     });
+     if (error instanceof Error) {
+       span.recordException(error);
+     }
+     throw error;
+   }
+   finally {
+     span.end();
+   }
+ }
+
+ /**
+  * Vercel AI SDK integration for Moda observability.
+  *
+  * The Vercel AI SDK has built-in OpenTelemetry support via `experimental_telemetry`.
+  * This module provides helper functions to integrate Moda with the AI SDK.
+  *
+  * @example
+  * ```typescript
+  * import { Moda } from 'moda-ai';
+  * import { generateText } from 'ai';
+  *
+  * Moda.init('your-api-key');
+  *
+  * const result = await generateText({
+  *   model: openai('gpt-4o'),
+  *   prompt: 'Hello',
+  *   experimental_telemetry: Moda.getVercelAITelemetry(),
+  * });
+  * ```
+  */
+ /**
+  * Get a telemetry configuration object for the Vercel AI SDK.
+  *
+  * This returns a configuration that can be passed directly to the
+  * `experimental_telemetry` option of AI SDK functions like `generateText`,
+  * `streamText`, `generateObject`, etc.
+  *
+  * The configuration includes:
+  * - Moda's OpenTelemetry tracer for span collection
+  * - Automatic inclusion of conversation_id and user_id in metadata
+  * - Configurable input/output recording for privacy control
+  *
+  * @param options - Optional configuration overrides
+  * @returns Telemetry configuration for Vercel AI SDK
+  *
+  * @example
+  * ```typescript
+  * import { Moda } from 'moda-ai';
+  * import { generateText } from 'ai';
+  * import { openai } from '@ai-sdk/openai';
+  *
+  * Moda.init('your-api-key');
+  * Moda.conversationId = 'conv_123';
+  *
+  * const result = await generateText({
+  *   model: openai('gpt-4o'),
+  *   prompt: 'Write a haiku about coding',
+  *   experimental_telemetry: Moda.getVercelAITelemetry(),
+  * });
+  * ```
+  *
+  * @example
+  * ```typescript
+  * // With custom options
+  * const result = await generateText({
+  *   model: openai('gpt-4o'),
+  *   prompt: 'Process this sensitive data',
+  *   experimental_telemetry: Moda.getVercelAITelemetry({
+  *     recordInputs: false, // Don't record sensitive prompts
+  *     recordOutputs: false, // Don't record sensitive outputs
+  *     functionId: 'sensitive-processor',
+  *     metadata: { operation: 'pii-processing' },
+  *   }),
+  * });
+  * ```
+  */
+ function getVercelAITelemetry(options = {}) {
+   const context = getEffectiveContext();
+   // Build metadata with Moda context
+   const metadata = {
+     ...options.metadata,
+   };
+   // Add Moda context to metadata
+   if (context.conversationId) {
+     metadata['moda.conversation_id'] = context.conversationId;
+   }
+   if (context.userId) {
+     metadata['moda.user_id'] = context.userId;
+   }
+   return {
+     isEnabled: true,
+     recordInputs: options.recordInputs,
+     recordOutputs: options.recordOutputs,
+     functionId: options.functionId,
+     metadata: Object.keys(metadata).length > 0 ? metadata : undefined,
+     tracer: getTracer(),
+   };
+ }

  /**
   * @moda/sdk - Official TypeScript/Node.js SDK for Moda LLM observability
@@ -1251,6 +1605,43 @@ const Moda = {
   * @see {@link getTracer}
   */
  getTracer,
+ /**
+  * Manually trace an LLM call when using non-instrumented providers
+  * @see {@link withLLMCall}
+  */
+ withLLMCall,
+ /**
+  * Get telemetry configuration for Vercel AI SDK integration.
+  * Returns a config object for the `experimental_telemetry` option.
+  * @see {@link getVercelAITelemetry}
+  *
+  * @example
+  * ```typescript
+  * import { generateText } from 'ai';
+  *
+  * const result = await generateText({
+  *   model: openai('gpt-4o'),
+  *   prompt: 'Hello',
+  *   experimental_telemetry: Moda.getVercelAITelemetry(),
+  * });
+  * ```
+  */
+ getVercelAITelemetry,
+ /**
+  * Create a standalone Moda SpanProcessor for advanced OTEL setups.
+  * Use when you need full control over your OpenTelemetry configuration.
+  * @see {@link createModaSpanProcessor}
+  *
+  * @example
+  * ```typescript
+  * import { Moda } from 'moda-ai';
+  * import { trace } from '@opentelemetry/api';
+  *
+  * const processor = Moda.createModaSpanProcessor({ apiKey: 'moda_xxx' });
+  * (trace.getTracerProvider() as any).addSpanProcessor(processor);
+  * ```
+  */
+ createModaSpanProcessor,
  /**
   * Get or set the global conversation ID.
   * Setting to null clears the conversation ID.
@@ -1302,6 +1693,7 @@ exports.Moda = Moda;
  exports.clearConversationId = clearConversationId;
  exports.clearUserId = clearUserId;
  exports.computeConversationId = computeConversationId;
+ exports.createModaSpanProcessor = createModaSpanProcessor;
  exports.default = Moda;
  exports.flush = flush;
  exports.generateRandomConversationId = generateRandomConversationId;
@@ -1309,6 +1701,7 @@ exports.getContext = getContext;
  exports.getEffectiveContext = getEffectiveContext;
  exports.getGlobalContext = getGlobalContext;
  exports.getTracer = getTracer;
+ exports.getVercelAITelemetry = getVercelAITelemetry;
  exports.init = init;
  exports.isInitialized = isInitialized;
  exports.isValidConversationId = isValidConversationId;
@@ -1317,5 +1710,6 @@ exports.setUserId = setUserId;
  exports.shutdown = shutdown;
  exports.withContext = withContext;
  exports.withConversationId = withConversationId;
+ exports.withLLMCall = withLLMCall;
  exports.withUserId = withUserId;
  //# sourceMappingURL=index.cjs.map
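Of the exports added in this release, `createModaSpanProcessor` is the lowest-level. Beyond the JSDoc's `(trace.getTracerProvider() as any)` cast, another way to wire it is to own the provider yourself. A sketch assuming the OTEL 1.x `NodeTracerProvider` API used throughout this bundle; the key value is a placeholder:

```typescript
import { NodeTracerProvider } from '@opentelemetry/sdk-trace-node';
import { createModaSpanProcessor } from 'moda-ai';

// Bring-your-own provider: you control the resource, sampling, and any
// additional span processors; Moda's is just one of them.
const provider = new NodeTracerProvider();
provider.addSpanProcessor(
  createModaSpanProcessor({
    apiKey: 'moda_your_api_key', // placeholder
    debug: false, // false => BatchSpanProcessor, true => SimpleSpanProcessor
  })
);
provider.register();
```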