moda-ai 0.1.1
- package/README.md +272 -0
- package/dist/index.cjs +1280 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.ts +384 -0
- package/dist/index.mjs +1256 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +74 -0
package/dist/index.cjs
ADDED
@@ -0,0 +1,1280 @@
'use strict';

Object.defineProperty(exports, '__esModule', { value: true });

var api = require('@opentelemetry/api');
var sdkTraceNode = require('@opentelemetry/sdk-trace-node');
var sdkTraceBase = require('@opentelemetry/sdk-trace-base');
var exporterTraceOtlpProto = require('@opentelemetry/exporter-trace-otlp-proto');
var resources = require('@opentelemetry/resources');
var semanticConventions = require('@opentelemetry/semantic-conventions');
var crypto = require('crypto');
var async_hooks = require('async_hooks');

/**
 * Default configuration values
 */
const DEFAULT_OPTIONS = {
    baseUrl: 'https://ingest.moda.so/v1/traces',
    environment: 'production',
    enabled: true,
    debug: false,
    batchSize: 100,
    flushInterval: 5000,
};

/**
 * Internal SDK state (shared module to avoid circular dependencies)
 */
const state = {
    initialized: false,
    apiKey: null,
    options: { ...DEFAULT_OPTIONS },
};
/**
 * Check if debug mode is enabled
 */
function isDebugEnabled() {
    return state.options.debug;
}
/**
 * Update state options
 */
function setStateOptions(options) {
    state.options = options;
}
/**
 * Reset state to initial values
 */
function resetState() {
    state.initialized = false;
    state.apiKey = null;
    state.options = { ...DEFAULT_OPTIONS };
}

/**
 * Compute SHA256 hash of input string
 * @param input - String to hash
 * @returns Hexadecimal hash string
 */
function sha256(input) {
    return crypto.createHash('sha256').update(input).digest('hex');
}
/**
 * Compute truncated SHA256 hash
 * @param input - String to hash
 * @param length - Number of characters to return (default: 16)
 * @returns Truncated hexadecimal hash string
 */
function sha256Short(input, length = 16) {
    return sha256(input).slice(0, length);
}

/**
 * Extract text content from various message content formats
 */
function extractTextContent(content) {
    if (content === null || content === undefined) {
        return '';
    }
    if (typeof content === 'string') {
        return content;
    }
    if (Array.isArray(content)) {
        return content
            .map((part) => {
                if (typeof part === 'string') {
                    return part;
                }
                if (part.type === 'text' && part.text) {
                    return part.text;
                }
                return '';
            })
            .filter(Boolean)
            .join('\n');
    }
    return '';
}
/**
 * Normalize OpenAI messages to generic Message format
 */
function normalizeOpenAIMessages(messages) {
    return messages.map((msg) => ({
        role: msg.role,
        content: normalizeOpenAIContent(msg.content),
        name: msg.name,
        tool_call_id: msg.tool_call_id,
    }));
}
/**
 * Normalize OpenAI content to string or ContentPart array
 */
function normalizeOpenAIContent(content) {
    if (content === null) {
        return '';
    }
    if (typeof content === 'string') {
        return content;
    }
    return content.map((part) => {
        if (part.type === 'text') {
            return { type: 'text', text: part.text };
        }
        if (part.type === 'image_url') {
            return { type: 'image_url', image_url: part.image_url };
        }
        return { type: 'text', text: '' };
    });
}
/**
 * Normalize Anthropic messages to generic Message format
 */
function normalizeAnthropicMessages(messages, systemPrompt) {
    const normalized = [];
    if (systemPrompt) {
        normalized.push({
            role: 'system',
            content: systemPrompt,
        });
    }
    for (const msg of messages) {
        normalized.push({
            role: msg.role,
            content: normalizeAnthropicContent(msg.content),
        });
    }
    return normalized;
}
/**
 * Normalize Anthropic content to string or ContentPart array
 */
function normalizeAnthropicContent(content) {
    if (typeof content === 'string') {
        return content;
    }
    return content.map((block) => {
        switch (block.type) {
            case 'text':
                return { type: 'text', text: block.text };
            case 'image':
                return { type: 'image', source: block.source };
            case 'tool_use':
                return {
                    type: 'tool_use',
                    text: JSON.stringify({ id: block.id, name: block.name, input: block.input }),
                };
            case 'tool_result':
                return {
                    type: 'tool_result',
                    text: typeof block.content === 'string'
                        ? block.content
                        : JSON.stringify(block.content),
                };
            case 'thinking':
                return { type: 'thinking', text: block.thinking };
            default:
                return { type: 'text', text: '' };
        }
    });
}
/**
 * Find the first user message in a message array
 */
function findFirstUserMessage(messages) {
    return messages.find((m) => m.role === 'user');
}
/**
 * Extract system prompt from messages
 */
function extractSystemPrompt(messages) {
    const systemMessage = messages.find((m) => m.role === 'system');
    if (!systemMessage)
        return undefined;
    return extractTextContent(systemMessage.content);
}
/**
 * Format messages for span attributes (OpenLLMetry compatible)
 */
function formatMessagesForSpan(messages) {
    const attributes = {};
    messages.forEach((msg, index) => {
        attributes[`llm.prompts.${index}.role`] = msg.role;
        attributes[`llm.prompts.${index}.content`] = extractTextContent(msg.content);
    });
    return attributes;
}
/**
 * Format completion for span attributes
 */
function formatCompletionForSpan(role, content, index = 0) {
    return {
        [`llm.completions.${index}.role`]: role,
        [`llm.completions.${index}.content`]: content,
    };
}

/**
 * AsyncLocalStorage instance for managing Moda context across async operations
 */
const storage = new async_hooks.AsyncLocalStorage();
/**
 * Get the current Moda context from AsyncLocalStorage
 * @returns Current context or empty object if not set
 */
function getContext() {
    return storage.getStore() ?? {};
}
/**
 * Global fallback context. Values set here persist across async boundaries
 * and apply when no AsyncLocalStorage context is active.
 * For scoped, nested contexts, use withConversationId instead.
 */
let globalContext = {};
/**
 * Set a global conversation ID that persists across async boundaries.
 * This is useful when you want to track a conversation across multiple API calls
 * without wrapping them in a callback.
 *
 * @param id - The conversation ID to set
 */
function setConversationId(id) {
    globalContext = { ...globalContext, conversationId: id };
}
/**
 * Clear the global conversation ID.
 */
function clearConversationId() {
    const { conversationId: _, ...rest } = globalContext;
    globalContext = rest;
}
/**
 * Set a global user ID that persists across async boundaries.
 *
 * @param id - The user ID to set
 */
function setUserId(id) {
    globalContext = { ...globalContext, userId: id };
}
/**
 * Clear the global user ID.
 */
function clearUserId() {
    const { userId: _, ...rest } = globalContext;
    globalContext = rest;
}
/**
 * Get the combined context (AsyncLocalStorage + global).
 * AsyncLocalStorage takes precedence over global context.
 */
function getEffectiveContext() {
    const localContext = getContext();
    return {
        ...globalContext,
        ...localContext,
    };
}
/**
 * Run a callback with a specific conversation ID in the AsyncLocalStorage context.
 * The conversation ID will be available to all async operations within the callback.
 *
 * @param id - The conversation ID to set
 * @param callback - The function to run with the context
 * @returns The return value of the callback
 *
 * @example
 * ```typescript
 * await withConversationId('my-conv-123', async () => {
 *   await openai.chat.completions.create({ ... });
 *   await openai.chat.completions.create({ ... });
 *   // Both calls will have the same conversation ID
 * });
 * ```
 */
function withConversationId(id, callback) {
    const currentContext = getEffectiveContext();
    return storage.run({ ...currentContext, conversationId: id }, callback);
}
/**
 * Run a callback with a specific user ID in the AsyncLocalStorage context.
 *
 * @param id - The user ID to set
 * @param callback - The function to run with the context
 * @returns The return value of the callback
 */
function withUserId(id, callback) {
    const currentContext = getEffectiveContext();
    return storage.run({ ...currentContext, userId: id }, callback);
}
/**
 * Run a callback with both conversation ID and user ID in context.
 *
 * @param conversationId - The conversation ID to set
 * @param userId - The user ID to set
 * @param callback - The function to run with the context
 * @returns The return value of the callback
 */
function withContext(conversationId, userId, callback) {
    return storage.run({ conversationId, userId }, callback);
}
/**
 * Get the global context (without AsyncLocalStorage context).
 * Useful for accessing the explicitly set conversationId and userId.
 */
function getGlobalContext() {
    return globalContext;
}

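/*
 * Illustrative example (editor's sketch, not part of the published file):
 * AsyncLocalStorage values take precedence over globals, so the scoped call
 * below is attributed to 'conv_b' while the surrounding calls keep 'conv_a'.
 *
 *   setConversationId('conv_a');
 *   await openai.chat.completions.create({ ... });       // conv_a
 *   await withConversationId('conv_b', () =>
 *       openai.chat.completions.create({ ... }));        // conv_b
 *   await openai.chat.completions.create({ ... });       // conv_a again
 */
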
/**
 * Compute a stable conversation ID from message history.
 *
 * The algorithm:
 * 1. If an explicit ID is provided (via context or parameter), use it
 * 2. Find the first user message in the conversation
 * 3. Compute SHA256 hash of: system prompt + first user message
 * 4. Return conv_[hash[:16]]
 *
 * This ensures that multi-turn conversations with the same starting point
 * always get the same conversation ID, enabling proper thread tracking.
 *
 * @param messages - Array of messages in the conversation
 * @param systemPrompt - Optional system prompt (separate from messages, e.g., Anthropic style)
 * @param explicitId - Optional explicit conversation ID to override computation
 * @returns Conversation ID in format conv_[16-char-hash] or the explicit ID
 */
function computeConversationId(messages, systemPrompt, explicitId) {
    // Priority 1: Explicit ID provided as parameter
    if (explicitId) {
        return explicitId;
    }
    // Priority 2: ID set in context (global or AsyncLocalStorage)
    const context = getEffectiveContext();
    if (context.conversationId) {
        return context.conversationId;
    }
    // Priority 3: Compute from messages
    const firstUserMessage = findFirstUserMessage(messages);
    if (!firstUserMessage) {
        // No user message found, generate random ID
        return `conv_${crypto.randomUUID().replace(/-/g, '').slice(0, 16)}`;
    }
    // Get system prompt from messages if not provided separately
    const effectiveSystemPrompt = systemPrompt ?? extractSystemPrompt(messages);
    const firstUserContent = extractTextContent(firstUserMessage.content);
    // Create seed for hashing
    const seed = JSON.stringify({
        system: effectiveSystemPrompt ?? null,
        first_user: firstUserContent,
    });
    const hash = sha256Short(seed, 16);
    return `conv_${hash}`;
}
/**
 * Generate a random conversation ID.
 * Useful for cases where automatic computation is not desired.
 */
function generateRandomConversationId() {
    return `conv_${crypto.randomUUID().replace(/-/g, '').slice(0, 16)}`;
}
/**
 * Validate conversation ID format
 * @param id - Conversation ID to validate
 * @returns true if the ID matches expected format
 */
function isValidConversationId(id) {
    // Accept any string, but prefer conv_* format
    return typeof id === 'string' && id.length > 0;
}

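/*
 * Illustrative sketch (editor's note, not part of the published file) of the
 * derivation above, using only Node's crypto module:
 *
 *   const seed = JSON.stringify({ system: 'You are helpful.', first_user: 'Hi' });
 *   const id = 'conv_' + require('crypto')
 *       .createHash('sha256').update(seed).digest('hex').slice(0, 16);
 *   // The same system prompt + first user message always yields the same id,
 *   // so later turns of the same thread map to the same conversation.
 */
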
/**
 * Base class for LLM client instrumentations.
 * Provides common functionality for creating spans and setting attributes.
 */
class BaseInstrumentation {
    vendor;
    tracer = api.trace.getTracer('moda-sdk', '0.1.0');
    constructor(vendor) {
        this.vendor = vendor;
    }
    /**
     * Create span attributes for an LLM call
     */
    createSpanAttributes(messages, model, systemPrompt) {
        const context = getEffectiveContext();
        const conversationId = computeConversationId(messages, systemPrompt);
        const attributes = {
            'llm.vendor': this.vendor,
            'llm.request.type': 'chat',
            'llm.request.model': model,
            'moda.conversation_id': conversationId,
        };
        if (context.userId) {
            attributes['moda.user_id'] = context.userId;
        }
        // Add message attributes
        const messageAttrs = formatMessagesForSpan(messages);
        Object.assign(attributes, messageAttrs);
        return attributes;
    }
    /**
     * Add completion attributes to a span
     */
    addCompletionAttributes(span, role, content, index = 0) {
        const attrs = formatCompletionForSpan(role, content, index);
        for (const [key, value] of Object.entries(attrs)) {
            span.setAttribute(key, value);
        }
    }
    /**
     * Add usage metrics to a span
     */
    addUsageAttributes(span, usage) {
        if (!usage)
            return;
        const promptTokens = usage.prompt_tokens ?? usage.input_tokens;
        const completionTokens = usage.completion_tokens ?? usage.output_tokens;
        const totalTokens = usage.total_tokens ?? (promptTokens && completionTokens ? promptTokens + completionTokens : undefined);
        if (promptTokens !== undefined) {
            span.setAttribute('llm.usage.prompt_tokens', promptTokens);
        }
        if (completionTokens !== undefined) {
            span.setAttribute('llm.usage.completion_tokens', completionTokens);
        }
        if (totalTokens !== undefined) {
            span.setAttribute('llm.usage.total_tokens', totalTokens);
        }
    }
    /**
     * Set span error status
     */
    setSpanError(span, error) {
        span.setStatus({
            code: api.SpanStatusCode.ERROR,
            message: error.message,
        });
        span.recordException(error);
    }
    /**
     * Set span success status
     */
    setSpanSuccess(span) {
        span.setStatus({ code: api.SpanStatusCode.OK });
    }
    /**
     * Log debug message if debug mode is enabled
     */
    debug(message, ...args) {
        if (isDebugEnabled()) {
            console.log(`[Moda:${this.vendor}] ${message}`, ...args);
        }
    }
}

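/*
 * Illustrative attribute shape (editor's sketch; values are hypothetical)
 * produced by createSpanAttributes for a two-message chat request:
 *
 *   {
 *     'llm.vendor': 'openai',
 *     'llm.request.type': 'chat',
 *     'llm.request.model': 'gpt-4o-mini',
 *     'moda.conversation_id': 'conv_0123456789abcdef',
 *     'llm.prompts.0.role': 'system',
 *     'llm.prompts.0.content': 'You are helpful.',
 *     'llm.prompts.1.role': 'user',
 *     'llm.prompts.1.content': 'Hi',
 *   }
 */
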
// Store original methods for unpatching
let originalChatCompletionsCreate = null;
let patched$1 = false;
/**
 * OpenAI client instrumentation.
 * Automatically tracks all chat completion calls with conversation threading.
 */
class OpenAIInstrumentation extends BaseInstrumentation {
    constructor() {
        super('openai');
    }
    /**
     * Patch the OpenAI client to add instrumentation
     */
    patch() {
        if (patched$1) {
            this.debug('Already patched');
            return;
        }
        try {
            // Dynamic require to handle the optional dependency
            const openaiModule = require('openai');
            const OpenAI = openaiModule.default || openaiModule;
            if (!OpenAI || !OpenAI.prototype) {
                this.debug('OpenAI module not found or invalid');
                return;
            }
            const instrumentation = this;
            // Patch the Chat.Completions.create method.
            // We need to patch at the prototype level of the internal class.
            const originalConstructor = OpenAI;
            // Override the constructor to patch instances
            const patchedOpenAI = function (...args) {
                const instance = new originalConstructor(...args);
                instrumentation.patchInstance(instance);
                return instance;
            };
            // Copy static properties
            Object.setPrototypeOf(patchedOpenAI, originalConstructor);
            patchedOpenAI.prototype = originalConstructor.prototype;
            // Replace in module cache
            if (openaiModule.default) {
                openaiModule.default = patchedOpenAI;
            }
            // Also patch existing instances by patching the prototype
            this.patchPrototype(OpenAI);
            patched$1 = true;
            this.debug('Patched successfully');
        }
        catch (error) {
            // OpenAI not installed, skip silently
            this.debug('Could not patch OpenAI:', error);
        }
    }
    /**
     * Patch an OpenAI instance
     */
    patchInstance(instance) {
        if (!instance.chat?.completions?.create) {
            return;
        }
        const instrumentation = this;
        const original = instance.chat.completions.create.bind(instance.chat.completions);
        instance.chat.completions.create = async function (params, options) {
            return instrumentation.tracedCreate(original, params, options);
        };
    }
    /**
     * Patch the OpenAI prototype to catch all instances
     */
    patchPrototype(OpenAI) {
        const instrumentation = this;
        // Store the original Chat.Completions create method
        try {
            const chatProto = OpenAI.Chat?.Completions?.prototype;
            if (chatProto && chatProto.create) {
                originalChatCompletionsCreate = chatProto.create;
                chatProto.create = async function (params, options) {
                    const original = originalChatCompletionsCreate.bind(this);
                    return instrumentation.tracedCreate(original, params, options);
                };
            }
        }
        catch {
            // Prototype structure may vary
        }
    }
    /**
     * Wrap a create call with tracing
     */
    async tracedCreate(original, params, options) {
        const messages = params.messages;
        const model = params.model;
        const isStreaming = params.stream === true;
        // Normalize messages
        const normalizedMessages = normalizeOpenAIMessages(messages);
        // Create span attributes
        const attributes = this.createSpanAttributes(normalizedMessages, model);
        // Add streaming flag
        attributes['llm.request.streaming'] = isStreaming;
        // Create span
        const span = this.tracer.startSpan('openai.chat.completions.create', {
            attributes,
        });
        try {
            if (isStreaming) {
                return await this.handleStreamingResponse(original, params, options, span);
            }
            else {
                return await this.handleNonStreamingResponse(original, params, options, span);
            }
        }
        catch (error) {
            this.setSpanError(span, error);
            throw error;
        }
        finally {
            // Caveat: for streaming calls this runs as soon as the wrapped stream is
            // returned, before it is consumed, so attributes added during iteration
            // land on an already-ended span and may be dropped by the OTel SDK.
            span.end();
        }
    }
    /**
     * Handle non-streaming response
     */
    async handleNonStreamingResponse(original, params, options, span) {
        const response = await original(params, options);
        // Extract completion
        const choice = response.choices?.[0];
        if (choice?.message) {
            const content = choice.message.content || '';
            const role = choice.message.role || 'assistant';
            this.addCompletionAttributes(span, role, content);
            // Add finish reason
            if (choice.finish_reason) {
                span.setAttribute('llm.response.finish_reason', choice.finish_reason);
            }
        }
        // Add model from response
        if (response.model) {
            span.setAttribute('llm.response.model', response.model);
        }
        // Add usage metrics
        this.addUsageAttributes(span, response.usage);
        this.setSpanSuccess(span);
        return response;
    }
    /**
     * Handle streaming response
     */
    async handleStreamingResponse(original, params, options, span) {
        const stream = await original(params, options);
        const instrumentation = this;
        // Wrap the stream to capture content
        let fullContent = '';
        let finishReason = null;
        let model = null;
        let usage = null;
        // Create async iterator wrapper
        const wrappedStream = {
            [Symbol.asyncIterator]: async function* () {
                try {
                    for await (const chunk of stream) {
                        // Capture content
                        const delta = chunk.choices?.[0]?.delta;
                        if (delta?.content) {
                            fullContent += delta.content;
                        }
                        // Capture finish reason
                        if (chunk.choices?.[0]?.finish_reason) {
                            finishReason = chunk.choices[0].finish_reason;
                        }
                        // Capture model
                        if (chunk.model) {
                            model = chunk.model;
                        }
                        // Capture usage (OpenAI includes it in the last chunk with stream_options)
                        if (chunk.usage) {
                            usage = chunk.usage;
                        }
                        yield chunk;
                    }
                    // After stream completes, add attributes
                    instrumentation.addCompletionAttributes(span, 'assistant', fullContent);
                    if (finishReason) {
                        span.setAttribute('llm.response.finish_reason', finishReason);
                    }
                    if (model) {
                        span.setAttribute('llm.response.model', model);
                    }
                    if (usage) {
                        instrumentation.addUsageAttributes(span, usage);
                    }
                    instrumentation.setSpanSuccess(span);
                }
                catch (error) {
                    instrumentation.setSpanError(span, error);
                    throw error;
                }
            },
            // Forward other stream properties (note: object spread copies only own
            // enumerable properties, so prototype methods are not carried over)
            ...stream,
        };
        // If the stream has a controller/abort method, forward it
        if (stream.controller) {
            wrappedStream.controller = stream.controller;
        }
        return wrappedStream;
    }
    /**
     * Unpatch the OpenAI client
     */
    unpatch() {
        if (!patched$1) {
            return;
        }
        try {
            const openaiModule = require('openai');
            const OpenAI = openaiModule.default || openaiModule;
            if (originalChatCompletionsCreate && OpenAI.Chat?.Completions?.prototype) {
                OpenAI.Chat.Completions.prototype.create = originalChatCompletionsCreate;
            }
            patched$1 = false;
            originalChatCompletionsCreate = null;
            this.debug('Unpatched successfully');
        }
        catch {
            // Ignore
        }
    }
}
// Export singleton instance
const openAIInstrumentation = new OpenAIInstrumentation();

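/*
 * Illustrative (editor's note, not part of the published file): usage metrics
 * appear on streamed OpenAI responses only when the caller opts in, since the
 * wrapper above can only capture a usage chunk the API actually emits:
 *
 *   const stream = await openai.chat.completions.create({
 *     model: 'gpt-4o-mini',
 *     messages,
 *     stream: true,
 *     stream_options: { include_usage: true },
 *   });
 */
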
// Store original methods for unpatching
let originalMessagesCreate = null;
let patched = false;
/**
 * Anthropic client instrumentation.
 * Automatically tracks all message creation calls with conversation threading.
 */
class AnthropicInstrumentation extends BaseInstrumentation {
    constructor() {
        super('anthropic');
    }
    /**
     * Patch the Anthropic client to add instrumentation
     */
    patch() {
        if (patched) {
            this.debug('Already patched');
            return;
        }
        try {
            // Dynamic require to handle the optional dependency
            const anthropicModule = require('@anthropic-ai/sdk');
            const Anthropic = anthropicModule.default || anthropicModule;
            if (!Anthropic || !Anthropic.prototype) {
                this.debug('Anthropic module not found or invalid');
                return;
            }
            // Patch the Messages.create method at prototype level
            this.patchPrototype(Anthropic);
            patched = true;
            this.debug('Patched successfully');
        }
        catch (error) {
            // Anthropic not installed, skip silently
            this.debug('Could not patch Anthropic:', error);
        }
    }
    /**
     * Patch the Anthropic prototype to catch all instances
     */
    patchPrototype(Anthropic) {
        const instrumentation = this;
        try {
            const messagesProto = Anthropic.Messages?.prototype;
            if (messagesProto && messagesProto.create) {
                originalMessagesCreate = messagesProto.create;
                messagesProto.create = async function (params, options) {
                    const original = originalMessagesCreate.bind(this);
                    return instrumentation.tracedCreate(original, params, options);
                };
            }
        }
        catch {
            // Try alternative approach - patch on client construction
            const originalConstructor = Anthropic;
            // We'll wrap the constructor
            const patchedAnthropic = function (...args) {
                const instance = new originalConstructor(...args);
                instrumentation.patchInstance(instance);
                return instance;
            };
            Object.setPrototypeOf(patchedAnthropic, originalConstructor);
            patchedAnthropic.prototype = originalConstructor.prototype;
            // Note: patchedAnthropic is constructed but never installed anywhere,
            // so this fallback path currently has no effect.
        }
    }
    /**
     * Patch an Anthropic instance
     */
    patchInstance(instance) {
        if (!instance.messages?.create) {
            return;
        }
        const instrumentation = this;
        const original = instance.messages.create.bind(instance.messages);
        instance.messages.create = async function (params, options) {
            return instrumentation.tracedCreate(original, params, options);
        };
    }
    /**
     * Wrap a create call with tracing
     */
    async tracedCreate(original, params, options) {
        const messages = params.messages;
        const model = params.model;
        const systemPrompt = this.extractSystemPrompt(params.system);
        const isStreaming = params.stream === true;
        // Normalize messages
        const normalizedMessages = normalizeAnthropicMessages(messages, systemPrompt);
        // Create span attributes
        const attributes = this.createSpanAttributes(normalizedMessages, model, systemPrompt);
        // Add streaming flag
        attributes['llm.request.streaming'] = isStreaming;
        // Add max tokens if specified
        if (params.max_tokens) {
            attributes['llm.request.max_tokens'] = params.max_tokens;
        }
        // Create span
        const span = this.tracer.startSpan('anthropic.messages.create', {
            attributes,
        });
        try {
            if (isStreaming) {
                return await this.handleStreamingResponse(original, params, options, span);
            }
            else {
                return await this.handleNonStreamingResponse(original, params, options, span);
            }
        }
        catch (error) {
            this.setSpanError(span, error);
            throw error;
        }
        finally {
            span.end();
        }
    }
    /**
     * Extract system prompt from Anthropic's system parameter.
     * Can be a string or an array of content blocks.
     */
    extractSystemPrompt(system) {
        if (!system)
            return undefined;
        if (typeof system === 'string') {
            return system;
        }
        if (Array.isArray(system)) {
            return system
                .filter((block) => block.type === 'text')
                .map((block) => block.text)
                .join('\n');
        }
        return undefined;
    }
    /**
     * Handle non-streaming response
     */
    async handleNonStreamingResponse(original, params, options, span) {
        const response = await original(params, options);
        // Extract completion content
        const content = this.extractResponseContent(response.content);
        this.addCompletionAttributes(span, 'assistant', content);
        // Add stop reason
        if (response.stop_reason) {
            span.setAttribute('llm.response.finish_reason', response.stop_reason);
        }
        // Add model from response
        if (response.model) {
            span.setAttribute('llm.response.model', response.model);
        }
        // Add usage metrics (Anthropic uses input_tokens/output_tokens)
        this.addUsageAttributes(span, response.usage);
        this.setSpanSuccess(span);
        return response;
    }
    /**
     * Handle streaming response
     */
    async handleStreamingResponse(original, params, options, span) {
        const stream = await original(params, options);
        const instrumentation = this;
        // Wrap the stream to capture content
        let fullContent = '';
        let stopReason = null;
        let model = null;
        let usage = null;
        // Create async iterator wrapper
        const wrappedStream = {
            [Symbol.asyncIterator]: async function* () {
                try {
                    for await (const event of stream) {
                        // Handle different event types
                        switch (event.type) {
                            case 'message_start':
                                if (event.message?.model) {
                                    model = event.message.model;
                                }
                                if (event.message?.usage) {
                                    usage = { ...usage, ...event.message.usage };
                                }
                                break;
                            case 'content_block_delta':
                                if (event.delta?.type === 'text_delta' && event.delta?.text) {
                                    fullContent += event.delta.text;
                                }
                                break;
                            case 'message_delta':
                                if (event.delta?.stop_reason) {
                                    stopReason = event.delta.stop_reason;
                                }
                                if (event.usage) {
                                    usage = { ...usage, ...event.usage };
                                }
                                break;
                        }
                        yield event;
                    }
                    // After stream completes, add attributes
                    instrumentation.addCompletionAttributes(span, 'assistant', fullContent);
                    if (stopReason) {
                        span.setAttribute('llm.response.finish_reason', stopReason);
                    }
                    if (model) {
                        span.setAttribute('llm.response.model', model);
                    }
                    if (usage) {
                        instrumentation.addUsageAttributes(span, usage);
                    }
                    instrumentation.setSpanSuccess(span);
                }
                catch (error) {
                    instrumentation.setSpanError(span, error);
                    throw error;
                }
            },
            // Forward other stream properties (same own-property caveat as the
            // OpenAI wrapper above)
            ...stream,
        };
        return wrappedStream;
    }
    /**
     * Extract text content from Anthropic response content blocks
     */
    extractResponseContent(content) {
        if (!content || !Array.isArray(content)) {
            return '';
        }
        return content
            .filter((block) => block.type === 'text')
            .map((block) => block.text || '')
            .join('\n');
    }
    /**
     * Unpatch the Anthropic client
     */
    unpatch() {
        if (!patched) {
            return;
        }
        try {
            const anthropicModule = require('@anthropic-ai/sdk');
            const Anthropic = anthropicModule.default || anthropicModule;
            if (originalMessagesCreate && Anthropic.Messages?.prototype) {
                Anthropic.Messages.prototype.create = originalMessagesCreate;
            }
            patched = false;
            originalMessagesCreate = null;
            this.debug('Unpatched successfully');
        }
        catch {
            // Ignore
        }
    }
}
// Export singleton instance
const anthropicInstrumentation = new AnthropicInstrumentation();

/**
 * List of all available instrumentations
 */
const instrumentations = [
    openAIInstrumentation,
    anthropicInstrumentation,
];
/**
 * Register all LLM client instrumentations.
 * This is called automatically by Moda.init()
 */
function registerInstrumentations() {
    for (const instrumentation of instrumentations) {
        try {
            instrumentation.patch();
        }
        catch (error) {
            // Individual instrumentation failures should not break the SDK
            console.warn(`[Moda] Failed to register instrumentation:`, error);
        }
    }
}

let provider = null;
let exporter = null;
/**
 * Check if the SDK is initialized
 */
function isInitialized() {
    return state.initialized;
}
/**
 * Initialize the Moda SDK.
 *
 * This sets up OpenTelemetry tracing with an OTLP exporter pointed at Moda's
 * ingestion endpoint and registers instrumentations for OpenAI and Anthropic.
 *
 * @param apiKey - Your Moda API key (format: moda_xxx)
 * @param options - Configuration options
 *
 * @example
 * ```typescript
 * import { Moda } from '@moda/sdk';
 *
 * Moda.init('moda_your_api_key', {
 *   environment: 'production',
 *   debug: false,
 * });
 * ```
 */
function init(apiKey, options = {}) {
    if (state.initialized) {
        if (state.options.debug) {
            console.warn('[Moda] SDK already initialized. Call shutdown() before re-initializing.');
        }
        return;
    }
    if (!apiKey || typeof apiKey !== 'string') {
        throw new Error('[Moda] API key is required');
    }
    // Merge options with defaults
    const mergedOptions = {
        ...DEFAULT_OPTIONS,
        ...options,
    };
    state.apiKey = apiKey;
    setStateOptions(mergedOptions);
    if (!mergedOptions.enabled) {
        if (mergedOptions.debug) {
            console.log('[Moda] SDK disabled via options');
        }
        state.initialized = true;
        return;
    }
    // Create resource with service info
    const resource = new resources.Resource({
        [semanticConventions.ATTR_SERVICE_NAME]: 'moda-sdk',
        [semanticConventions.ATTR_SERVICE_VERSION]: '0.1.0',
        'moda.environment': mergedOptions.environment,
    });
    // Create OTLP exporter with Moda API key in headers
    exporter = new exporterTraceOtlpProto.OTLPTraceExporter({
        url: mergedOptions.baseUrl,
        headers: {
            'Authorization': `Bearer ${apiKey}`,
            'Content-Type': 'application/x-protobuf',
        },
    });
    // Create tracer provider
    provider = new sdkTraceNode.NodeTracerProvider({
        resource,
    });
    // Use BatchSpanProcessor for production, SimpleSpanProcessor for debug
    const processor = mergedOptions.debug
        ? new sdkTraceBase.SimpleSpanProcessor(exporter)
        : new sdkTraceBase.BatchSpanProcessor(exporter, {
            maxQueueSize: mergedOptions.batchSize * 2,
            maxExportBatchSize: mergedOptions.batchSize,
            scheduledDelayMillis: mergedOptions.flushInterval,
        });
    provider.addSpanProcessor(processor);
    provider.register();
    // Register LLM instrumentations
    registerInstrumentations();
    state.initialized = true;
    if (mergedOptions.debug) {
        console.log('[Moda] SDK initialized successfully');
        console.log(`[Moda] Endpoint: ${mergedOptions.baseUrl}`);
        console.log(`[Moda] Environment: ${mergedOptions.environment}`);
    }
}
/**
 * Force flush all pending spans to the Moda backend.
 * Call this before your application exits to ensure all telemetry is sent.
 *
 * @example
 * ```typescript
 * // Before shutting down
 * await Moda.flush();
 * process.exit(0);
 * ```
 */
async function flush() {
    if (!state.initialized || !provider) {
        return;
    }
    try {
        await provider.forceFlush();
        if (state.options.debug) {
            console.log('[Moda] Flushed all pending spans');
        }
    }
    catch (error) {
        if (state.options.debug) {
            console.error('[Moda] Error flushing spans:', error);
        }
        throw error;
    }
}
/**
 * Shutdown the SDK and release all resources.
 * This flushes any pending spans and stops the tracer provider.
 *
 * @example
 * ```typescript
 * process.on('SIGTERM', async () => {
 *   await Moda.shutdown();
 *   process.exit(0);
 * });
 * ```
 */
async function shutdown() {
    if (!state.initialized) {
        return;
    }
    try {
        if (provider) {
            await provider.shutdown();
        }
        if (state.options.debug) {
            console.log('[Moda] SDK shutdown complete');
        }
    }
    catch (error) {
        if (state.options.debug) {
            console.error('[Moda] Error during shutdown:', error);
        }
        throw error;
    }
    finally {
        resetState();
        provider = null;
        exporter = null;
    }
}
/**
 * Get the OpenTelemetry tracer for creating custom spans.
 * Returns a no-op tracer if the SDK is not initialized.
 */
function getTracer() {
    return api.trace.getTracer('moda-sdk', '0.1.0');
}

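/*
 * Illustrative (editor's sketch; the option values are hypothetical): tuning
 * the batch exporter. With debug: true the SDK instead uses a
 * SimpleSpanProcessor, exporting each span immediately rather than batching.
 *
 *   Moda.init('moda_your_api_key', {
 *     batchSize: 200,      // maxExportBatchSize; queue capacity is 2x this
 *     flushInterval: 2000, // scheduledDelayMillis between batch exports
 *   });
 */
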
/**
 * @moda/sdk - Official TypeScript/Node.js SDK for Moda LLM observability
 *
 * @example
 * ```typescript
 * import { Moda } from '@moda/sdk';
 *
 * // Initialize the SDK
 * Moda.init('moda_your_api_key');
 *
 * // All OpenAI/Anthropic calls are now automatically tracked
 * const openai = new OpenAI();
 * await openai.chat.completions.create({ ... });
 *
 * // Flush before exit
 * await Moda.flush();
 * ```
 */
// Core SDK functions
/**
 * Main Moda SDK object with all public methods
 */
const Moda = {
    /**
     * Initialize the Moda SDK with your API key
     * @see {@link init}
     */
    init,
    /**
     * Force flush all pending spans to the Moda backend
     * @see {@link flush}
     */
    flush,
    /**
     * Shutdown the SDK and release all resources
     * @see {@link shutdown}
     */
    shutdown,
    /**
     * Check if the SDK is initialized
     * @see {@link isInitialized}
     */
    isInitialized,
    /**
     * Set a global conversation ID for subsequent LLM calls
     * @see {@link setConversationId}
     */
    setConversationId,
    /**
     * Clear the global conversation ID
     * @see {@link clearConversationId}
     */
    clearConversationId,
    /**
     * Set a global user ID for subsequent LLM calls
     * @see {@link setUserId}
     */
    setUserId,
    /**
     * Clear the global user ID
     * @see {@link clearUserId}
     */
    clearUserId,
    /**
     * Get the OpenTelemetry tracer for custom spans
     * @see {@link getTracer}
     */
    getTracer,
    /**
     * Get or set the global conversation ID.
     * Setting to null clears the conversation ID.
     *
     * @example
     * ```typescript
     * Moda.conversationId = 'session_123';
     * await client.chat.completions.create({...});
     * Moda.conversationId = null; // clear
     * ```
     */
    get conversationId() {
        return getGlobalContext().conversationId ?? null;
    },
    set conversationId(id) {
        if (id) {
            setConversationId(id);
        }
        else {
            clearConversationId();
        }
    },
    /**
     * Get or set the global user ID.
     * Setting to null clears the user ID.
     *
     * @example
     * ```typescript
     * Moda.userId = 'user_456';
     * await client.chat.completions.create({...});
     * Moda.userId = null; // clear
     * ```
     */
    get userId() {
        return getGlobalContext().userId ?? null;
    },
    set userId(id) {
        if (id) {
            setUserId(id);
        }
        else {
            clearUserId();
        }
    },
};

exports.DEFAULT_OPTIONS = DEFAULT_OPTIONS;
exports.Moda = Moda;
exports.clearConversationId = clearConversationId;
exports.clearUserId = clearUserId;
exports.computeConversationId = computeConversationId;
exports.default = Moda;
exports.flush = flush;
exports.generateRandomConversationId = generateRandomConversationId;
exports.getContext = getContext;
exports.getEffectiveContext = getEffectiveContext;
exports.getGlobalContext = getGlobalContext;
exports.getTracer = getTracer;
exports.init = init;
exports.isInitialized = isInitialized;
exports.isValidConversationId = isValidConversationId;
exports.setConversationId = setConversationId;
exports.setUserId = setUserId;
exports.shutdown = shutdown;
exports.withContext = withContext;
exports.withConversationId = withConversationId;
exports.withUserId = withUserId;
//# sourceMappingURL=index.cjs.map