observa-sdk 0.0.8 → 0.0.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +300 -11
- package/dist/index.cjs +658 -7
- package/dist/index.d.cts +181 -3
- package/dist/index.d.ts +181 -3
- package/dist/index.js +663 -7
- package/package.json +1 -1
package/dist/index.cjs (CHANGED)
```diff
@@ -24,6 +24,389 @@ __export(index_exports, {
   init: () => init
 });
 module.exports = __toCommonJS(index_exports);
+
+// src/instrumentation/utils.ts
+function estimateTokens(text) {
+  return Math.ceil(text.length / 4);
+}
+function reconstructOpenAIResponse(chunks) {
+  if (chunks.length === 0) {
+    return null;
+  }
+  const firstChunk = chunks[0];
+  const lastChunk = chunks[chunks.length - 1];
+  let fullContent = "";
+  const messages = [];
+  for (const chunk of chunks) {
+    if (chunk?.choices?.[0]?.delta?.content) {
+      fullContent += chunk.choices[0].delta.content;
+    }
+  }
+  const response = {
+    id: lastChunk?.id || firstChunk?.id || null,
+    model: lastChunk?.model || firstChunk?.model || null,
+    object: "chat.completion",
+    created: lastChunk?.created || firstChunk?.created || Math.floor(Date.now() / 1e3),
+    choices: [
+      {
+        index: 0,
+        message: {
+          role: "assistant",
+          content: fullContent
+        },
+        finish_reason: lastChunk?.choices?.[0]?.finish_reason || null
+      }
+    ],
+    usage: lastChunk?.usage || null
+    // Usage info typically in last chunk
+  };
+  return response;
+}
+function reconstructAnthropicResponse(chunks) {
+  if (chunks.length === 0) {
+    return null;
+  }
+  const lastChunk = chunks[chunks.length - 1];
+  let fullContent = "";
+  for (const chunk of chunks) {
+    if (chunk?.type === "content_block_delta" && chunk?.delta?.text) {
+      fullContent += chunk.delta.text;
+    }
+  }
+  return {
+    id: lastChunk?.id || null,
+    model: lastChunk?.model || null,
+    type: "message",
+    role: "assistant",
+    content: [{ type: "text", text: fullContent }],
+    stop_reason: lastChunk?.stop_reason || null,
+    stop_sequence: lastChunk?.stop_sequence || null,
+    usage: lastChunk?.usage || null
+  };
+}
+async function* wrapStream(stream, onComplete, onError, provider = "openai") {
+  let firstTokenTime = null;
+  const chunks = [];
+  let tokenCount = 0;
+  const streamStartTime = Date.now();
+  try {
+    for await (const chunk of stream) {
+      if (firstTokenTime === null) {
+        firstTokenTime = Date.now();
+      }
+      if (provider === "openai") {
+        const content = chunk?.choices?.[0]?.delta?.content;
+        if (content && typeof content === "string") {
+          tokenCount += estimateTokens(content);
+        }
+      } else if (provider === "anthropic") {
+        const text = chunk?.delta?.text;
+        if (text && typeof text === "string") {
+          tokenCount += estimateTokens(text);
+        }
+      }
+      chunks.push(chunk);
+      yield chunk;
+    }
+    const fullResponse = provider === "openai" ? reconstructOpenAIResponse(chunks) : reconstructAnthropicResponse(chunks);
+    Promise.resolve().then(() => {
+      try {
+        onComplete({
+          ...fullResponse,
+          timeToFirstToken: firstTokenTime ? firstTokenTime - streamStartTime : null,
+          streamingDuration: firstTokenTime ? Date.now() - firstTokenTime : null,
+          estimatedTokenCount: tokenCount,
+          totalLatency: Date.now() - streamStartTime
+        });
+      } catch (e) {
+        onError(e);
+      }
+    }).catch((e) => {
+      onError(e);
+    });
+  } catch (error) {
+    onError(error);
+    throw error;
+  }
+}
+
+// src/instrumentation/semconv.ts
+var OTEL_SEMCONV = {
+  // Standard GenAI Attributes (Snake Case)
+  GEN_AI_SYSTEM: "gen_ai.system",
+  GEN_AI_REQUEST_MODEL: "gen_ai.request.model",
+  GEN_AI_RESPONSE_MODEL: "gen_ai.response.model",
+  GEN_AI_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens",
+  GEN_AI_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens",
+  GEN_AI_FINISH_REASONS: "gen_ai.response.finish_reasons",
+  // Observa Internal Attributes
+  OBSERVA_TRACE_ID: "observa.trace_id",
+  OBSERVA_SPAN_ID: "observa.span_id",
+  OBSERVA_VERSION: "observa.sdk_version"
+};
+function mapOpenAIToOTEL(request, response) {
+  return {
+    [OTEL_SEMCONV.GEN_AI_SYSTEM]: "openai",
+    [OTEL_SEMCONV.GEN_AI_REQUEST_MODEL]: request.model,
+    [OTEL_SEMCONV.GEN_AI_RESPONSE_MODEL]: response?.model || request.model,
+    [OTEL_SEMCONV.GEN_AI_USAGE_INPUT_TOKENS]: response?.usage?.prompt_tokens,
+    [OTEL_SEMCONV.GEN_AI_USAGE_OUTPUT_TOKENS]: response?.usage?.completion_tokens,
+    [OTEL_SEMCONV.GEN_AI_FINISH_REASONS]: response?.choices?.map((c) => c.finish_reason)
+  };
+}
+
+// src/context.ts
+var traceContextStorage = null;
+var edgeContextMap = null;
+try {
+  const { AsyncLocalStorage } = require("async_hooks");
+  traceContextStorage = new AsyncLocalStorage();
+} catch {
+  edgeContextMap = /* @__PURE__ */ new WeakMap();
+}
+function getTraceContext() {
+  try {
+    if (traceContextStorage) {
+      return traceContextStorage.getStore();
+    }
+    return void 0;
+  } catch {
+    return void 0;
+  }
+}
+
+// src/instrumentation/openai.ts
+var proxyCache = /* @__PURE__ */ new WeakMap();
+function observeOpenAI(client, options) {
+  if (proxyCache.has(client)) {
+    return proxyCache.get(client);
+  }
+  try {
+    const wrapped = new Proxy(client, {
+      get(target, prop, receiver) {
+        const value = Reflect.get(target, prop, receiver);
+        if (typeof value === "object" && value !== null) {
+          if (prop === "prototype" || prop === "constructor") {
+            return value;
+          }
+          return observeOpenAI(value, options);
+        }
+        if (typeof value === "function" && prop === "create") {
+          return async function(...args) {
+            return traceOpenAICall(value.bind(target), args, options);
+          };
+        }
+        return value;
+      }
+    });
+    proxyCache.set(client, wrapped);
+    return wrapped;
+  } catch (error) {
+    console.error("[Observa] Failed to wrap OpenAI client:", error);
+    return client;
+  }
+}
+async function traceOpenAICall(originalFn, args, options) {
+  const startTime = Date.now();
+  const requestParams = args[0] || {};
+  const isStreaming = requestParams.stream === true;
+  try {
+    const result = await originalFn(...args);
+    if (isStreaming) {
+      return wrapStream(
+        result,
+        (fullResponse) => {
+          recordTrace(
+            requestParams,
+            fullResponse,
+            startTime,
+            options,
+            fullResponse.timeToFirstToken,
+            fullResponse.streamingDuration
+          );
+        },
+        (err) => recordError(requestParams, err, startTime, options),
+        "openai"
+      );
+    } else {
+      recordTrace(requestParams, result, startTime, options);
+      return result;
+    }
+  } catch (error) {
+    recordError(requestParams, error, startTime, options);
+    throw error;
+  }
+}
+function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration) {
+  const duration = Date.now() - start;
+  try {
+    const context = getTraceContext();
+    const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+    const sanitizedRes = opts?.redact ? opts.redact(res) : res;
+    const otelAttributes = mapOpenAIToOTEL(sanitizedReq, sanitizedRes);
+    if (opts?.observa) {
+      const inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+      const outputText = sanitizedRes?.choices?.[0]?.message?.content || null;
+      opts.observa.trackLLMCall({
+        model: sanitizedReq.model || sanitizedRes?.model || "unknown",
+        input: inputText,
+        output: outputText,
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: sanitizedRes?.choices?.map((c) => c.message) || null,
+        inputTokens: sanitizedRes?.usage?.prompt_tokens || null,
+        outputTokens: sanitizedRes?.usage?.completion_tokens || null,
+        totalTokens: sanitizedRes?.usage?.total_tokens || null,
+        latencyMs: duration,
+        timeToFirstTokenMs: timeToFirstToken || null,
+        streamingDurationMs: streamingDuration || null,
+        finishReason: sanitizedRes?.choices?.[0]?.finish_reason || null,
+        responseId: sanitizedRes?.id || null,
+        operationName: "chat",
+        providerName: "openai",
+        responseModel: sanitizedRes?.model || sanitizedReq.model || null,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
+    }
+  } catch (e) {
+    console.error("[Observa] Failed to record trace", e);
+  }
+}
+function recordError(req, error, start, opts) {
+  try {
+    console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
+    const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+    if (opts?.observa) {
+      opts.observa.trackError({
+        errorType: "openai_api_error",
+        errorMessage: error?.message || String(error),
+        stackTrace: error?.stack || null,
+        context: { request: sanitizedReq }
+      });
+    }
+  } catch (e) {
+  }
+}
+
+// src/instrumentation/anthropic.ts
+var proxyCache2 = /* @__PURE__ */ new WeakMap();
+function observeAnthropic(client, options) {
+  if (proxyCache2.has(client)) {
+    return proxyCache2.get(client);
+  }
+  try {
+    const wrapped = new Proxy(client, {
+      get(target, prop, receiver) {
+        const value = Reflect.get(target, prop, receiver);
+        if (typeof value === "object" && value !== null) {
+          if (prop === "prototype" || prop === "constructor") {
+            return value;
+          }
+          return observeAnthropic(value, options);
+        }
+        if (typeof value === "function" && prop === "create") {
+          return async function(...args) {
+            return traceAnthropicCall(value.bind(target), args, options);
+          };
+        }
+        return value;
+      }
+    });
+    proxyCache2.set(client, wrapped);
+    return wrapped;
+  } catch (error) {
+    console.error("[Observa] Failed to wrap Anthropic client:", error);
+    return client;
+  }
+}
+async function traceAnthropicCall(originalFn, args, options) {
+  const startTime = Date.now();
+  const requestParams = args[0] || {};
+  const isStreaming = requestParams.stream === true;
+  try {
+    const result = await originalFn(...args);
+    if (isStreaming) {
+      return wrapStream(
+        result,
+        (fullResponse) => {
+          recordTrace2(
+            requestParams,
+            fullResponse,
+            startTime,
+            options,
+            fullResponse.timeToFirstToken,
+            fullResponse.streamingDuration
+          );
+        },
+        (err) => recordError2(requestParams, err, startTime, options),
+        "anthropic"
+      );
+    } else {
+      recordTrace2(requestParams, result, startTime, options);
+      return result;
+    }
+  } catch (error) {
+    recordError2(requestParams, error, startTime, options);
+    throw error;
+  }
+}
+function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration) {
+  const duration = Date.now() - start;
+  try {
+    const context = getTraceContext();
+    const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+    const sanitizedRes = opts?.redact ? opts.redact(res) : res;
+    if (opts?.observa) {
+      const inputText = sanitizedReq.messages?.map((m) => {
+        if (typeof m.content === "string") return m.content;
+        if (Array.isArray(m.content)) {
+          return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+        }
+        return null;
+      }).filter(Boolean).join("\n") || null;
+      const outputText = sanitizedRes?.content?.map((c) => c.text).filter(Boolean).join("\n") || null;
+      opts.observa.trackLLMCall({
+        model: sanitizedReq.model || sanitizedRes?.model || "unknown",
+        input: inputText,
+        output: outputText,
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: sanitizedRes?.content || null,
+        inputTokens: sanitizedRes?.usage?.input_tokens || null,
+        outputTokens: sanitizedRes?.usage?.output_tokens || null,
+        totalTokens: (sanitizedRes?.usage?.input_tokens || 0) + (sanitizedRes?.usage?.output_tokens || 0) || null,
+        latencyMs: duration,
+        timeToFirstTokenMs: timeToFirstToken || null,
+        streamingDurationMs: streamingDuration || null,
+        finishReason: sanitizedRes?.stop_reason || null,
+        responseId: sanitizedRes?.id || null,
+        operationName: "chat",
+        providerName: "anthropic",
+        responseModel: sanitizedRes?.model || sanitizedReq.model || null,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
+    }
+  } catch (e) {
+    console.error("[Observa] Failed to record trace", e);
+  }
+}
+function recordError2(req, error, start, opts) {
+  try {
+    console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
+    const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+    if (opts?.observa) {
+      opts.observa.trackError({
+        errorType: "anthropic_api_error",
+        errorMessage: error?.message || String(error),
+        stackTrace: error?.stack || null,
+        context: { request: sanitizedReq }
+      });
+    }
+  } catch (e) {
+  }
+}
+
+// src/index.ts
 var contextModule = null;
 try {
   const requireFn = globalThis.require;
```
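The `wrapStream` helper above passes every chunk through untouched, accumulates them for reconstruction, and defers the `onComplete` callback to a microtask so reporting stays off the streaming hot path. A minimal consumer sketch, assuming `wrapStream` and `estimateTokens` from the diff are in scope; the two-chunk stream is fabricated for illustration:

```typescript
// Illustrative only: fakeOpenAIStream is invented; wrapStream, the chunk
// shape, and the onComplete payload come from the diff above.
async function* fakeOpenAIStream() {
  yield { id: "cmpl_1", model: "gpt-4o-mini", choices: [{ index: 0, delta: { content: "Hel" } }] };
  yield { id: "cmpl_1", model: "gpt-4o-mini", choices: [{ index: 0, delta: { content: "lo" }, finish_reason: "stop" }] };
}

async function demo() {
  const wrapped = wrapStream(
    fakeOpenAIStream(),
    (full: any) => {
      // Fires once, after the last chunk, via Promise.resolve().then(...).
      console.log(full.choices[0].message.content); // "Hello"
      console.log(full.estimatedTokenCount);        // ceil(3/4) + ceil(2/4) = 2
    },
    (err: unknown) => console.error(err),
    "openai"
  );
  for await (const chunk of wrapped) {
    // Chunks arrive unmodified, so existing streaming code keeps working.
  }
}
```

Note that the callbacks fire only once the stream is fully consumed: the generator body resumes solely when the consumer iterates, so an abandoned stream records nothing.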
```diff
@@ -408,7 +791,78 @@ var Observa = class {
     return this.currentTraceId;
   }
   /**
-   * Track
+   * Track an LLM call with full OTEL support
+   * CRITICAL: This is the primary method for tracking LLM calls with all SOTA parameters
+   */
+  trackLLMCall(options) {
+    const spanId = crypto.randomUUID();
+    let providerName = options.providerName;
+    if (!providerName && options.model) {
+      const modelLower = options.model.toLowerCase();
+      if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+        providerName = "openai";
+      } else if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+        providerName = "anthropic";
+      } else if (modelLower.includes("gemini") || modelLower.includes("google")) {
+        providerName = "google";
+      } else if (modelLower.includes("vertex")) {
+        providerName = "gcp.vertex_ai";
+      } else if (modelLower.includes("bedrock") || modelLower.includes("aws")) {
+        providerName = "aws.bedrock";
+      }
+    }
+    const operationName = options.operationName || "chat";
+    this.addEvent({
+      event_type: "llm_call",
+      span_id: spanId,
+      attributes: {
+        llm_call: {
+          model: options.model,
+          input: options.input || null,
+          output: options.output || null,
+          input_tokens: options.inputTokens || null,
+          output_tokens: options.outputTokens || null,
+          total_tokens: options.totalTokens || null,
+          latency_ms: options.latencyMs,
+          time_to_first_token_ms: options.timeToFirstTokenMs || null,
+          streaming_duration_ms: options.streamingDurationMs || null,
+          finish_reason: options.finishReason || null,
+          response_id: options.responseId || null,
+          system_fingerprint: options.systemFingerprint || null,
+          cost: options.cost || null,
+          temperature: options.temperature || null,
+          max_tokens: options.maxTokens || null,
+          // TIER 1: OTEL Semantic Conventions
+          operation_name: operationName,
+          provider_name: providerName || null,
+          response_model: options.responseModel || null,
+          // TIER 2: Sampling parameters
+          top_k: options.topK || null,
+          top_p: options.topP || null,
+          frequency_penalty: options.frequencyPenalty || null,
+          presence_penalty: options.presencePenalty || null,
+          stop_sequences: options.stopSequences || null,
+          seed: options.seed || null,
+          // TIER 2: Structured cost tracking
+          input_cost: options.inputCost || null,
+          output_cost: options.outputCost || null,
+          // TIER 1: Structured message objects
+          input_messages: options.inputMessages || null,
+          output_messages: options.outputMessages || null,
+          system_instructions: options.systemInstructions || null,
+          // TIER 2: Server metadata
+          server_address: options.serverAddress || null,
+          server_port: options.serverPort || null,
+          // TIER 2: Conversation grouping
+          conversation_id_otel: options.conversationIdOtel || null,
+          choice_count: options.choiceCount || null
+        }
+      }
+    });
+    return spanId;
+  }
+  /**
+   * Track a tool call with OTEL standardization
    */
   trackToolCall(options) {
     const spanId = crypto.randomUUID();
```
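`trackLLMCall` is what the client wrappers call under the hood, but it can also be invoked directly for manual instrumentation. A sketch assuming an initialized `Observa` instance named `observa` and a plain OpenAI client in scope; the option names match those the method reads above:

```typescript
async function answer(question: string) {
  const started = Date.now();
  const response = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: question }],
  });
  observa.trackLLMCall({
    model: "gpt-4o-mini",
    input: question,
    output: response.choices[0]?.message?.content ?? null,
    inputTokens: response.usage?.prompt_tokens ?? null,
    outputTokens: response.usage?.completion_tokens ?? null,
    totalTokens: response.usage?.total_tokens ?? null,
    latencyMs: Date.now() - started,
    finishReason: response.choices[0]?.finish_reason ?? null,
    responseId: response.id,
    // providerName may be omitted: "gpt-4o-mini" matches the "gpt" branch above.
  });
  return response;
}
```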
```diff
@@ -422,14 +876,21 @@ var Observa = class {
           result: options.result || null,
           result_status: options.resultStatus,
           latency_ms: options.latencyMs,
-          error_message: options.errorMessage || null
+          error_message: options.errorMessage || null,
+          // TIER 2: OTEL Tool Standardization
+          operation_name: options.operationName || "execute_tool",
+          tool_type: options.toolType || null,
+          tool_description: options.toolDescription || null,
+          tool_call_id: options.toolCallId || null,
+          error_type: options.errorType || null,
+          error_category: options.errorCategory || null
         }
       }
     });
     return spanId;
   }
   /**
-   * Track a retrieval operation
+   * Track a retrieval operation with vector metadata enrichment
    */
   trackRetrieval(options) {
     const spanId = crypto.randomUUID();
```
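A hypothetical `trackToolCall` invocation using the new TIER 2 fields. Only the options visible in this hunk (`result`, `resultStatus`, `latencyMs`, `errorMessage`, and the new OTEL tool fields) are confirmed; `toolName` and the `getWeather` helper are guesses at names defined outside the hunk:

```typescript
async function runWeatherTool(city: string) {
  const t0 = Date.now();
  try {
    const result = await getWeather(city); // hypothetical tool implementation
    observa.trackToolCall({
      toolName: "get_weather", // assumed: option defined outside this hunk
      result,
      resultStatus: "success",
      latencyMs: Date.now() - t0,
      // New TIER 2 fields:
      toolType: "function",
      toolDescription: "Looks up current weather by city",
      toolCallId: "call_abc123",
    });
    return result;
  } catch (err) {
    observa.trackToolCall({
      toolName: "get_weather",
      resultStatus: "error",
      latencyMs: Date.now() - t0,
      errorMessage: err instanceof Error ? err.message : String(err),
      errorType: "tool_execution_error",
      errorCategory: "downstream",
    });
    throw err;
  }
}
```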
```diff
@@ -443,14 +904,23 @@ var Observa = class {
           k: options.k || null,
           top_k: options.k || null,
           similarity_scores: options.similarityScores || null,
-          latency_ms: options.latencyMs
+          latency_ms: options.latencyMs,
+          // TIER 2: Retrieval enrichment
+          retrieval_context: options.retrievalContext || null,
+          embedding_model: options.embeddingModel || null,
+          embedding_dimensions: options.embeddingDimensions || null,
+          vector_metric: options.vectorMetric || null,
+          rerank_score: options.rerankScore || null,
+          fusion_method: options.fusionMethod || null,
+          deduplication_removed_count: options.deduplicationRemovedCount || null,
+          quality_score: options.qualityScore || null
         }
       }
     });
     return spanId;
   }
   /**
-   * Track an error with
+   * Track an error with structured error classification
    */
   trackError(options) {
     const spanId = crypto.randomUUID();
```
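A sketch of `trackRetrieval` with the new enrichment fields. The `vectorStore` call and the `query` option are assumptions; `k`, `similarityScores`, `latencyMs`, and the TIER 2 fields are taken from the hunk:

```typescript
async function retrieve(question: string, queryEmbedding: number[]) {
  const started = Date.now();
  const hits = await vectorStore.query(queryEmbedding, { topK: 5 }); // assumed API
  observa.trackRetrieval({
    query: question, // assumed: option defined outside this hunk
    k: 5,
    similarityScores: hits.map((h: { score: number }) => h.score),
    latencyMs: Date.now() - started,
    // New TIER 2 enrichment:
    embeddingModel: "text-embedding-3-small",
    embeddingDimensions: 1536,
    vectorMetric: "cosine",
    fusionMethod: "rrf",
    deduplicationRemovedCount: 2,
    qualityScore: 0.87,
  });
  return hits;
}
```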
```diff
@@ -466,7 +936,10 @@ var Observa = class {
           error_type: options.errorType,
           error_message: options.errorMessage,
           stack_trace: stackTrace || null,
-          context: options.context || null
+          context: options.context || null,
+          // TIER 2: Structured error classification
+          error_category: options.errorCategory || null,
+          error_code: options.errorCode || null
         }
       }
     });
```
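The error path, sketched with the new classification fields. All option names here (`errorType`, `errorMessage`, `stackTrace`, `context`, `errorCategory`, `errorCode`) appear in the diff; `callUpstream` is a stand-in:

```typescript
async function guarded() {
  try {
    return await callUpstream(); // hypothetical upstream call
  } catch (err) {
    observa.trackError({
      errorType: "upstream_api_error",
      errorMessage: err instanceof Error ? err.message : String(err),
      stackTrace: err instanceof Error ? (err.stack ?? null) : null,
      context: { route: "/api/answer" },
      // New TIER 2 classification:
      errorCategory: "network",
      errorCode: "ETIMEDOUT",
    });
    throw err;
  }
}
```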
```diff
@@ -520,6 +993,114 @@ var Observa = class {
     });
     return spanId;
   }
+  /**
+   * Track an embedding operation (TIER 1: Critical)
+   */
+  trackEmbedding(options) {
+    const spanId = crypto.randomUUID();
+    let providerName = options.providerName;
+    if (!providerName && options.model) {
+      const modelLower = options.model.toLowerCase();
+      if (modelLower.includes("text-embedding") || modelLower.includes("openai")) {
+        providerName = "openai";
+      } else if (modelLower.includes("textembedding") || modelLower.includes("google")) {
+        providerName = "google";
+      } else if (modelLower.includes("vertex")) {
+        providerName = "gcp.vertex_ai";
+      }
+    }
+    this.addEvent({
+      event_type: "embedding",
+      span_id: spanId,
+      attributes: {
+        embedding: {
+          model: options.model,
+          dimension_count: options.dimensionCount || null,
+          encoding_formats: options.encodingFormats || null,
+          input_tokens: options.inputTokens || null,
+          output_tokens: options.outputTokens || null,
+          latency_ms: options.latencyMs,
+          cost: options.cost || null,
+          input_text: options.inputText || null,
+          input_hash: options.inputHash || null,
+          embeddings: options.embeddings || null,
+          embeddings_hash: options.embeddingsHash || null,
+          operation_name: options.operationName || "embeddings",
+          provider_name: providerName || null
+        }
+      }
+    });
+    return spanId;
+  }
+  /**
+   * Track a vector database operation (TIER 3)
+   */
+  trackVectorDbOperation(options) {
+    const spanId = crypto.randomUUID();
+    this.addEvent({
+      event_type: "vector_db_operation",
+      span_id: spanId,
+      attributes: {
+        vector_db_operation: {
+          operation_type: options.operationType,
+          index_name: options.indexName || null,
+          index_version: options.indexVersion || null,
+          vector_dimensions: options.vectorDimensions || null,
+          vector_metric: options.vectorMetric || null,
+          results_count: options.resultsCount || null,
+          scores: options.scores || null,
+          latency_ms: options.latencyMs,
+          cost: options.cost || null,
+          api_version: options.apiVersion || null,
+          provider_name: options.providerName || null
+        }
+      }
+    });
+    return spanId;
+  }
+  /**
+   * Track a cache operation (TIER 3)
+   */
+  trackCacheOperation(options) {
+    const spanId = crypto.randomUUID();
+    this.addEvent({
+      event_type: "cache_operation",
+      span_id: spanId,
+      attributes: {
+        cache_operation: {
+          cache_backend: options.cacheBackend || null,
+          cache_key: options.cacheKey || null,
+          cache_namespace: options.cacheNamespace || null,
+          hit_status: options.hitStatus,
+          latency_ms: options.latencyMs,
+          saved_cost: options.savedCost || null,
+          ttl: options.ttl || null,
+          eviction_info: options.evictionInfo || null
+        }
+      }
+    });
+    return spanId;
+  }
+  /**
+   * Track agent creation (TIER 3)
+   */
+  trackAgentCreate(options) {
+    const spanId = crypto.randomUUID();
+    this.addEvent({
+      event_type: "agent_create",
+      span_id: spanId,
+      attributes: {
+        agent_create: {
+          agent_name: options.agentName,
+          agent_config: options.agentConfig || null,
+          tools_bound: options.toolsBound || null,
+          model_config: options.modelConfig || null,
+          operation_name: options.operationName || "create_agent"
+        }
+      }
+    });
+    return spanId;
+  }
   /**
    * Execute a function within a span context (for nested operations)
    * This allows tool calls to be nested under LLM calls, etc.
```
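A sketch of the new `trackEmbedding` method against the OpenAI embeddings API, assuming `observa` and an OpenAI client in scope. All option names come from the diff; note that `providerName` is inferred from the model string when omitted:

```typescript
async function embed(text: string) {
  const started = Date.now();
  const res = await openai.embeddings.create({
    model: "text-embedding-3-small",
    input: text,
  });
  observa.trackEmbedding({
    // Matches the "text-embedding" branch above, so providerName becomes "openai".
    model: "text-embedding-3-small",
    dimensionCount: res.data[0]?.embedding.length ?? null,
    inputTokens: res.usage?.prompt_tokens ?? null,
    latencyMs: Date.now() - started,
    inputText: text,
  });
  return res.data[0]?.embedding;
}
```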
```diff
@@ -620,6 +1201,21 @@ var Observa = class {
     });
     if (trace.model) {
       const llmSpanId = crypto.randomUUID();
+      let providerName = null;
+      if (trace.model) {
+        const modelLower = trace.model.toLowerCase();
+        if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+          providerName = "openai";
+        } else if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+          providerName = "anthropic";
+        } else if (modelLower.includes("gemini") || modelLower.includes("google")) {
+          providerName = "google";
+        } else if (modelLower.includes("vertex")) {
+          providerName = "gcp.vertex_ai";
+        } else if (modelLower.includes("bedrock") || modelLower.includes("aws")) {
+          providerName = "aws.bedrock";
+        }
+      }
       events.push({
         ...baseEvent,
         span_id: llmSpanId,
```

```diff
@@ -640,8 +1236,13 @@ var Observa = class {
           finish_reason: trace.finishReason || null,
           response_id: trace.responseId || null,
           system_fingerprint: trace.systemFingerprint || null,
-          cost: null
+          cost: null,
           // Cost calculation handled by backend
+          // TIER 1: OTEL Semantic Conventions (auto-inferred)
+          operation_name: "chat",
+          // Default for legacy track() method
+          provider_name: providerName
+          // Other OTEL fields can be added via trackLLMCall() method
         }
       }
     });
```
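The provider-inference heuristic is now duplicated across `trackLLMCall`, `trackEmbedding` (with embedding-specific substrings), and the legacy `track()` path above. Restated as a standalone function for reference, this is the chat-model variant from the diff:

```typescript
function inferProvider(model: string): string | null {
  const m = model.toLowerCase();
  if (m.includes("gpt") || m.includes("openai")) return "openai";
  if (m.includes("claude") || m.includes("anthropic")) return "anthropic";
  if (m.includes("gemini") || m.includes("google")) return "google";
  if (m.includes("vertex")) return "gcp.vertex_ai";
  if (m.includes("bedrock") || m.includes("aws")) return "aws.bedrock";
  return null; // caller-supplied providerName (or null) is used instead
}

inferProvider("gpt-4o-mini");             // "openai"
inferProvider("claude-3-5-haiku-latest"); // "anthropic"
inferProvider("mistral-large");           // null
```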
````diff
@@ -733,6 +1334,56 @@ var Observa = class {
     }
     await this.flush();
   }
+  /**
+   * Observe OpenAI client - wraps client with automatic tracing
+   *
+   * @param client - OpenAI client instance
+   * @param options - Observation options (name, tags, userId, sessionId, redact)
+   * @returns Wrapped OpenAI client
+   *
+   * @example
+   * ```typescript
+   * import OpenAI from 'openai';
+   * const openai = new OpenAI({ apiKey: '...' });
+   * const wrapped = observa.observeOpenAI(openai, {
+   *   name: 'my-app',
+   *   redact: (data) => ({ ...data, messages: '[REDACTED]' })
+   * });
+   * ```
+   */
+  observeOpenAI(client, options) {
+    try {
+      return observeOpenAI(client, { ...options, observa: this });
+    } catch (error) {
+      console.error("[Observa] Failed to load OpenAI wrapper:", error);
+      return client;
+    }
+  }
+  /**
+   * Observe Anthropic client - wraps client with automatic tracing
+   *
+   * @param client - Anthropic client instance
+   * @param options - Observation options (name, tags, userId, sessionId, redact)
+   * @returns Wrapped Anthropic client
+   *
+   * @example
+   * ```typescript
+   * import Anthropic from '@anthropic-ai/sdk';
+   * const anthropic = new Anthropic({ apiKey: '...' });
+   * const wrapped = observa.observeAnthropic(anthropic, {
+   *   name: 'my-app',
+   *   redact: (data) => ({ ...data, messages: '[REDACTED]' })
+   * });
+   * ```
+   */
+  observeAnthropic(client, options) {
+    try {
+      return observeAnthropic(client, { ...options, observa: this });
+    } catch (error) {
+      console.error("[Observa] Failed to load Anthropic wrapper:", error);
+      return client;
+    }
+  }
   async track(event, action, options) {
     if (this.sampleRate < 1 && Math.random() > this.sampleRate) {
       return action();
````
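Putting the pieces together, an end-to-end sketch of the new wrapper API with streaming. The `init` options and its return value are assumptions (only the `init` export name is visible in this diff); `observeOpenAI` and the pass-through streaming behavior come from the hunks above:

```typescript
import OpenAI from "openai";
import { init } from "observa-sdk";

// Assumed: init() accepts an apiKey and returns the Observa instance.
const observa = init({ apiKey: process.env.OBSERVA_API_KEY });
const openai = observa.observeOpenAI(new OpenAI());

const stream = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "Stream a haiku." }],
  stream: true,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
// After the final chunk, the SDK reconstructs the full response off the hot
// path and records latency, time-to-first-token, and estimated token counts.
```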