observa-sdk 0.0.8 → 0.0.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +300 -11
- package/dist/index.cjs +658 -7
- package/dist/index.d.cts +181 -3
- package/dist/index.d.ts +181 -3
- package/dist/index.js +663 -7
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -1,3 +1,391 @@
+var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
+  get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
+}) : x)(function(x) {
+  if (typeof require !== "undefined") return require.apply(this, arguments);
+  throw Error('Dynamic require of "' + x + '" is not supported');
+});
+
+// src/instrumentation/utils.ts
+function estimateTokens(text) {
+  return Math.ceil(text.length / 4);
+}
+function reconstructOpenAIResponse(chunks) {
+  if (chunks.length === 0) {
+    return null;
+  }
+  const firstChunk = chunks[0];
+  const lastChunk = chunks[chunks.length - 1];
+  let fullContent = "";
+  const messages = [];
+  for (const chunk of chunks) {
+    if (chunk?.choices?.[0]?.delta?.content) {
+      fullContent += chunk.choices[0].delta.content;
+    }
+  }
+  const response = {
+    id: lastChunk?.id || firstChunk?.id || null,
+    model: lastChunk?.model || firstChunk?.model || null,
+    object: "chat.completion",
+    created: lastChunk?.created || firstChunk?.created || Math.floor(Date.now() / 1e3),
+    choices: [
+      {
+        index: 0,
+        message: {
+          role: "assistant",
+          content: fullContent
+        },
+        finish_reason: lastChunk?.choices?.[0]?.finish_reason || null
+      }
+    ],
+    usage: lastChunk?.usage || null
+    // Usage info typically in last chunk
+  };
+  return response;
+}
+function reconstructAnthropicResponse(chunks) {
+  if (chunks.length === 0) {
+    return null;
+  }
+  const lastChunk = chunks[chunks.length - 1];
+  let fullContent = "";
+  for (const chunk of chunks) {
+    if (chunk?.type === "content_block_delta" && chunk?.delta?.text) {
+      fullContent += chunk.delta.text;
+    }
+  }
+  return {
+    id: lastChunk?.id || null,
+    model: lastChunk?.model || null,
+    type: "message",
+    role: "assistant",
+    content: [{ type: "text", text: fullContent }],
+    stop_reason: lastChunk?.stop_reason || null,
+    stop_sequence: lastChunk?.stop_sequence || null,
+    usage: lastChunk?.usage || null
+  };
+}
+async function* wrapStream(stream, onComplete, onError, provider = "openai") {
+  let firstTokenTime = null;
+  const chunks = [];
+  let tokenCount = 0;
+  const streamStartTime = Date.now();
+  try {
+    for await (const chunk of stream) {
+      if (firstTokenTime === null) {
+        firstTokenTime = Date.now();
+      }
+      if (provider === "openai") {
+        const content = chunk?.choices?.[0]?.delta?.content;
+        if (content && typeof content === "string") {
+          tokenCount += estimateTokens(content);
+        }
+      } else if (provider === "anthropic") {
+        const text = chunk?.delta?.text;
+        if (text && typeof text === "string") {
+          tokenCount += estimateTokens(text);
+        }
+      }
+      chunks.push(chunk);
+      yield chunk;
+    }
+    const fullResponse = provider === "openai" ? reconstructOpenAIResponse(chunks) : reconstructAnthropicResponse(chunks);
+    Promise.resolve().then(() => {
+      try {
+        onComplete({
+          ...fullResponse,
+          timeToFirstToken: firstTokenTime ? firstTokenTime - streamStartTime : null,
+          streamingDuration: firstTokenTime ? Date.now() - firstTokenTime : null,
+          estimatedTokenCount: tokenCount,
+          totalLatency: Date.now() - streamStartTime
+        });
+      } catch (e) {
+        onError(e);
+      }
+    }).catch((e) => {
+      onError(e);
+    });
+  } catch (error) {
+    onError(error);
+    throw error;
+  }
+}
+
+// src/instrumentation/semconv.ts
+var OTEL_SEMCONV = {
+  // Standard GenAI Attributes (Snake Case)
+  GEN_AI_SYSTEM: "gen_ai.system",
+  GEN_AI_REQUEST_MODEL: "gen_ai.request.model",
+  GEN_AI_RESPONSE_MODEL: "gen_ai.response.model",
+  GEN_AI_USAGE_INPUT_TOKENS: "gen_ai.usage.input_tokens",
+  GEN_AI_USAGE_OUTPUT_TOKENS: "gen_ai.usage.output_tokens",
+  GEN_AI_FINISH_REASONS: "gen_ai.response.finish_reasons",
+  // Observa Internal Attributes
+  OBSERVA_TRACE_ID: "observa.trace_id",
+  OBSERVA_SPAN_ID: "observa.span_id",
+  OBSERVA_VERSION: "observa.sdk_version"
+};
+function mapOpenAIToOTEL(request, response) {
+  return {
+    [OTEL_SEMCONV.GEN_AI_SYSTEM]: "openai",
+    [OTEL_SEMCONV.GEN_AI_REQUEST_MODEL]: request.model,
+    [OTEL_SEMCONV.GEN_AI_RESPONSE_MODEL]: response?.model || request.model,
+    [OTEL_SEMCONV.GEN_AI_USAGE_INPUT_TOKENS]: response?.usage?.prompt_tokens,
+    [OTEL_SEMCONV.GEN_AI_USAGE_OUTPUT_TOKENS]: response?.usage?.completion_tokens,
+    [OTEL_SEMCONV.GEN_AI_FINISH_REASONS]: response?.choices?.map((c) => c.finish_reason)
+  };
+}
+
+// src/context.ts
+var traceContextStorage = null;
+var edgeContextMap = null;
+try {
+  const { AsyncLocalStorage } = __require("async_hooks");
+  traceContextStorage = new AsyncLocalStorage();
+} catch {
+  edgeContextMap = /* @__PURE__ */ new WeakMap();
+}
+function getTraceContext() {
+  try {
+    if (traceContextStorage) {
+      return traceContextStorage.getStore();
+    }
+    return void 0;
+  } catch {
+    return void 0;
+  }
+}
+
+// src/instrumentation/openai.ts
+var proxyCache = /* @__PURE__ */ new WeakMap();
+function observeOpenAI(client, options) {
+  if (proxyCache.has(client)) {
+    return proxyCache.get(client);
+  }
+  try {
+    const wrapped = new Proxy(client, {
+      get(target, prop, receiver) {
+        const value = Reflect.get(target, prop, receiver);
+        if (typeof value === "object" && value !== null) {
+          if (prop === "prototype" || prop === "constructor") {
+            return value;
+          }
+          return observeOpenAI(value, options);
+        }
+        if (typeof value === "function" && prop === "create") {
+          return async function(...args) {
+            return traceOpenAICall(value.bind(target), args, options);
+          };
+        }
+        return value;
+      }
+    });
+    proxyCache.set(client, wrapped);
+    return wrapped;
+  } catch (error) {
+    console.error("[Observa] Failed to wrap OpenAI client:", error);
+    return client;
+  }
+}
+async function traceOpenAICall(originalFn, args, options) {
+  const startTime = Date.now();
+  const requestParams = args[0] || {};
+  const isStreaming = requestParams.stream === true;
+  try {
+    const result = await originalFn(...args);
+    if (isStreaming) {
+      return wrapStream(
+        result,
+        (fullResponse) => {
+          recordTrace(
+            requestParams,
+            fullResponse,
+            startTime,
+            options,
+            fullResponse.timeToFirstToken,
+            fullResponse.streamingDuration
+          );
+        },
+        (err) => recordError(requestParams, err, startTime, options),
+        "openai"
+      );
+    } else {
+      recordTrace(requestParams, result, startTime, options);
+      return result;
+    }
+  } catch (error) {
+    recordError(requestParams, error, startTime, options);
+    throw error;
+  }
+}
+function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration) {
+  const duration = Date.now() - start;
+  try {
+    const context = getTraceContext();
+    const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+    const sanitizedRes = opts?.redact ? opts.redact(res) : res;
+    const otelAttributes = mapOpenAIToOTEL(sanitizedReq, sanitizedRes);
+    if (opts?.observa) {
+      const inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+      const outputText = sanitizedRes?.choices?.[0]?.message?.content || null;
+      opts.observa.trackLLMCall({
+        model: sanitizedReq.model || sanitizedRes?.model || "unknown",
+        input: inputText,
+        output: outputText,
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: sanitizedRes?.choices?.map((c) => c.message) || null,
+        inputTokens: sanitizedRes?.usage?.prompt_tokens || null,
+        outputTokens: sanitizedRes?.usage?.completion_tokens || null,
+        totalTokens: sanitizedRes?.usage?.total_tokens || null,
+        latencyMs: duration,
+        timeToFirstTokenMs: timeToFirstToken || null,
+        streamingDurationMs: streamingDuration || null,
+        finishReason: sanitizedRes?.choices?.[0]?.finish_reason || null,
+        responseId: sanitizedRes?.id || null,
+        operationName: "chat",
+        providerName: "openai",
+        responseModel: sanitizedRes?.model || sanitizedReq.model || null,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
+    }
+  } catch (e) {
+    console.error("[Observa] Failed to record trace", e);
+  }
+}
+function recordError(req, error, start, opts) {
+  try {
+    console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
+    const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+    if (opts?.observa) {
+      opts.observa.trackError({
+        errorType: "openai_api_error",
+        errorMessage: error?.message || String(error),
+        stackTrace: error?.stack || null,
+        context: { request: sanitizedReq }
+      });
+    }
+  } catch (e) {
+  }
+}
+
+// src/instrumentation/anthropic.ts
+var proxyCache2 = /* @__PURE__ */ new WeakMap();
+function observeAnthropic(client, options) {
+  if (proxyCache2.has(client)) {
+    return proxyCache2.get(client);
+  }
+  try {
+    const wrapped = new Proxy(client, {
+      get(target, prop, receiver) {
+        const value = Reflect.get(target, prop, receiver);
+        if (typeof value === "object" && value !== null) {
+          if (prop === "prototype" || prop === "constructor") {
+            return value;
+          }
+          return observeAnthropic(value, options);
+        }
+        if (typeof value === "function" && prop === "create") {
+          return async function(...args) {
+            return traceAnthropicCall(value.bind(target), args, options);
+          };
+        }
+        return value;
+      }
+    });
+    proxyCache2.set(client, wrapped);
+    return wrapped;
+  } catch (error) {
+    console.error("[Observa] Failed to wrap Anthropic client:", error);
+    return client;
+  }
+}
+async function traceAnthropicCall(originalFn, args, options) {
+  const startTime = Date.now();
+  const requestParams = args[0] || {};
+  const isStreaming = requestParams.stream === true;
+  try {
+    const result = await originalFn(...args);
+    if (isStreaming) {
+      return wrapStream(
+        result,
+        (fullResponse) => {
+          recordTrace2(
+            requestParams,
+            fullResponse,
+            startTime,
+            options,
+            fullResponse.timeToFirstToken,
+            fullResponse.streamingDuration
+          );
+        },
+        (err) => recordError2(requestParams, err, startTime, options),
+        "anthropic"
+      );
+    } else {
+      recordTrace2(requestParams, result, startTime, options);
+      return result;
+    }
+  } catch (error) {
+    recordError2(requestParams, error, startTime, options);
+    throw error;
+  }
+}
+function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration) {
+  const duration = Date.now() - start;
+  try {
+    const context = getTraceContext();
+    const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+    const sanitizedRes = opts?.redact ? opts.redact(res) : res;
+    if (opts?.observa) {
+      const inputText = sanitizedReq.messages?.map((m) => {
+        if (typeof m.content === "string") return m.content;
+        if (Array.isArray(m.content)) {
+          return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+        }
+        return null;
+      }).filter(Boolean).join("\n") || null;
+      const outputText = sanitizedRes?.content?.map((c) => c.text).filter(Boolean).join("\n") || null;
+      opts.observa.trackLLMCall({
+        model: sanitizedReq.model || sanitizedRes?.model || "unknown",
+        input: inputText,
+        output: outputText,
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: sanitizedRes?.content || null,
+        inputTokens: sanitizedRes?.usage?.input_tokens || null,
+        outputTokens: sanitizedRes?.usage?.output_tokens || null,
+        totalTokens: (sanitizedRes?.usage?.input_tokens || 0) + (sanitizedRes?.usage?.output_tokens || 0) || null,
+        latencyMs: duration,
+        timeToFirstTokenMs: timeToFirstToken || null,
+        streamingDurationMs: streamingDuration || null,
+        finishReason: sanitizedRes?.stop_reason || null,
+        responseId: sanitizedRes?.id || null,
+        operationName: "chat",
+        providerName: "anthropic",
+        responseModel: sanitizedRes?.model || sanitizedReq.model || null,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
+    }
+  } catch (e) {
+    console.error("[Observa] Failed to record trace", e);
+  }
+}
+function recordError2(req, error, start, opts) {
+  try {
+    console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
+    const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+    if (opts?.observa) {
+      opts.observa.trackError({
+        errorType: "anthropic_api_error",
+        errorMessage: error?.message || String(error),
+        stackTrace: error?.stack || null,
+        context: { request: sanitizedReq }
+      });
+    }
+  } catch (e) {
+  }
+}
+
 // src/index.ts
 var contextModule = null;
 try {
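The hunk above adds the instrumentation core: `observeOpenAI`/`observeAnthropic` proxy a client and intercept `create`, and `wrapStream` re-yields every chunk while buffering them so the complete response can be reconstructed and reported off the hot path. A minimal sketch of what this means at a call site, assuming an already-constructed `Observa` instance named `observa` and the official `openai` package (the public wrapper entry points appear in the last hunk of this diff):

```typescript
// Sketch only - `observa` is assumed to be an initialized Observa instance.
import OpenAI from "openai";

const openai = observa.observeOpenAI(new OpenAI({ apiKey: process.env.OPENAI_API_KEY }));

// Streaming calls iterate exactly as before: wrapStream yields each chunk
// through untouched and records the reconstructed trace once iteration ends.
const stream = await openai.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "hello" }],
  stream: true,
});
for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}
```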
@@ -383,7 +771,78 @@ var Observa = class {
     return this.currentTraceId;
   }
   /**
-   * Track
+   * Track an LLM call with full OTEL support
+   * CRITICAL: This is the primary method for tracking LLM calls with all SOTA parameters
+   */
+  trackLLMCall(options) {
+    const spanId = crypto.randomUUID();
+    let providerName = options.providerName;
+    if (!providerName && options.model) {
+      const modelLower = options.model.toLowerCase();
+      if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+        providerName = "openai";
+      } else if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+        providerName = "anthropic";
+      } else if (modelLower.includes("gemini") || modelLower.includes("google")) {
+        providerName = "google";
+      } else if (modelLower.includes("vertex")) {
+        providerName = "gcp.vertex_ai";
+      } else if (modelLower.includes("bedrock") || modelLower.includes("aws")) {
+        providerName = "aws.bedrock";
+      }
+    }
+    const operationName = options.operationName || "chat";
+    this.addEvent({
+      event_type: "llm_call",
+      span_id: spanId,
+      attributes: {
+        llm_call: {
+          model: options.model,
+          input: options.input || null,
+          output: options.output || null,
+          input_tokens: options.inputTokens || null,
+          output_tokens: options.outputTokens || null,
+          total_tokens: options.totalTokens || null,
+          latency_ms: options.latencyMs,
+          time_to_first_token_ms: options.timeToFirstTokenMs || null,
+          streaming_duration_ms: options.streamingDurationMs || null,
+          finish_reason: options.finishReason || null,
+          response_id: options.responseId || null,
+          system_fingerprint: options.systemFingerprint || null,
+          cost: options.cost || null,
+          temperature: options.temperature || null,
+          max_tokens: options.maxTokens || null,
+          // TIER 1: OTEL Semantic Conventions
+          operation_name: operationName,
+          provider_name: providerName || null,
+          response_model: options.responseModel || null,
+          // TIER 2: Sampling parameters
+          top_k: options.topK || null,
+          top_p: options.topP || null,
+          frequency_penalty: options.frequencyPenalty || null,
+          presence_penalty: options.presencePenalty || null,
+          stop_sequences: options.stopSequences || null,
+          seed: options.seed || null,
+          // TIER 2: Structured cost tracking
+          input_cost: options.inputCost || null,
+          output_cost: options.outputCost || null,
+          // TIER 1: Structured message objects
+          input_messages: options.inputMessages || null,
+          output_messages: options.outputMessages || null,
+          system_instructions: options.systemInstructions || null,
+          // TIER 2: Server metadata
+          server_address: options.serverAddress || null,
+          server_port: options.serverPort || null,
+          // TIER 2: Conversation grouping
+          conversation_id_otel: options.conversationIdOtel || null,
+          choice_count: options.choiceCount || null
+        }
+      }
+    });
+    return spanId;
+  }
+  /**
+   * Track a tool call with OTEL standardization
    */
   trackToolCall(options) {
     const spanId = crypto.randomUUID();
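`trackLLMCall` is also callable directly, for providers the SDK does not auto-wrap. The camelCase options map onto the snake_case `llm_call` attributes above, and `provider_name` is inferred from the model string when `providerName` is omitted. A sketch with illustrative values:

```typescript
// Values are illustrative; option names mirror the hunk above.
const spanId = observa.trackLLMCall({
  model: "claude-sonnet-4-20250514", // "claude" => providerName "anthropic"
  input: "What is OpenTelemetry?",
  output: "OpenTelemetry is an observability framework...",
  inputTokens: 12,
  outputTokens: 180,
  totalTokens: 192,
  latencyMs: 840,
  finishReason: "end_turn",
  temperature: 0.2,
});
```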
@@ -397,14 +856,21 @@ var Observa = class {
           result: options.result || null,
           result_status: options.resultStatus,
           latency_ms: options.latencyMs,
-          error_message: options.errorMessage || null
+          error_message: options.errorMessage || null,
+          // TIER 2: OTEL Tool Standardization
+          operation_name: options.operationName || "execute_tool",
+          tool_type: options.toolType || null,
+          tool_description: options.toolDescription || null,
+          tool_call_id: options.toolCallId || null,
+          error_type: options.errorType || null,
+          error_category: options.errorCategory || null
         }
       }
     });
     return spanId;
   }
   /**
-   * Track a retrieval operation
+   * Track a retrieval operation with vector metadata enrichment
    */
   trackRetrieval(options) {
     const spanId = crypto.randomUUID();
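`trackToolCall` gains the TIER 2 OTEL tool fields. A sketch exercising only the fields visible in this hunk; pre-existing options (such as the tool's name) sit outside the changed lines:

```typescript
// Only fields shown in the hunk above are used here; values illustrative.
observa.trackToolCall({
  result: JSON.stringify({ temp_c: 18 }),
  resultStatus: "success",
  latencyMs: 42,
  // New TIER 2 fields:
  operationName: "execute_tool",
  toolType: "function",
  toolDescription: "Fetches current weather for a city",
  toolCallId: "call_abc123",
});
```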
@@ -418,14 +884,23 @@ var Observa = class {
           k: options.k || null,
           top_k: options.k || null,
           similarity_scores: options.similarityScores || null,
-          latency_ms: options.latencyMs
+          latency_ms: options.latencyMs,
+          // TIER 2: Retrieval enrichment
+          retrieval_context: options.retrievalContext || null,
+          embedding_model: options.embeddingModel || null,
+          embedding_dimensions: options.embeddingDimensions || null,
+          vector_metric: options.vectorMetric || null,
+          rerank_score: options.rerankScore || null,
+          fusion_method: options.fusionMethod || null,
+          deduplication_removed_count: options.deduplicationRemovedCount || null,
+          quality_score: options.qualityScore || null
         }
       }
     });
     return spanId;
   }
   /**
-   * Track an error with
+   * Track an error with structured error classification
    */
   trackError(options) {
     const spanId = crypto.randomUUID();
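`trackRetrieval` picks up vector-search metadata in the same pattern. A sketch with illustrative values:

```typescript
observa.trackRetrieval({
  k: 5,
  similarityScores: [0.91, 0.88, 0.84, 0.8, 0.77],
  latencyMs: 35,
  // New TIER 2 enrichment fields (values illustrative):
  embeddingModel: "text-embedding-3-small",
  embeddingDimensions: 1536,
  vectorMetric: "cosine",
  fusionMethod: "rrf",
  deduplicationRemovedCount: 2,
});
```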
@@ -441,7 +916,10 @@ var Observa = class {
           error_type: options.errorType,
           error_message: options.errorMessage,
           stack_trace: stackTrace || null,
-          context: options.context || null
+          context: options.context || null,
+          // TIER 2: Structured error classification
+          error_category: options.errorCategory || null,
+          error_code: options.errorCode || null
         }
       }
     });
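`trackError` likewise gains `errorCategory` and `errorCode` for structured classification alongside the free-form fields. A sketch:

```typescript
try {
  throw new Error("Rate limit exceeded");
} catch (err) {
  observa.trackError({
    errorType: "openai_api_error",
    errorMessage: err instanceof Error ? err.message : String(err),
    stackTrace: err instanceof Error ? err.stack : null,
    context: { request: { model: "gpt-4o-mini" } },
    // New TIER 2 classification fields (values illustrative):
    errorCategory: "rate_limit",
    errorCode: "429",
  });
}
```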
@@ -495,6 +973,114 @@ var Observa = class {
     });
     return spanId;
   }
+  /**
+   * Track an embedding operation (TIER 1: Critical)
+   */
+  trackEmbedding(options) {
+    const spanId = crypto.randomUUID();
+    let providerName = options.providerName;
+    if (!providerName && options.model) {
+      const modelLower = options.model.toLowerCase();
+      if (modelLower.includes("text-embedding") || modelLower.includes("openai")) {
+        providerName = "openai";
+      } else if (modelLower.includes("textembedding") || modelLower.includes("google")) {
+        providerName = "google";
+      } else if (modelLower.includes("vertex")) {
+        providerName = "gcp.vertex_ai";
+      }
+    }
+    this.addEvent({
+      event_type: "embedding",
+      span_id: spanId,
+      attributes: {
+        embedding: {
+          model: options.model,
+          dimension_count: options.dimensionCount || null,
+          encoding_formats: options.encodingFormats || null,
+          input_tokens: options.inputTokens || null,
+          output_tokens: options.outputTokens || null,
+          latency_ms: options.latencyMs,
+          cost: options.cost || null,
+          input_text: options.inputText || null,
+          input_hash: options.inputHash || null,
+          embeddings: options.embeddings || null,
+          embeddings_hash: options.embeddingsHash || null,
+          operation_name: options.operationName || "embeddings",
+          provider_name: providerName || null
+        }
+      }
+    });
+    return spanId;
+  }
+  /**
+   * Track a vector database operation (TIER 3)
+   */
+  trackVectorDbOperation(options) {
+    const spanId = crypto.randomUUID();
+    this.addEvent({
+      event_type: "vector_db_operation",
+      span_id: spanId,
+      attributes: {
+        vector_db_operation: {
+          operation_type: options.operationType,
+          index_name: options.indexName || null,
+          index_version: options.indexVersion || null,
+          vector_dimensions: options.vectorDimensions || null,
+          vector_metric: options.vectorMetric || null,
+          results_count: options.resultsCount || null,
+          scores: options.scores || null,
+          latency_ms: options.latencyMs,
+          cost: options.cost || null,
+          api_version: options.apiVersion || null,
+          provider_name: options.providerName || null
+        }
+      }
+    });
+    return spanId;
+  }
+  /**
+   * Track a cache operation (TIER 3)
+   */
+  trackCacheOperation(options) {
+    const spanId = crypto.randomUUID();
+    this.addEvent({
+      event_type: "cache_operation",
+      span_id: spanId,
+      attributes: {
+        cache_operation: {
+          cache_backend: options.cacheBackend || null,
+          cache_key: options.cacheKey || null,
+          cache_namespace: options.cacheNamespace || null,
+          hit_status: options.hitStatus,
+          latency_ms: options.latencyMs,
+          saved_cost: options.savedCost || null,
+          ttl: options.ttl || null,
+          eviction_info: options.evictionInfo || null
+        }
+      }
+    });
+    return spanId;
+  }
+  /**
+   * Track agent creation (TIER 3)
+   */
+  trackAgentCreate(options) {
+    const spanId = crypto.randomUUID();
+    this.addEvent({
+      event_type: "agent_create",
+      span_id: spanId,
+      attributes: {
+        agent_create: {
+          agent_name: options.agentName,
+          agent_config: options.agentConfig || null,
+          tools_bound: options.toolsBound || null,
+          model_config: options.modelConfig || null,
+          operation_name: options.operationName || "create_agent"
+        }
+      }
+    });
+    return spanId;
+  }
   /**
    * Execute a function within a span context (for nested operations)
    * This allows tool calls to be nested under LLM calls, etc.
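The four new event types all follow the same shape: each returns a span id and writes one snake_case attribute block. A sketch of the new cache tracker, with illustrative values:

```typescript
// Illustrative values for the new cache_operation event.
observa.trackCacheOperation({
  cacheBackend: "redis",
  cacheKey: "chat:prompt:sha256:ab12...",
  cacheNamespace: "completions",
  hitStatus: "hit",
  latencyMs: 3,
  savedCost: 0.0021,
  ttl: 3600,
});
```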
@@ -595,6 +1181,21 @@ var Observa = class {
     });
     if (trace.model) {
       const llmSpanId = crypto.randomUUID();
+      let providerName = null;
+      if (trace.model) {
+        const modelLower = trace.model.toLowerCase();
+        if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+          providerName = "openai";
+        } else if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+          providerName = "anthropic";
+        } else if (modelLower.includes("gemini") || modelLower.includes("google")) {
+          providerName = "google";
+        } else if (modelLower.includes("vertex")) {
+          providerName = "gcp.vertex_ai";
+        } else if (modelLower.includes("bedrock") || modelLower.includes("aws")) {
+          providerName = "aws.bedrock";
+        }
+      }
       events.push({
         ...baseEvent,
         span_id: llmSpanId,
@@ -615,8 +1216,13 @@ var Observa = class {
           finish_reason: trace.finishReason || null,
           response_id: trace.responseId || null,
           system_fingerprint: trace.systemFingerprint || null,
-          cost: null
+          cost: null,
           // Cost calculation handled by backend
+          // TIER 1: OTEL Semantic Conventions (auto-inferred)
+          operation_name: "chat",
+          // Default for legacy track() method
+          provider_name: providerName
+          // Other OTEL fields can be added via trackLLMCall() method
         }
       }
     });
@@ -708,6 +1314,56 @@ var Observa = class {
     }
     await this.flush();
   }
+  /**
+   * Observe OpenAI client - wraps client with automatic tracing
+   *
+   * @param client - OpenAI client instance
+   * @param options - Observation options (name, tags, userId, sessionId, redact)
+   * @returns Wrapped OpenAI client
+   *
+   * @example
+   * ```typescript
+   * import OpenAI from 'openai';
+   * const openai = new OpenAI({ apiKey: '...' });
+   * const wrapped = observa.observeOpenAI(openai, {
+   *   name: 'my-app',
+   *   redact: (data) => ({ ...data, messages: '[REDACTED]' })
+   * });
+   * ```
+   */
+  observeOpenAI(client, options) {
+    try {
+      return observeOpenAI(client, { ...options, observa: this });
+    } catch (error) {
+      console.error("[Observa] Failed to load OpenAI wrapper:", error);
+      return client;
+    }
+  }
+  /**
+   * Observe Anthropic client - wraps client with automatic tracing
+   *
+   * @param client - Anthropic client instance
+   * @param options - Observation options (name, tags, userId, sessionId, redact)
+   * @returns Wrapped Anthropic client
+   *
+   * @example
+   * ```typescript
+   * import Anthropic from '@anthropic-ai/sdk';
+   * const anthropic = new Anthropic({ apiKey: '...' });
+   * const wrapped = observa.observeAnthropic(anthropic, {
+   *   name: 'my-app',
+   *   redact: (data) => ({ ...data, messages: '[REDACTED]' })
+   * });
+   * ```
+   */
+  observeAnthropic(client, options) {
+    try {
+      return observeAnthropic(client, { ...options, observa: this });
+    } catch (error) {
+      console.error("[Observa] Failed to load Anthropic wrapper:", error);
+      return client;
+    }
+  }
   async track(event, action, options) {
     if (this.sampleRate < 1 && Math.random() > this.sampleRate) {
       return action();
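One detail worth noting from the instrumentation hunks: the `redact` option is applied to both the request and the response before anything is recorded (`recordTrace`/`recordTrace2`), so a single hook can scrub prompt content while leaving usage numbers intact. A sketch, assuming the same `observa` instance as above and an existing `openaiClient`; the hook must return objects whose shape the recorders can still read:

```typescript
// Redact hook receives each raw request/response object and returns what
// gets recorded; here only request message bodies are scrubbed.
const wrapped = observa.observeOpenAI(openaiClient, {
  redact: (data) => {
    if (Array.isArray(data?.messages)) {
      return {
        ...data,
        messages: data.messages.map((m) => ({ ...m, content: "[REDACTED]" })),
      };
    }
    return data; // responses pass through untouched in this sketch
  },
});
```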