observa-sdk 0.0.17 → 0.0.19

package/dist/index.cjs CHANGED
@@ -194,6 +194,96 @@ function getTraceContext() {
  }
  }

+ // src/instrumentation/error-utils.ts
+ function extractErrorCode(error, provider) {
+ if (!error) return null;
+ if (provider === "openai" || provider === "vercel-ai") {
+ if (error.code) return error.code;
+ if (error.type) return error.type;
+ if (error.status === 401) return "invalid_api_key";
+ if (error.status === 429) return "rate_limit_exceeded";
+ if (error.status === 400) return "invalid_request";
+ if (error.status === 403) return "insufficient_quota";
+ if (error.status === 404) return "model_not_found";
+ if (error.status === 500) return "internal_server_error";
+ if (error.status === 502) return "bad_gateway";
+ if (error.status === 503) return "service_unavailable";
+ }
+ if (provider === "anthropic") {
+ if (error.error?.type) return error.error.type;
+ if (error.status === 401) return "invalid_api_key";
+ if (error.status === 429) return "rate_limit_error";
+ if (error.status === 400) return "invalid_request";
+ if (error.status === 500) return "internal_server_error";
+ }
+ if (error.status) {
+ if (error.status === 401) return "unauthorized";
+ if (error.status === 403) return "forbidden";
+ if (error.status === 404) return "not_found";
+ if (error.status === 429) return "rate_limit_exceeded";
+ if (error.status >= 500) return "server_error";
+ }
+ const message = String(error.message || error);
+ if (message.toLowerCase().includes("timeout")) return "timeout_error";
+ if (message.toLowerCase().includes("network")) return "network_error";
+ if (message.toLowerCase().includes("connection")) return "connection_error";
+ return null;
+ }
+ function categorizeError(error, provider) {
+ if (!error) return "unknown_error";
+ const statusCode = error.status || error.statusCode || error.status_code;
+ const errorCode = extractErrorCode(error, provider);
+ const message = String(error.message || error.error?.message || error || "").toLowerCase();
+ if (statusCode === 401 || statusCode === 403 || errorCode === "invalid_api_key" || errorCode === "unauthorized" || errorCode === "forbidden" || message.includes("authentication") || message.includes("unauthorized") || message.includes("invalid api key") || message.includes("invalid api_token")) {
+ return "authentication_error";
+ }
+ if (statusCode === 429 || errorCode === "rate_limit_exceeded" || errorCode === "rate_limit_error" || message.includes("rate limit") || message.includes("too many requests")) {
+ return "rate_limit_error";
+ }
+ if (statusCode === 400 || errorCode === "invalid_request" || message.includes("validation") || message.includes("invalid") || message.includes("bad request")) {
+ return "validation_error";
+ }
+ if (errorCode === "timeout_error" || message.includes("timeout") || message.includes("timed out") || message.includes("request timeout")) {
+ return "timeout_error";
+ }
+ if (errorCode === "network_error" || errorCode === "connection_error" || message.includes("network") || message.includes("connection") || message.includes("econn") || message.includes("enotfound") || message.includes("eai_again")) {
+ return "network_error";
+ }
+ if (statusCode >= 500 || errorCode === "internal_server_error" || errorCode === "server_error" || errorCode === "bad_gateway" || errorCode === "service_unavailable" || message.includes("server error") || message.includes("internal error")) {
+ return "server_error";
+ }
+ if (errorCode === "insufficient_quota" || message.includes("quota") || message.includes("billing") || message.includes("credits")) {
+ return "quota_error";
+ }
+ if (statusCode === 404 || errorCode === "model_not_found" || errorCode === "not_found" || message.includes("model not found") || message.includes("model unavailable")) {
+ return "model_error";
+ }
+ return "unknown_error";
+ }
+ function extractProviderError(error, provider) {
+ const code = extractErrorCode(error, provider) || "unknown_error";
+ const category = categorizeError(error, provider);
+ const statusCode = error.status || error.statusCode || error.status_code;
+ let message = "An unknown error occurred";
+ if (error.message) {
+ message = error.message;
+ } else if (error.error?.message) {
+ message = error.error.message;
+ } else if (error.response?.data?.error?.message) {
+ message = error.response.data.error.message;
+ } else if (typeof error === "string") {
+ message = error;
+ } else if (error.toString && error.toString() !== "[object Object]") {
+ message = error.toString();
+ }
+ return {
+ code,
+ category,
+ message,
+ statusCode
+ };
+ }
+
  // src/instrumentation/openai.ts
  var proxyCache = /* @__PURE__ */ new WeakMap();
  function observeOpenAI(client, options) {
@@ -229,6 +319,9 @@ async function traceOpenAICall(originalFn, args, options) {
  const startTime = Date.now();
  const requestParams = args[0] || {};
  const isStreaming = requestParams.stream === true;
+ const inputText = requestParams.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+ const inputMessages = requestParams.messages || null;
+ const model = requestParams.model || "unknown";
  try {
  const result = await originalFn(...args);
  if (isStreaming) {
@@ -244,7 +337,15 @@ async function traceOpenAICall(originalFn, args, options) {
  fullResponse.streamingDuration
  );
  },
- (err) => recordError(requestParams, err, startTime, options),
+ (err) => recordError(
+ requestParams,
+ err,
+ startTime,
+ options,
+ inputText,
+ inputMessages,
+ model
+ ),
  "openai"
  );
  } else {
@@ -252,7 +353,15 @@ async function traceOpenAICall(originalFn, args, options) {
  return result;
  }
  } catch (error) {
- recordError(requestParams, error, startTime, options);
+ recordError(
+ requestParams,
+ error,
+ startTime,
+ options,
+ inputText,
+ inputMessages,
+ model
+ );
  throw error;
  }
  }
@@ -291,20 +400,26 @@ function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration)
  console.error("[Observa] Failed to record trace", e);
  }
  }
- function recordError(req, error, start, opts) {
+ function recordError(req, error, start, opts, preExtractedInputText, preExtractedInputMessages, preExtractedModel) {
  const duration = Date.now() - start;
  try {
  console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
  const sanitizedReq = opts?.redact ? opts.redact(req) : req;
  if (opts?.observa) {
- const model = sanitizedReq.model || "unknown";
- const inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+ const model = preExtractedModel || sanitizedReq.model || "unknown";
+ let inputText = preExtractedInputText || null;
+ let inputMessages = preExtractedInputMessages || null;
+ if (!inputText) {
+ inputMessages = sanitizedReq.messages || null;
+ inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+ }
+ const extractedError = extractProviderError(error, "openai");
  opts.observa.trackLLMCall({
  model,
  input: inputText,
  output: null,
  // No output on error
- inputMessages: sanitizedReq.messages || null,
+ inputMessages,
  outputMessages: null,
  inputTokens: null,
  outputTokens: null,
@@ -321,17 +436,19 @@ function recordError(req, error, start, opts) {
  maxTokens: sanitizedReq.max_tokens || null
  });
  opts.observa.trackError({
- errorType: "openai_api_error",
- errorMessage: error?.message || String(error),
+ errorType: error?.name || extractedError.code || "openai_api_error",
+ errorMessage: extractedError.message,
  stackTrace: error?.stack || null,
  context: {
  request: sanitizedReq,
  model,
  input: inputText,
  provider: "openai",
- duration_ms: duration
+ duration_ms: duration,
+ status_code: extractedError.statusCode || null
  },
- errorCategory: "llm_error"
+ errorCategory: extractedError.category,
+ errorCode: extractedError.code
  });
  }
  } catch (e) {
@@ -374,6 +491,15 @@ async function traceAnthropicCall(originalFn, args, options) {
  const startTime = Date.now();
  const requestParams = args[0] || {};
  const isStreaming = requestParams.stream === true;
+ const inputText = requestParams.messages?.map((m) => {
+ if (typeof m.content === "string") return m.content;
+ if (Array.isArray(m.content)) {
+ return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+ }
+ return null;
+ }).filter(Boolean).join("\n") || null;
+ const inputMessages = requestParams.messages || null;
+ const model = requestParams.model || "unknown";
  try {
  const result = await originalFn(...args);
  if (isStreaming) {
@@ -389,7 +515,15 @@ async function traceAnthropicCall(originalFn, args, options) {
  fullResponse.streamingDuration
  );
  },
- (err) => recordError2(requestParams, err, startTime, options),
+ (err) => recordError2(
+ requestParams,
+ err,
+ startTime,
+ options,
+ inputText,
+ inputMessages,
+ model
+ ),
  "anthropic"
  );
  } else {
@@ -397,7 +531,15 @@ async function traceAnthropicCall(originalFn, args, options) {
  return result;
  }
  } catch (error) {
- recordError2(requestParams, error, startTime, options);
+ recordError2(
+ requestParams,
+ error,
+ startTime,
+ options,
+ inputText,
+ inputMessages,
+ model
+ );
  throw error;
  }
  }
@@ -441,26 +583,32 @@ function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration
  console.error("[Observa] Failed to record trace", e);
  }
  }
- function recordError2(req, error, start, opts) {
+ function recordError2(req, error, start, opts, preExtractedInputText, preExtractedInputMessages, preExtractedModel) {
  const duration = Date.now() - start;
  try {
  console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
  const sanitizedReq = opts?.redact ? opts.redact(req) : req;
  if (opts?.observa) {
- const model = sanitizedReq.model || "unknown";
- const inputText = sanitizedReq.messages?.map((m) => {
- if (typeof m.content === "string") return m.content;
- if (Array.isArray(m.content)) {
- return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
- }
- return null;
- }).filter(Boolean).join("\n") || null;
+ const model = preExtractedModel || sanitizedReq.model || "unknown";
+ let inputText = preExtractedInputText || null;
+ let inputMessages = preExtractedInputMessages || null;
+ if (!inputText) {
+ inputMessages = sanitizedReq.messages || null;
+ inputText = sanitizedReq.messages?.map((m) => {
+ if (typeof m.content === "string") return m.content;
+ if (Array.isArray(m.content)) {
+ return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+ }
+ return null;
+ }).filter(Boolean).join("\n") || null;
+ }
+ const extractedError = extractProviderError(error, "anthropic");
  opts.observa.trackLLMCall({
  model,
  input: inputText,
  output: null,
  // No output on error
- inputMessages: sanitizedReq.messages || null,
+ inputMessages,
  outputMessages: null,
  inputTokens: null,
  outputTokens: null,
@@ -477,17 +625,19 @@ function recordError2(req, error, start, opts) {
  maxTokens: sanitizedReq.max_tokens || null
  });
  opts.observa.trackError({
- errorType: "anthropic_api_error",
- errorMessage: error?.message || String(error),
+ errorType: error?.name || extractedError.code || "anthropic_api_error",
+ errorMessage: extractedError.message,
  stackTrace: error?.stack || null,
  context: {
  request: sanitizedReq,
  model,
  input: inputText,
  provider: "anthropic",
- duration_ms: duration
+ duration_ms: duration,
+ status_code: extractedError.statusCode || null
  },
- errorCategory: "llm_error"
+ errorCategory: extractedError.category,
+ errorCode: extractedError.code
  });
  }
  } catch (e) {
@@ -566,6 +716,14 @@ async function traceGenerateText(originalFn, args, options) {
  const model = requestParams.model || "unknown";
  const provider = extractProviderFromModel(model);
  const modelIdentifier = extractModelIdentifier(model);
+ let inputText = null;
+ let inputMessages = null;
+ if (requestParams.prompt) {
+ inputText = typeof requestParams.prompt === "string" ? requestParams.prompt : JSON.stringify(requestParams.prompt);
+ } else if (requestParams.messages) {
+ inputMessages = requestParams.messages;
+ inputText = requestParams.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+ }
  try {
  const result = await originalFn(...args);
  const responseText = result.text || "";
@@ -606,7 +764,9 @@ async function traceGenerateText(originalFn, args, options) {
  error,
  startTime,
  options,
- provider
+ provider,
+ inputText,
+ inputMessages
  );
  throw error;
  }
@@ -664,6 +824,14 @@ async function traceStreamText(originalFn, args, options) {
  const model = requestParams.model || "unknown";
  const provider = extractProviderFromModel(model);
  const modelIdentifier = extractModelIdentifier(model);
+ let inputText = null;
+ let inputMessages = null;
+ if (requestParams.prompt) {
+ inputText = typeof requestParams.prompt === "string" ? requestParams.prompt : JSON.stringify(requestParams.prompt);
+ } else if (requestParams.messages) {
+ inputMessages = requestParams.messages;
+ inputText = requestParams.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+ }
  try {
  const result = await originalFn(...args);
  if (result.textStream) {
@@ -698,7 +866,9 @@ async function traceStreamText(originalFn, args, options) {
  err,
  startTime,
  options,
- provider
+ provider,
+ inputText,
+ inputMessages
  )
  );
  const wrappedResult = Object.create(Object.getPrototypeOf(result));
@@ -738,7 +908,9 @@ async function traceStreamText(originalFn, args, options) {
  error,
  startTime,
  options,
- provider
+ provider,
+ inputText,
+ inputMessages
  );
  throw error;
  }
@@ -785,21 +957,25 @@ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration
  console.error("[Observa] Failed to record trace", e);
  }
  }
- function recordError3(req, error, start, opts, provider) {
+ function recordError3(req, error, start, opts, provider, preExtractedInputText, preExtractedInputMessages) {
  const duration = Date.now() - start;
  try {
- console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
+ console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
  const sanitizedReq = opts?.redact ? opts.redact(req) : req;
  if (opts?.observa) {
  const model = sanitizedReq.model || "unknown";
- let inputText = null;
- let inputMessages = null;
- if (sanitizedReq.prompt) {
- inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
- } else if (sanitizedReq.messages) {
- inputMessages = sanitizedReq.messages;
- inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+ let inputText = preExtractedInputText || null;
+ let inputMessages = preExtractedInputMessages || null;
+ if (!inputText) {
+ if (sanitizedReq.prompt) {
+ inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+ } else if (sanitizedReq.messages) {
+ inputMessages = sanitizedReq.messages;
+ inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+ }
  }
+ const providerName = provider || "vercel-ai";
+ const extractedError = extractProviderError(error, providerName);
  opts.observa.trackLLMCall({
  model,
  input: inputText,
@@ -816,23 +992,25 @@ function recordError3(req, error, start, opts, provider) {
  finishReason: null,
  responseId: null,
  operationName: "generate_text",
- providerName: provider || "vercel-ai",
+ providerName,
  responseModel: model,
  temperature: sanitizedReq.temperature || null,
  maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
  });
  opts.observa.trackError({
- errorType: error.name || "UnknownError",
- errorMessage: error.message || "An unknown error occurred",
- stackTrace: error.stack,
+ errorType: error?.name || extractedError.code || "UnknownError",
+ errorMessage: extractedError.message,
+ stackTrace: error?.stack || null,
  context: {
  request: sanitizedReq,
  model,
  input: inputText,
- provider: provider || "vercel-ai",
- duration_ms: duration
+ provider: providerName,
+ duration_ms: duration,
+ status_code: extractedError.statusCode || null
  },
- errorCategory: "llm_error"
+ errorCategory: extractedError.category,
+ errorCode: extractedError.code
  });
  }
  } catch (e) {
@@ -1104,6 +1282,10 @@ var Observa = class {
  spanStack = [];
  // Stack for tracking parent-child relationships
  traceStartTime = null;
+ // Track traces with errors (for automatic trace_end generation when using instrumentation)
+ tracesWithErrors = /* @__PURE__ */ new Set();
+ // Track root span IDs for traces (for automatic trace_end generation)
+ traceRootSpanIds = /* @__PURE__ */ new Map();
  constructor(config) {
  this.apiKey = config.apiKey;
  let apiUrlEnv;
@@ -1194,9 +1376,13 @@ var Observa = class {
  addEvent(eventData) {
  const baseProps = this.createBaseEventProperties();
  const parentSpanId = this.spanStack.length > 0 ? this.spanStack[this.spanStack.length - 1] : null;
+ const spanId = eventData.span_id || crypto.randomUUID();
+ if (!this.currentTraceId && !this.traceRootSpanIds.has(baseProps.trace_id)) {
+ this.traceRootSpanIds.set(baseProps.trace_id, spanId);
+ }
  const event = {
  ...baseProps,
- span_id: eventData.span_id || crypto.randomUUID(),
+ span_id: spanId,
  parent_span_id: (eventData.parent_span_id !== void 0 ? eventData.parent_span_id : parentSpanId) ?? null,
  timestamp: eventData.timestamp || (/* @__PURE__ */ new Date()).toISOString(),
  event_type: eventData.event_type,
@@ -1381,6 +1567,8 @@ var Observa = class {
  if (!stackTrace && options.error instanceof Error && options.error.stack) {
  stackTrace = options.error.stack;
  }
+ const baseProps = this.createBaseEventProperties();
+ this.tracesWithErrors.add(baseProps.trace_id);
  this.addEvent({
  event_type: "error",
  span_id: spanId,
@@ -1748,6 +1936,65 @@ var Observa = class {
  eventsByTrace.get(event.trace_id).push(event);
  }
  for (const [traceId, events] of eventsByTrace.entries()) {
+ const hasTraceStart = events.some((e) => e.event_type === "trace_start");
+ const hasTraceEnd = events.some((e) => e.event_type === "trace_end");
+ const hasError = this.tracesWithErrors.has(traceId);
+ const rootSpanId = this.traceRootSpanIds.get(traceId) || events[0]?.span_id || crypto.randomUUID();
+ const firstEvent = events[0];
+ if (!firstEvent) continue;
+ if (!hasTraceStart) {
+ const traceStartEvent = {
+ tenant_id: firstEvent.tenant_id,
+ project_id: firstEvent.project_id,
+ environment: firstEvent.environment,
+ trace_id: traceId,
+ span_id: rootSpanId,
+ parent_span_id: null,
+ timestamp: firstEvent.timestamp,
+ event_type: "trace_start",
+ attributes: {
+ trace_start: {
+ name: null,
+ metadata: null
+ }
+ }
+ };
+ events.unshift(traceStartEvent);
+ }
+ if (!hasTraceEnd) {
+ const llmEvents = events.filter((e) => e.event_type === "llm_call");
+ const totalTokens = llmEvents.reduce(
+ (sum, e) => sum + (e.attributes.llm_call?.total_tokens || 0),
+ 0
+ );
+ const totalCost = llmEvents.reduce(
+ (sum, e) => sum + (e.attributes.llm_call?.cost || 0),
+ 0
+ );
+ const timestamps = events.map((e) => new Date(e.timestamp).getTime()).filter(Boolean);
+ const totalLatency = timestamps.length > 0 ? Math.max(...timestamps) - Math.min(...timestamps) : null;
+ const traceEndEvent = {
+ tenant_id: firstEvent.tenant_id,
+ project_id: firstEvent.project_id,
+ environment: firstEvent.environment,
+ trace_id: traceId,
+ span_id: rootSpanId,
+ parent_span_id: null,
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ event_type: "trace_end",
+ attributes: {
+ trace_end: {
+ total_latency_ms: totalLatency,
+ total_tokens: totalTokens || null,
+ total_cost: totalCost || null,
+ outcome: hasError ? "error" : "success"
+ }
+ }
+ };
+ events.push(traceEndEvent);
+ }
+ this.tracesWithErrors.delete(traceId);
+ this.traceRootSpanIds.delete(traceId);
  await this._sendEventsWithRetry(events);
  }
  }
@@ -1789,11 +2036,11 @@ var Observa = class {
  }
  /**
  * Observe OpenAI client - wraps client with automatic tracing
- *
+ *
  * @param client - OpenAI client instance
  * @param options - Observation options (name, tags, userId, sessionId, redact)
  * @returns Wrapped OpenAI client
- *
+ *
  * @example
  * ```typescript
  * import OpenAI from 'openai';
@@ -1814,11 +2061,11 @@ var Observa = class {
  }
  /**
  * Observe Anthropic client - wraps client with automatic tracing
- *
+ *
  * @param client - Anthropic client instance
  * @param options - Observation options (name, tags, userId, sessionId, redact)
  * @returns Wrapped Anthropic client
- *
+ *
  * @example
  * ```typescript
  * import Anthropic from '@anthropic-ai/sdk';
@@ -1839,21 +2086,21 @@ var Observa = class {
  }
  /**
  * Observe Vercel AI SDK - wraps generateText and streamText functions
- *
+ *
  * @param aiSdk - Vercel AI SDK module (imported from 'ai')
  * @param options - Observation options (name, tags, userId, sessionId, redact)
  * @returns Wrapped AI SDK with automatic tracing
- *
+ *
  * @example
  * ```typescript
  * import { generateText, streamText } from 'ai';
  * const observa = init({ apiKey: '...' });
- *
+ *
  * const ai = observa.observeVercelAI({ generateText, streamText }, {
  * name: 'my-app',
  * redact: (data) => ({ ...data, prompt: '[REDACTED]' })
  * });
- *
+ *
  * // Use wrapped functions - automatically tracked!
  * const result = await ai.generateText({
  * model: 'openai/gpt-4',
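
The headline change in this file is the new `src/instrumentation/error-utils.ts` module: provider errors are now mapped to a stable `{ code, category, message, statusCode }` shape instead of the blanket `"llm_error"` category. Here is a worked example of how the tables above classify a typical OpenAI 429, sketched against the internal functions shown in the diff (they are not part of the package's public exports; the error object is a hypothetical stand-in for what the OpenAI SDK throws):

```ts
// Hypothetical error object resembling an OpenAI SDK rate-limit error.
const err = { name: "RateLimitError", status: 429, message: "Rate limit reached for gpt-4" };

// extractErrorCode(err, "openai")  -> "rate_limit_exceeded" (status table)
// categorizeError(err, "openai")   -> "rate_limit_error"    (statusCode === 429 branch)
// extractProviderError(err, "openai") therefore yields:
const classified = {
  code: "rate_limit_exceeded",
  category: "rate_limit_error",
  message: "Rate limit reached for gpt-4",
  statusCode: 429,
};
// recordError then calls trackError with errorType "RateLimitError"
// (error.name takes precedence over the code), errorCategory
// "rate_limit_error", errorCode "rate_limit_exceeded", and
// context.status_code 429.
console.log(classified);
```

The other behavioral fix here: `traceOpenAICall` and `traceAnthropicCall` now capture `inputText`, `inputMessages`, and `model` before issuing the request, so error events retain their input context even when the failure occurs before any response; previously `recordError` re-derived them from the request object after the fact.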
package/dist/index.d.cts CHANGED
@@ -38,6 +38,8 @@ declare class Observa {
  private rootSpanId;
  private spanStack;
  private traceStartTime;
+ private tracesWithErrors;
+ private traceRootSpanIds;
  constructor(config: ObservaInitConfig);
  /**
  * Flush buffered events to the API
package/dist/index.d.ts CHANGED
@@ -38,6 +38,8 @@ declare class Observa {
  private rootSpanId;
  private spanStack;
  private traceStartTime;
+ private tracesWithErrors;
+ private traceRootSpanIds;
  constructor(config: ObservaInitConfig);
  /**
  * Flush buffered events to the API
package/dist/index.js CHANGED
The ESM build receives the identical set of changes as package/dist/index.cjs above (the error-utils module, pre-extracted error context, enriched trackError payloads, JSDoc whitespace cleanup, and flush-time trace_start/trace_end backfill); only the hunk line offsets differ (e.g. @@ -174,6 +174,96 @@ instead of @@ -194,6 +194,96 @@).
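
The other notable runtime change, present in both builds' flush path, is that `flush` now backfills missing `trace_start`/`trace_end` events per trace, so instrumented calls that never opened an explicit trace still arrive as complete traces. A sketch of the synthesized `trace_end`, following the reducer logic in the diff (all IDs and values below are illustrative placeholders):

```js
// Shape of the trace_end event flush() appends when a trace has none.
const traceEndEvent = {
  tenant_id: "t_123",          // copied from the trace's first event
  project_id: "p_456",
  environment: "production",
  trace_id: "trace-abc",
  span_id: "span-root",        // root span recorded by addEvent, else the first event's span_id
  parent_span_id: null,
  timestamp: new Date().toISOString(), // time of flush
  event_type: "trace_end",
  attributes: {
    trace_end: {
      total_latency_ms: 5000,  // max(event timestamps) - min(event timestamps)
      total_tokens: 1234,      // summed across llm_call events; null when the sum is 0
      total_cost: 0.0123,      // summed across llm_call events; null when the sum is 0
      outcome: "error"         // "error" iff trackError fired for this trace, else "success"
    }
  }
};
```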
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "observa-sdk",
- "version": "0.0.17",
+ "version": "0.0.19",
  "description": "Enterprise-grade observability SDK for AI applications. Track and monitor LLM interactions with zero friction.",
  "type": "module",
  "main": "./dist/index.cjs",