observa-sdk 0.0.16 → 0.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -292,18 +292,50 @@ function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration)
  }
  }
  function recordError(req, error, start, opts) {
+ const duration = Date.now() - start;
  try {
  console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
  const sanitizedReq = opts?.redact ? opts.redact(req) : req;
  if (opts?.observa) {
+ const model = sanitizedReq.model || "unknown";
+ const inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+ opts.observa.trackLLMCall({
+ model,
+ input: inputText,
+ output: null,
+ // No output on error
+ inputMessages: sanitizedReq.messages || null,
+ outputMessages: null,
+ inputTokens: null,
+ outputTokens: null,
+ totalTokens: null,
+ latencyMs: duration,
+ timeToFirstTokenMs: null,
+ streamingDurationMs: null,
+ finishReason: null,
+ responseId: null,
+ operationName: "chat",
+ providerName: "openai",
+ responseModel: model,
+ temperature: sanitizedReq.temperature || null,
+ maxTokens: sanitizedReq.max_tokens || null
+ });
  opts.observa.trackError({
  errorType: "openai_api_error",
  errorMessage: error?.message || String(error),
  stackTrace: error?.stack || null,
- context: { request: sanitizedReq }
+ context: {
+ request: sanitizedReq,
+ model,
+ input: inputText,
+ provider: "openai",
+ duration_ms: duration
+ },
+ errorCategory: "llm_error"
  });
  }
  } catch (e) {
+ console.error("[Observa] Failed to record error", e);
  }
  }
 
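In practical terms, the change above means a failed OpenAI call is no longer recorded as a bare error: the wrapper now also emits an llm_call event with a null output, the measured latency, and the request's model, messages, temperature and max_tokens, and the error event's context carries the model, input, provider and duration. A rough consumer-side sketch of what this looks like in 0.0.18 — the wrapper method name observeOpenAI and the import of init from the package root are assumptions, since this hunk only shows the internal recordError helper:

import OpenAI from 'openai';
import { init } from 'observa-sdk'; // import path assumed

const observa = init({ apiKey: process.env.OBSERVA_API_KEY });
// Wrapper name assumed from the "Observe OpenAI client" JSDoc later in this diff.
const openai = observa.observeOpenAI(new OpenAI(), { name: 'checkout-bot' });

try {
  await openai.chat.completions.create({
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: 'hi' }]
  });
} catch (err) {
  // As of 0.0.18 the failed call is also captured as an llm_call event (output: null,
  // latencyMs set) plus an error event whose context includes
  // { model, input, provider: 'openai', duration_ms }.
}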
@@ -410,18 +442,56 @@ function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration
  }
  }
  function recordError2(req, error, start, opts) {
+ const duration = Date.now() - start;
  try {
  console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
  const sanitizedReq = opts?.redact ? opts.redact(req) : req;
  if (opts?.observa) {
+ const model = sanitizedReq.model || "unknown";
+ const inputText = sanitizedReq.messages?.map((m) => {
+ if (typeof m.content === "string") return m.content;
+ if (Array.isArray(m.content)) {
+ return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+ }
+ return null;
+ }).filter(Boolean).join("\n") || null;
+ opts.observa.trackLLMCall({
+ model,
+ input: inputText,
+ output: null,
+ // No output on error
+ inputMessages: sanitizedReq.messages || null,
+ outputMessages: null,
+ inputTokens: null,
+ outputTokens: null,
+ totalTokens: null,
+ latencyMs: duration,
+ timeToFirstTokenMs: null,
+ streamingDurationMs: null,
+ finishReason: null,
+ responseId: null,
+ operationName: "chat",
+ providerName: "anthropic",
+ responseModel: model,
+ temperature: sanitizedReq.temperature || null,
+ maxTokens: sanitizedReq.max_tokens || null
+ });
  opts.observa.trackError({
  errorType: "anthropic_api_error",
  errorMessage: error?.message || String(error),
  stackTrace: error?.stack || null,
- context: { request: sanitizedReq }
+ context: {
+ request: sanitizedReq,
+ model,
+ input: inputText,
+ provider: "anthropic",
+ duration_ms: duration
+ },
+ errorCategory: "llm_error"
  });
  }
  } catch (e) {
+ console.error("[Observa] Failed to record error", e);
  }
  }
 
@@ -528,11 +598,15 @@ async function traceGenerateText(originalFn, args, options) {
  recordError3(
  {
  model: modelIdentifier,
- prompt: requestParams.prompt || requestParams.messages || null
+ prompt: requestParams.prompt || null,
+ messages: requestParams.messages || null,
+ temperature: requestParams.temperature || null,
+ maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
  },
  error,
  startTime,
- options
+ options,
+ provider
  );
  throw error;
  }
@@ -616,11 +690,15 @@ async function traceStreamText(originalFn, args, options) {
  (err) => recordError3(
  {
  model: modelIdentifier,
- prompt: requestParams.prompt || requestParams.messages || null
+ prompt: requestParams.prompt || null,
+ messages: requestParams.messages || null,
+ temperature: requestParams.temperature || null,
+ maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
  },
  err,
  startTime,
- options
+ options,
+ provider
  )
  );
  const wrappedResult = Object.create(Object.getPrototypeOf(result));
@@ -652,11 +730,15 @@ async function traceStreamText(originalFn, args, options) {
  recordError3(
  {
  model: modelIdentifier,
- prompt: requestParams.prompt || requestParams.messages || null
+ prompt: requestParams.prompt || null,
+ messages: requestParams.messages || null,
+ temperature: requestParams.temperature || null,
+ maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
  },
  error,
  startTime,
- options
+ options,
+ provider
  );
  throw error;
  }
@@ -703,20 +785,58 @@ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration
  console.error("[Observa] Failed to record trace", e);
  }
  }
- function recordError3(req, error, start, opts) {
+ function recordError3(req, error, start, opts, provider) {
+ const duration = Date.now() - start;
  try {
  console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
  const sanitizedReq = opts?.redact ? opts.redact(req) : req;
  if (opts?.observa) {
+ const model = sanitizedReq.model || "unknown";
+ let inputText = null;
+ let inputMessages = null;
+ if (sanitizedReq.prompt) {
+ inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+ } else if (sanitizedReq.messages) {
+ inputMessages = sanitizedReq.messages;
+ inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+ }
+ opts.observa.trackLLMCall({
+ model,
+ input: inputText,
+ output: null,
+ // No output on error
+ inputMessages,
+ outputMessages: null,
+ inputTokens: null,
+ outputTokens: null,
+ totalTokens: null,
+ latencyMs: duration,
+ timeToFirstTokenMs: null,
+ streamingDurationMs: null,
+ finishReason: null,
+ responseId: null,
+ operationName: "generate_text",
+ providerName: provider || "vercel-ai",
+ responseModel: model,
+ temperature: sanitizedReq.temperature || null,
+ maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
+ });
  opts.observa.trackError({
  errorType: error.name || "UnknownError",
  errorMessage: error.message || "An unknown error occurred",
  stackTrace: error.stack,
- context: { request: sanitizedReq },
+ context: {
+ request: sanitizedReq,
+ model,
+ input: inputText,
+ provider: provider || "vercel-ai",
+ duration_ms: duration
+ },
  errorCategory: "llm_error"
  });
  }
  } catch (e) {
+ console.error("[Observa] Failed to record error", e);
  }
  }
  function observeVercelAI(aiSdk, options) {
@@ -984,6 +1104,10 @@ var Observa = class {
  spanStack = [];
  // Stack for tracking parent-child relationships
  traceStartTime = null;
+ // Track traces with errors (for automatic trace_end generation when using instrumentation)
+ tracesWithErrors = /* @__PURE__ */ new Set();
+ // Track root span IDs for traces (for automatic trace_end generation)
+ traceRootSpanIds = /* @__PURE__ */ new Map();
  constructor(config) {
  this.apiKey = config.apiKey;
  let apiUrlEnv;
@@ -1074,9 +1198,13 @@ var Observa = class {
  addEvent(eventData) {
  const baseProps = this.createBaseEventProperties();
  const parentSpanId = this.spanStack.length > 0 ? this.spanStack[this.spanStack.length - 1] : null;
+ const spanId = eventData.span_id || crypto.randomUUID();
+ if (!this.currentTraceId && !this.traceRootSpanIds.has(baseProps.trace_id)) {
+ this.traceRootSpanIds.set(baseProps.trace_id, spanId);
+ }
  const event = {
  ...baseProps,
- span_id: eventData.span_id || crypto.randomUUID(),
+ span_id: spanId,
  parent_span_id: (eventData.parent_span_id !== void 0 ? eventData.parent_span_id : parentSpanId) ?? null,
  timestamp: eventData.timestamp || (/* @__PURE__ */ new Date()).toISOString(),
  event_type: eventData.event_type,
@@ -1261,6 +1389,8 @@ var Observa = class {
  if (!stackTrace && options.error instanceof Error && options.error.stack) {
  stackTrace = options.error.stack;
  }
+ const baseProps = this.createBaseEventProperties();
+ this.tracesWithErrors.add(baseProps.trace_id);
  this.addEvent({
  event_type: "error",
  span_id: spanId,
@@ -1628,6 +1758,65 @@ var Observa = class {
  eventsByTrace.get(event.trace_id).push(event);
  }
  for (const [traceId, events] of eventsByTrace.entries()) {
+ const hasTraceStart = events.some((e) => e.event_type === "trace_start");
+ const hasTraceEnd = events.some((e) => e.event_type === "trace_end");
+ const hasError = this.tracesWithErrors.has(traceId);
+ const rootSpanId = this.traceRootSpanIds.get(traceId) || events[0]?.span_id || crypto.randomUUID();
+ const firstEvent = events[0];
+ if (!firstEvent) continue;
+ if (!hasTraceStart) {
+ const traceStartEvent = {
+ tenant_id: firstEvent.tenant_id,
+ project_id: firstEvent.project_id,
+ environment: firstEvent.environment,
+ trace_id: traceId,
+ span_id: rootSpanId,
+ parent_span_id: null,
+ timestamp: firstEvent.timestamp,
+ event_type: "trace_start",
+ attributes: {
+ trace_start: {
+ name: null,
+ metadata: null
+ }
+ }
+ };
+ events.unshift(traceStartEvent);
+ }
+ if (!hasTraceEnd) {
+ const llmEvents = events.filter((e) => e.event_type === "llm_call");
+ const totalTokens = llmEvents.reduce(
+ (sum, e) => sum + (e.attributes.llm_call?.total_tokens || 0),
+ 0
+ );
+ const totalCost = llmEvents.reduce(
+ (sum, e) => sum + (e.attributes.llm_call?.cost || 0),
+ 0
+ );
+ const timestamps = events.map((e) => new Date(e.timestamp).getTime()).filter(Boolean);
+ const totalLatency = timestamps.length > 0 ? Math.max(...timestamps) - Math.min(...timestamps) : null;
+ const traceEndEvent = {
+ tenant_id: firstEvent.tenant_id,
+ project_id: firstEvent.project_id,
+ environment: firstEvent.environment,
+ trace_id: traceId,
+ span_id: rootSpanId,
+ parent_span_id: null,
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ event_type: "trace_end",
+ attributes: {
+ trace_end: {
+ total_latency_ms: totalLatency,
+ total_tokens: totalTokens || null,
+ total_cost: totalCost || null,
+ outcome: hasError ? "error" : "success"
+ }
+ }
+ };
+ events.push(traceEndEvent);
+ }
+ this.tracesWithErrors.delete(traceId);
+ this.traceRootSpanIds.delete(traceId);
  await this._sendEventsWithRetry(events);
  }
  }
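The flush logic added above back-fills missing trace boundaries for instrumented traces: if a trace's buffered events contain no trace_start, one is prepended at the first event's timestamp, and if they contain no trace_end, one is appended that sums tokens and cost across llm_call events, measures latency as the spread between the earliest and latest event timestamps, and sets the outcome to "error" when trackError ran for that trace. For illustration, a synthesized trace_end event has roughly this shape (field names follow the diff; the concrete values here are made up):

const traceEndEvent = {
  tenant_id: 'tenant-123',        // copied from the first buffered event
  project_id: 'project-456',
  environment: 'production',
  trace_id: 'trace-abc',
  span_id: 'root-span-id',        // from traceRootSpanIds, else the first event's span_id
  parent_span_id: null,
  timestamp: new Date().toISOString(),
  event_type: 'trace_end',
  attributes: {
    trace_end: {
      total_latency_ms: 1840,     // max(event timestamps) - min(event timestamps)
      total_tokens: 312,          // summed over llm_call events, null when zero
      total_cost: 0.0041,
      outcome: 'error'            // 'error' if the trace had a tracked error, else 'success'
    }
  }
};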
@@ -1669,11 +1858,11 @@ var Observa = class {
  }
  /**
  * Observe OpenAI client - wraps client with automatic tracing
- *
+ *
  * @param client - OpenAI client instance
  * @param options - Observation options (name, tags, userId, sessionId, redact)
  * @returns Wrapped OpenAI client
- *
+ *
  * @example
  * ```typescript
  * import OpenAI from 'openai';
@@ -1694,11 +1883,11 @@ var Observa = class {
  }
  /**
  * Observe Anthropic client - wraps client with automatic tracing
- *
+ *
  * @param client - Anthropic client instance
  * @param options - Observation options (name, tags, userId, sessionId, redact)
  * @returns Wrapped Anthropic client
- *
+ *
  * @example
  * ```typescript
  * import Anthropic from '@anthropic-ai/sdk';
@@ -1719,21 +1908,21 @@ var Observa = class {
  }
  /**
  * Observe Vercel AI SDK - wraps generateText and streamText functions
- *
+ *
  * @param aiSdk - Vercel AI SDK module (imported from 'ai')
  * @param options - Observation options (name, tags, userId, sessionId, redact)
  * @returns Wrapped AI SDK with automatic tracing
- *
+ *
  * @example
  * ```typescript
  * import { generateText, streamText } from 'ai';
  * const observa = init({ apiKey: '...' });
- *
+ *
  * const ai = observa.observeVercelAI({ generateText, streamText }, {
  * name: 'my-app',
  * redact: (data) => ({ ...data, prompt: '[REDACTED]' })
  * });
- *
+ *
  * // Use wrapped functions - automatically tracked!
  * const result = await ai.generateText({
  * model: 'openai/gpt-4',
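Taken together with the recordError3 changes earlier in this file, Vercel AI SDK failures are now attributed to the resolved provider (falling back to 'vercel-ai') and carry the request's prompt or messages, temperature and max-token settings. A short error-path sketch built on the observeVercelAI example from the JSDoc above — the import path for init is an assumption:

import { generateText, streamText } from 'ai';
import { init } from 'observa-sdk'; // import path assumed

const observa = init({ apiKey: '...' });
const ai = observa.observeVercelAI({ generateText, streamText }, { name: 'my-app' });

try {
  await ai.generateText({ model: 'openai/gpt-4', prompt: 'Summarise this ticket' });
} catch (err) {
  // The failure is recorded as an llm_call event (output: null, operationName: 'generate_text',
  // providerName set to the detected provider or 'vercel-ai') plus an error event whose
  // context includes { model, input, provider, duration_ms }; the error is then re-thrown.
}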
package/dist/index.d.cts CHANGED
@@ -38,6 +38,8 @@ declare class Observa {
  private rootSpanId;
  private spanStack;
  private traceStartTime;
+ private tracesWithErrors;
+ private traceRootSpanIds;
  constructor(config: ObservaInitConfig);
  /**
  * Flush buffered events to the API
package/dist/index.d.ts CHANGED
@@ -38,6 +38,8 @@ declare class Observa {
  private rootSpanId;
  private spanStack;
  private traceStartTime;
+ private tracesWithErrors;
+ private traceRootSpanIds;
  constructor(config: ObservaInitConfig);
  /**
  * Flush buffered events to the API
package/dist/index.js CHANGED
@@ -272,18 +272,50 @@ function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration)
  }
  }
  function recordError(req, error, start, opts) {
+ const duration = Date.now() - start;
  try {
  console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
  const sanitizedReq = opts?.redact ? opts.redact(req) : req;
  if (opts?.observa) {
+ const model = sanitizedReq.model || "unknown";
+ const inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+ opts.observa.trackLLMCall({
+ model,
+ input: inputText,
+ output: null,
+ // No output on error
+ inputMessages: sanitizedReq.messages || null,
+ outputMessages: null,
+ inputTokens: null,
+ outputTokens: null,
+ totalTokens: null,
+ latencyMs: duration,
+ timeToFirstTokenMs: null,
+ streamingDurationMs: null,
+ finishReason: null,
+ responseId: null,
+ operationName: "chat",
+ providerName: "openai",
+ responseModel: model,
+ temperature: sanitizedReq.temperature || null,
+ maxTokens: sanitizedReq.max_tokens || null
+ });
  opts.observa.trackError({
  errorType: "openai_api_error",
  errorMessage: error?.message || String(error),
  stackTrace: error?.stack || null,
- context: { request: sanitizedReq }
+ context: {
+ request: sanitizedReq,
+ model,
+ input: inputText,
+ provider: "openai",
+ duration_ms: duration
+ },
+ errorCategory: "llm_error"
  });
  }
  } catch (e) {
+ console.error("[Observa] Failed to record error", e);
  }
  }
 
@@ -390,18 +422,56 @@ function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration
  }
  }
  function recordError2(req, error, start, opts) {
+ const duration = Date.now() - start;
  try {
  console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
  const sanitizedReq = opts?.redact ? opts.redact(req) : req;
  if (opts?.observa) {
+ const model = sanitizedReq.model || "unknown";
+ const inputText = sanitizedReq.messages?.map((m) => {
+ if (typeof m.content === "string") return m.content;
+ if (Array.isArray(m.content)) {
+ return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+ }
+ return null;
+ }).filter(Boolean).join("\n") || null;
+ opts.observa.trackLLMCall({
+ model,
+ input: inputText,
+ output: null,
+ // No output on error
+ inputMessages: sanitizedReq.messages || null,
+ outputMessages: null,
+ inputTokens: null,
+ outputTokens: null,
+ totalTokens: null,
+ latencyMs: duration,
+ timeToFirstTokenMs: null,
+ streamingDurationMs: null,
+ finishReason: null,
+ responseId: null,
+ operationName: "chat",
+ providerName: "anthropic",
+ responseModel: model,
+ temperature: sanitizedReq.temperature || null,
+ maxTokens: sanitizedReq.max_tokens || null
+ });
  opts.observa.trackError({
  errorType: "anthropic_api_error",
  errorMessage: error?.message || String(error),
  stackTrace: error?.stack || null,
- context: { request: sanitizedReq }
+ context: {
+ request: sanitizedReq,
+ model,
+ input: inputText,
+ provider: "anthropic",
+ duration_ms: duration
+ },
+ errorCategory: "llm_error"
  });
  }
  } catch (e) {
+ console.error("[Observa] Failed to record error", e);
  }
  }
 
@@ -508,11 +578,15 @@ async function traceGenerateText(originalFn, args, options) {
  recordError3(
  {
  model: modelIdentifier,
- prompt: requestParams.prompt || requestParams.messages || null
+ prompt: requestParams.prompt || null,
+ messages: requestParams.messages || null,
+ temperature: requestParams.temperature || null,
+ maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
  },
  error,
  startTime,
- options
+ options,
+ provider
  );
  throw error;
  }
@@ -596,11 +670,15 @@ async function traceStreamText(originalFn, args, options) {
  (err) => recordError3(
  {
  model: modelIdentifier,
- prompt: requestParams.prompt || requestParams.messages || null
+ prompt: requestParams.prompt || null,
+ messages: requestParams.messages || null,
+ temperature: requestParams.temperature || null,
+ maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
  },
  err,
  startTime,
- options
+ options,
+ provider
  )
  );
  const wrappedResult = Object.create(Object.getPrototypeOf(result));
@@ -632,11 +710,15 @@ async function traceStreamText(originalFn, args, options) {
  recordError3(
  {
  model: modelIdentifier,
- prompt: requestParams.prompt || requestParams.messages || null
+ prompt: requestParams.prompt || null,
+ messages: requestParams.messages || null,
+ temperature: requestParams.temperature || null,
+ maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
  },
  error,
  startTime,
- options
+ options,
+ provider
  );
  throw error;
  }
@@ -683,20 +765,58 @@ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration
  console.error("[Observa] Failed to record trace", e);
  }
  }
- function recordError3(req, error, start, opts) {
+ function recordError3(req, error, start, opts, provider) {
+ const duration = Date.now() - start;
  try {
  console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
  const sanitizedReq = opts?.redact ? opts.redact(req) : req;
  if (opts?.observa) {
+ const model = sanitizedReq.model || "unknown";
+ let inputText = null;
+ let inputMessages = null;
+ if (sanitizedReq.prompt) {
+ inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+ } else if (sanitizedReq.messages) {
+ inputMessages = sanitizedReq.messages;
+ inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+ }
+ opts.observa.trackLLMCall({
+ model,
+ input: inputText,
+ output: null,
+ // No output on error
+ inputMessages,
+ outputMessages: null,
+ inputTokens: null,
+ outputTokens: null,
+ totalTokens: null,
+ latencyMs: duration,
+ timeToFirstTokenMs: null,
+ streamingDurationMs: null,
+ finishReason: null,
+ responseId: null,
+ operationName: "generate_text",
+ providerName: provider || "vercel-ai",
+ responseModel: model,
+ temperature: sanitizedReq.temperature || null,
+ maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
+ });
  opts.observa.trackError({
  errorType: error.name || "UnknownError",
  errorMessage: error.message || "An unknown error occurred",
  stackTrace: error.stack,
- context: { request: sanitizedReq },
+ context: {
+ request: sanitizedReq,
+ model,
+ input: inputText,
+ provider: provider || "vercel-ai",
+ duration_ms: duration
+ },
  errorCategory: "llm_error"
  });
  }
  } catch (e) {
+ console.error("[Observa] Failed to record error", e);
  }
  }
  function observeVercelAI(aiSdk, options) {
@@ -964,6 +1084,10 @@ var Observa = class {
  spanStack = [];
  // Stack for tracking parent-child relationships
  traceStartTime = null;
+ // Track traces with errors (for automatic trace_end generation when using instrumentation)
+ tracesWithErrors = /* @__PURE__ */ new Set();
+ // Track root span IDs for traces (for automatic trace_end generation)
+ traceRootSpanIds = /* @__PURE__ */ new Map();
  constructor(config) {
  this.apiKey = config.apiKey;
  let apiUrlEnv;
@@ -1054,9 +1178,13 @@ var Observa = class {
  addEvent(eventData) {
  const baseProps = this.createBaseEventProperties();
  const parentSpanId = this.spanStack.length > 0 ? this.spanStack[this.spanStack.length - 1] : null;
+ const spanId = eventData.span_id || crypto.randomUUID();
+ if (!this.currentTraceId && !this.traceRootSpanIds.has(baseProps.trace_id)) {
+ this.traceRootSpanIds.set(baseProps.trace_id, spanId);
+ }
  const event = {
  ...baseProps,
- span_id: eventData.span_id || crypto.randomUUID(),
+ span_id: spanId,
  parent_span_id: (eventData.parent_span_id !== void 0 ? eventData.parent_span_id : parentSpanId) ?? null,
  timestamp: eventData.timestamp || (/* @__PURE__ */ new Date()).toISOString(),
  event_type: eventData.event_type,
@@ -1241,6 +1369,8 @@ var Observa = class {
  if (!stackTrace && options.error instanceof Error && options.error.stack) {
  stackTrace = options.error.stack;
  }
+ const baseProps = this.createBaseEventProperties();
+ this.tracesWithErrors.add(baseProps.trace_id);
  this.addEvent({
  event_type: "error",
  span_id: spanId,
@@ -1608,6 +1738,65 @@ var Observa = class {
  eventsByTrace.get(event.trace_id).push(event);
  }
  for (const [traceId, events] of eventsByTrace.entries()) {
+ const hasTraceStart = events.some((e) => e.event_type === "trace_start");
+ const hasTraceEnd = events.some((e) => e.event_type === "trace_end");
+ const hasError = this.tracesWithErrors.has(traceId);
+ const rootSpanId = this.traceRootSpanIds.get(traceId) || events[0]?.span_id || crypto.randomUUID();
+ const firstEvent = events[0];
+ if (!firstEvent) continue;
+ if (!hasTraceStart) {
+ const traceStartEvent = {
+ tenant_id: firstEvent.tenant_id,
+ project_id: firstEvent.project_id,
+ environment: firstEvent.environment,
+ trace_id: traceId,
+ span_id: rootSpanId,
+ parent_span_id: null,
+ timestamp: firstEvent.timestamp,
+ event_type: "trace_start",
+ attributes: {
+ trace_start: {
+ name: null,
+ metadata: null
+ }
+ }
+ };
+ events.unshift(traceStartEvent);
+ }
+ if (!hasTraceEnd) {
+ const llmEvents = events.filter((e) => e.event_type === "llm_call");
+ const totalTokens = llmEvents.reduce(
+ (sum, e) => sum + (e.attributes.llm_call?.total_tokens || 0),
+ 0
+ );
+ const totalCost = llmEvents.reduce(
+ (sum, e) => sum + (e.attributes.llm_call?.cost || 0),
+ 0
+ );
+ const timestamps = events.map((e) => new Date(e.timestamp).getTime()).filter(Boolean);
+ const totalLatency = timestamps.length > 0 ? Math.max(...timestamps) - Math.min(...timestamps) : null;
+ const traceEndEvent = {
+ tenant_id: firstEvent.tenant_id,
+ project_id: firstEvent.project_id,
+ environment: firstEvent.environment,
+ trace_id: traceId,
+ span_id: rootSpanId,
+ parent_span_id: null,
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
+ event_type: "trace_end",
+ attributes: {
+ trace_end: {
+ total_latency_ms: totalLatency,
+ total_tokens: totalTokens || null,
+ total_cost: totalCost || null,
+ outcome: hasError ? "error" : "success"
+ }
+ }
+ };
+ events.push(traceEndEvent);
+ }
+ this.tracesWithErrors.delete(traceId);
+ this.traceRootSpanIds.delete(traceId);
  await this._sendEventsWithRetry(events);
  }
  }
@@ -1649,11 +1838,11 @@ var Observa = class {
  }
  /**
  * Observe OpenAI client - wraps client with automatic tracing
- *
+ *
  * @param client - OpenAI client instance
  * @param options - Observation options (name, tags, userId, sessionId, redact)
  * @returns Wrapped OpenAI client
- *
+ *
  * @example
  * ```typescript
  * import OpenAI from 'openai';
@@ -1674,11 +1863,11 @@ var Observa = class {
  }
  /**
  * Observe Anthropic client - wraps client with automatic tracing
- *
+ *
  * @param client - Anthropic client instance
  * @param options - Observation options (name, tags, userId, sessionId, redact)
  * @returns Wrapped Anthropic client
- *
+ *
  * @example
  * ```typescript
  * import Anthropic from '@anthropic-ai/sdk';
@@ -1699,21 +1888,21 @@ var Observa = class {
  }
  /**
  * Observe Vercel AI SDK - wraps generateText and streamText functions
- *
+ *
  * @param aiSdk - Vercel AI SDK module (imported from 'ai')
  * @param options - Observation options (name, tags, userId, sessionId, redact)
  * @returns Wrapped AI SDK with automatic tracing
- *
+ *
  * @example
  * ```typescript
  * import { generateText, streamText } from 'ai';
  * const observa = init({ apiKey: '...' });
- *
+ *
  * const ai = observa.observeVercelAI({ generateText, streamText }, {
  * name: 'my-app',
  * redact: (data) => ({ ...data, prompt: '[REDACTED]' })
  * });
- *
+ *
  * // Use wrapped functions - automatically tracked!
  * const result = await ai.generateText({
  * model: 'openai/gpt-4',
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "observa-sdk",
- "version": "0.0.16",
+ "version": "0.0.18",
  "description": "Enterprise-grade observability SDK for AI applications. Track and monitor LLM interactions with zero friction.",
  "type": "module",
  "main": "./dist/index.cjs",