observa-sdk 0.0.15 → 0.0.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/dist/index.cjs +148 -12
  2. package/dist/index.js +148 -12
  3. package/package.json +1 -1
package/dist/index.cjs CHANGED
@@ -292,18 +292,50 @@ function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration)
   }
 }
 function recordError(req, error, start, opts) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      const inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "chat",
+        providerName: "openai",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: "openai_api_error",
         errorMessage: error?.message || String(error),
         stackTrace: error?.stack || null,
-        context: { request: sanitizedReq }
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: "openai",
+          duration_ms: duration
+        },
+        errorCategory: "llm_error"
       });
     }
   } catch (e) {
+    console.error("[Observa] Failed to record error", e);
   }
 }
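Both error handlers now record a trace for the failed call itself, not just an error event, and opts.redact runs before anything is captured, so a redaction hook is the single place to scrub sensitive input. A minimal sketch, assuming an observeOpenAI(client, options) entry point analogous to the observeVercelAI exported further down; the client construction and masking logic are illustrative only:

const client = observeOpenAI(new OpenAI(), {
  observa, // an Observa client instance (assumed to exist in scope)
  // recordError reads model, messages, temperature and max_tokens from the
  // redacted request, so masking content here also masks the `input` and
  // `context.request` fields of the trackLLMCall/trackError events above.
  redact: (req) => ({
    ...req,
    messages: req.messages?.map((m) => ({ ...m, content: "[REDACTED]" }))
  })
});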
 
@@ -410,18 +442,56 @@ function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration
   }
 }
 function recordError2(req, error, start, opts) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      const inputText = sanitizedReq.messages?.map((m) => {
+        if (typeof m.content === "string") return m.content;
+        if (Array.isArray(m.content)) {
+          return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+        }
+        return null;
+      }).filter(Boolean).join("\n") || null;
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "chat",
+        providerName: "anthropic",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: "anthropic_api_error",
         errorMessage: error?.message || String(error),
         stackTrace: error?.stack || null,
-        context: { request: sanitizedReq }
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: "anthropic",
+          duration_ms: duration
+        },
+        errorCategory: "llm_error"
      });
    }
  } catch (e) {
+    console.error("[Observa] Failed to record error", e);
  }
 }
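The Anthropic handler additionally flattens multimodal message content: string content passes through, content-block arrays are reduced to their text fields, and non-text blocks fall back to their type tag. With a hypothetical mixed request, the mapper above behaves like this:

const messages = [
  { role: "user", content: "Describe this image" },
  {
    role: "user",
    content: [
      { type: "image", source: { type: "base64", data: "..." } },
      { type: "text", text: "What is shown?" }
    ]
  }
];
// inputText === "Describe this image\nimage\nWhat is shown?"
// (the image block contributes its type tag, "image", since it has no text)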
 
@@ -528,11 +598,15 @@ async function traceGenerateText(originalFn, args, options) {
     recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       error,
       startTime,
-      options
+      options,
+      provider
     );
     throw error;
   }
@@ -549,10 +623,26 @@ function wrapReadableStream(stream, onComplete, onError) {
   while (true) {
     const { done, value } = await reader.read();
     if (done) break;
-    if (firstTokenTime === null && value) {
+    if (firstTokenTime === null && value !== null && value !== void 0) {
       firstTokenTime = Date.now();
     }
-    const text = decoder.decode(value, { stream: true });
+    let text;
+    if (typeof value === "string") {
+      text = value;
+    } else if (value !== null && value !== void 0) {
+      try {
+        const testValue = value;
+        if (testValue instanceof Uint8Array || typeof ArrayBuffer !== "undefined" && typeof ArrayBuffer.isView === "function" && ArrayBuffer.isView(testValue)) {
+          text = decoder.decode(testValue, { stream: true });
+        } else {
+          text = String(value);
+        }
+      } catch {
+        text = String(value);
+      }
+    } else {
+      continue;
+    }
     chunks.push(text);
   }
   const fullText = chunks.join("");
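The stream wrapper no longer assumes byte chunks: strings pass through unchanged, Uint8Array and other ArrayBuffer views are decoded, and anything else is stringified, so object-mode streams no longer break decoding. The same normalization, extracted into a standalone sketch (chunkToText is a name introduced here, not an SDK export):

const decoder = new TextDecoder();
function chunkToText(value) {
  if (typeof value === "string") return value;             // already text
  if (value === null || value === undefined) return null;  // caller skips these
  if (value instanceof Uint8Array || ArrayBuffer.isView(value)) {
    return decoder.decode(value, { stream: true });        // byte chunks
  }
  return String(value);                                    // object-mode fallback
}
chunkToText("hello ");                          // "hello "
chunkToText(new TextEncoder().encode("world")); // "world"
chunkToText(42);                                // "42"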
@@ -600,11 +690,15 @@ async function traceStreamText(originalFn, args, options) {
     (err) => recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       err,
       startTime,
-      options
+      options,
+      provider
     )
   );
   const wrappedResult = Object.create(Object.getPrototypeOf(result));
@@ -636,11 +730,15 @@ async function traceStreamText(originalFn, args, options) {
     recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       error,
       startTime,
-      options
+      options,
+      provider
     );
     throw error;
   }
@@ -687,20 +785,58 @@ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration
     console.error("[Observa] Failed to record trace", e);
   }
 }
-function recordError3(req, error, start, opts) {
+function recordError3(req, error, start, opts, provider) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      let inputText = null;
+      let inputMessages = null;
+      if (sanitizedReq.prompt) {
+        inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+      } else if (sanitizedReq.messages) {
+        inputMessages = sanitizedReq.messages;
+        inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+      }
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "generate_text",
+        providerName: provider || "vercel-ai",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: error.name || "UnknownError",
         errorMessage: error.message || "An unknown error occurred",
         stackTrace: error.stack,
-        context: { request: sanitizedReq },
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: provider || "vercel-ai",
+          duration_ms: duration
+        },
         errorCategory: "llm_error"
       });
     }
   } catch (e) {
+    console.error("[Observa] Failed to record error", e);
   }
 }
 function observeVercelAI(aiSdk, options) {
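recordError3 now derives a human-readable input from either Vercel AI SDK call style: a string prompt is used verbatim, a structured prompt is JSON-stringified, and a messages array is both kept and flattened via m.content || m.text. The branch, restated as a standalone sketch (extractInput is a name introduced here, not an SDK export):

function extractInput(req) {
  let inputText = null;
  let inputMessages = null;
  if (req.prompt) {
    inputText = typeof req.prompt === "string" ? req.prompt : JSON.stringify(req.prompt);
  } else if (req.messages) {
    inputMessages = req.messages;
    inputText = req.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
  }
  return { inputText, inputMessages };
}
extractInput({ prompt: "Summarize this" });
// -> { inputText: "Summarize this", inputMessages: null }
extractInput({ messages: [{ role: "user", content: "Hi" }] });
// -> { inputText: "Hi", inputMessages: [{ role: "user", content: "Hi" }] }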
package/dist/index.js CHANGED
@@ -272,18 +272,50 @@ function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration)
   }
 }
 function recordError(req, error, start, opts) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      const inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "chat",
+        providerName: "openai",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: "openai_api_error",
         errorMessage: error?.message || String(error),
         stackTrace: error?.stack || null,
-        context: { request: sanitizedReq }
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: "openai",
+          duration_ms: duration
+        },
+        errorCategory: "llm_error"
      });
    }
  } catch (e) {
+    console.error("[Observa] Failed to record error", e);
  }
 }
 
@@ -390,18 +422,56 @@ function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration
   }
 }
 function recordError2(req, error, start, opts) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      const inputText = sanitizedReq.messages?.map((m) => {
+        if (typeof m.content === "string") return m.content;
+        if (Array.isArray(m.content)) {
+          return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+        }
+        return null;
+      }).filter(Boolean).join("\n") || null;
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "chat",
+        providerName: "anthropic",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: "anthropic_api_error",
         errorMessage: error?.message || String(error),
         stackTrace: error?.stack || null,
-        context: { request: sanitizedReq }
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: "anthropic",
+          duration_ms: duration
+        },
+        errorCategory: "llm_error"
      });
    }
  } catch (e) {
+    console.error("[Observa] Failed to record error", e);
  }
 }
@@ -508,11 +578,15 @@ async function traceGenerateText(originalFn, args, options) {
508
578
  recordError3(
509
579
  {
510
580
  model: modelIdentifier,
511
- prompt: requestParams.prompt || requestParams.messages || null
581
+ prompt: requestParams.prompt || null,
582
+ messages: requestParams.messages || null,
583
+ temperature: requestParams.temperature || null,
584
+ maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
512
585
  },
513
586
  error,
514
587
  startTime,
515
- options
588
+ options,
589
+ provider
516
590
  );
517
591
  throw error;
518
592
  }
@@ -529,10 +603,26 @@ function wrapReadableStream(stream, onComplete, onError) {
   while (true) {
     const { done, value } = await reader.read();
     if (done) break;
-    if (firstTokenTime === null && value) {
+    if (firstTokenTime === null && value !== null && value !== void 0) {
       firstTokenTime = Date.now();
     }
-    const text = decoder.decode(value, { stream: true });
+    let text;
+    if (typeof value === "string") {
+      text = value;
+    } else if (value !== null && value !== void 0) {
+      try {
+        const testValue = value;
+        if (testValue instanceof Uint8Array || typeof ArrayBuffer !== "undefined" && typeof ArrayBuffer.isView === "function" && ArrayBuffer.isView(testValue)) {
+          text = decoder.decode(testValue, { stream: true });
+        } else {
+          text = String(value);
+        }
+      } catch {
+        text = String(value);
+      }
+    } else {
+      continue;
+    }
     chunks.push(text);
   }
   const fullText = chunks.join("");
@@ -580,11 +670,15 @@ async function traceStreamText(originalFn, args, options) {
     (err) => recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       err,
       startTime,
-      options
+      options,
+      provider
     )
   );
   const wrappedResult = Object.create(Object.getPrototypeOf(result));
@@ -616,11 +710,15 @@ async function traceStreamText(originalFn, args, options) {
     recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       error,
       startTime,
-      options
+      options,
+      provider
     );
     throw error;
   }
@@ -667,20 +765,58 @@ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration
     console.error("[Observa] Failed to record trace", e);
   }
 }
-function recordError3(req, error, start, opts) {
+function recordError3(req, error, start, opts, provider) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      let inputText = null;
+      let inputMessages = null;
+      if (sanitizedReq.prompt) {
+        inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+      } else if (sanitizedReq.messages) {
+        inputMessages = sanitizedReq.messages;
+        inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+      }
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "generate_text",
+        providerName: provider || "vercel-ai",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: error.name || "UnknownError",
         errorMessage: error.message || "An unknown error occurred",
         stackTrace: error.stack,
-        context: { request: sanitizedReq },
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: provider || "vercel-ai",
+          duration_ms: duration
+        },
         errorCategory: "llm_error"
       });
     }
   } catch (e) {
+    console.error("[Observa] Failed to record error", e);
   }
 }
 function observeVercelAI(aiSdk, options) {
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "observa-sdk",
-  "version": "0.0.15",
+  "version": "0.0.17",
   "description": "Enterprise-grade observability SDK for AI applications. Track and monitor LLM interactions with zero friction.",
   "type": "module",
   "main": "./dist/index.cjs",