observa-sdk 0.0.16 → 0.0.17

Files changed (3)
  1. package/dist/index.cjs +130 -10
  2. package/dist/index.js +130 -10
  3. package/package.json +1 -1
package/dist/index.cjs CHANGED
@@ -292,18 +292,50 @@ function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration)
   }
 }
 function recordError(req, error, start, opts) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      const inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "chat",
+        providerName: "openai",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: "openai_api_error",
         errorMessage: error?.message || String(error),
         stackTrace: error?.stack || null,
-        context: { request: sanitizedReq }
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: "openai",
+          duration_ms: duration
+        },
+        errorCategory: "llm_error"
       });
     }
   } catch (e) {
+    console.error("[Observa] Failed to record error", e);
   }
 }
 
@@ -410,18 +442,56 @@ function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration
   }
 }
 function recordError2(req, error, start, opts) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      const inputText = sanitizedReq.messages?.map((m) => {
+        if (typeof m.content === "string") return m.content;
+        if (Array.isArray(m.content)) {
+          return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+        }
+        return null;
+      }).filter(Boolean).join("\n") || null;
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "chat",
+        providerName: "anthropic",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: "anthropic_api_error",
         errorMessage: error?.message || String(error),
         stackTrace: error?.stack || null,
-        context: { request: sanitizedReq }
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: "anthropic",
+          duration_ms: duration
+        },
+        errorCategory: "llm_error"
       });
     }
   } catch (e) {
+    console.error("[Observa] Failed to record error", e);
   }
 }
 
@@ -528,11 +598,15 @@ async function traceGenerateText(originalFn, args, options) {
     recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       error,
       startTime,
-      options
+      options,
+      provider
     );
     throw error;
   }
@@ -616,11 +690,15 @@ async function traceStreamText(originalFn, args, options) {
     (err) => recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       err,
       startTime,
-      options
+      options,
+      provider
     )
   );
   const wrappedResult = Object.create(Object.getPrototypeOf(result));
@@ -652,11 +730,15 @@ async function traceStreamText(originalFn, args, options) {
     recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       error,
       startTime,
-      options
+      options,
+      provider
     );
     throw error;
   }
@@ -703,20 +785,58 @@ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration
     console.error("[Observa] Failed to record trace", e);
   }
 }
-function recordError3(req, error, start, opts) {
+function recordError3(req, error, start, opts, provider) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      let inputText = null;
+      let inputMessages = null;
+      if (sanitizedReq.prompt) {
+        inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+      } else if (sanitizedReq.messages) {
+        inputMessages = sanitizedReq.messages;
+        inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+      }
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "generate_text",
+        providerName: provider || "vercel-ai",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: error.name || "UnknownError",
         errorMessage: error.message || "An unknown error occurred",
         stackTrace: error.stack,
-        context: { request: sanitizedReq },
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: provider || "vercel-ai",
+          duration_ms: duration
+        },
         errorCategory: "llm_error"
       });
     }
   } catch (e) {
+    console.error("[Observa] Failed to record error", e);
   }
 }
 function observeVercelAI(aiSdk, options) {
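
Across all three wrappers above, the error path now emits a trackLLMCall event before the existing trackError event, so failed requests appear in LLM analytics with their real latency and request metadata (output and token fields nulled) instead of surfacing only in the error feed; the Vercel AI path additionally threads a provider argument into recordError3. A minimal sketch of the two events a failed OpenAI chat call now produces, assuming a hypothetical `observa` client instance (field names mirror the diff; the request values and error message are illustrative):

// Sketch only: the pair of events recordError emits when a wrapped call throws.
const req = {
  model: "gpt-4o",
  temperature: 0.2,
  max_tokens: 256,
  messages: [{ role: "user", content: "hello" }]
};
const start = Date.now();
// ...wrapped OpenAI call throws here...
const duration = Date.now() - start;

observa.trackLLMCall({
  model: req.model,
  input: "hello",              // messages flattened to newline-joined text
  output: null,                // no output on error
  inputMessages: req.messages,
  outputMessages: null,
  inputTokens: null,
  outputTokens: null,
  totalTokens: null,
  latencyMs: duration,         // time elapsed before the failure
  timeToFirstTokenMs: null,
  streamingDurationMs: null,
  finishReason: null,
  responseId: null,
  operationName: "chat",
  providerName: "openai",
  responseModel: req.model,
  temperature: req.temperature,
  maxTokens: req.max_tokens
});
observa.trackError({
  errorType: "openai_api_error",
  errorMessage: "429 rate limit exceeded",  // illustrative
  stackTrace: null,
  context: {
    request: req,              // already passed through opts.redact, if set
    model: req.model,
    input: "hello",
    provider: "openai",
    duration_ms: duration
  },
  errorCategory: "llm_error"
});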
package/dist/index.js CHANGED
@@ -272,18 +272,50 @@ function recordTrace(req, res, start, opts, timeToFirstToken, streamingDuration)
   }
 }
 function recordError(req, error, start, opts) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      const inputText = sanitizedReq.messages?.map((m) => m.content).filter(Boolean).join("\n") || null;
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "chat",
+        providerName: "openai",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: "openai_api_error",
         errorMessage: error?.message || String(error),
         stackTrace: error?.stack || null,
-        context: { request: sanitizedReq }
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: "openai",
+          duration_ms: duration
+        },
+        errorCategory: "llm_error"
      });
     }
   } catch (e) {
+    console.error("[Observa] Failed to record error", e);
   }
 }
 
@@ -390,18 +422,56 @@ function recordTrace2(req, res, start, opts, timeToFirstToken, streamingDuration
   }
 }
 function recordError2(req, error, start, opts) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error?.message || error);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      const inputText = sanitizedReq.messages?.map((m) => {
+        if (typeof m.content === "string") return m.content;
+        if (Array.isArray(m.content)) {
+          return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
+        }
+        return null;
+      }).filter(Boolean).join("\n") || null;
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages: sanitizedReq.messages || null,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "chat",
+        providerName: "anthropic",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: "anthropic_api_error",
         errorMessage: error?.message || String(error),
         stackTrace: error?.stack || null,
-        context: { request: sanitizedReq }
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: "anthropic",
+          duration_ms: duration
+        },
+        errorCategory: "llm_error"
       });
     }
   } catch (e) {
+    console.error("[Observa] Failed to record error", e);
   }
 }
 
@@ -508,11 +578,15 @@ async function traceGenerateText(originalFn, args, options) {
     recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       error,
       startTime,
-      options
+      options,
+      provider
     );
     throw error;
   }
@@ -596,11 +670,15 @@ async function traceStreamText(originalFn, args, options) {
     (err) => recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       err,
       startTime,
-      options
+      options,
+      provider
     )
   );
   const wrappedResult = Object.create(Object.getPrototypeOf(result));
@@ -632,11 +710,15 @@ async function traceStreamText(originalFn, args, options) {
     recordError3(
       {
         model: modelIdentifier,
-        prompt: requestParams.prompt || requestParams.messages || null
+        prompt: requestParams.prompt || null,
+        messages: requestParams.messages || null,
+        temperature: requestParams.temperature || null,
+        maxTokens: requestParams.maxTokens || requestParams.max_tokens || null
       },
       error,
       startTime,
-      options
+      options,
+      provider
     );
     throw error;
   }
@@ -683,20 +765,58 @@ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration
     console.error("[Observa] Failed to record trace", e);
   }
 }
-function recordError3(req, error, start, opts) {
+function recordError3(req, error, start, opts, provider) {
+  const duration = Date.now() - start;
   try {
     console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
     if (opts?.observa) {
+      const model = sanitizedReq.model || "unknown";
+      let inputText = null;
+      let inputMessages = null;
+      if (sanitizedReq.prompt) {
+        inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+      } else if (sanitizedReq.messages) {
+        inputMessages = sanitizedReq.messages;
+        inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+      }
+      opts.observa.trackLLMCall({
+        model,
+        input: inputText,
+        output: null,
+        // No output on error
+        inputMessages,
+        outputMessages: null,
+        inputTokens: null,
+        outputTokens: null,
+        totalTokens: null,
+        latencyMs: duration,
+        timeToFirstTokenMs: null,
+        streamingDurationMs: null,
+        finishReason: null,
+        responseId: null,
+        operationName: "generate_text",
+        providerName: provider || "vercel-ai",
+        responseModel: model,
+        temperature: sanitizedReq.temperature || null,
+        maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
+      });
       opts.observa.trackError({
         errorType: error.name || "UnknownError",
         errorMessage: error.message || "An unknown error occurred",
         stackTrace: error.stack,
-        context: { request: sanitizedReq },
+        context: {
+          request: sanitizedReq,
+          model,
+          input: inputText,
+          provider: provider || "vercel-ai",
+          duration_ms: duration
+        },
         errorCategory: "llm_error"
       });
     }
   } catch (e) {
+    console.error("[Observa] Failed to record error", e);
   }
 }
 function observeVercelAI(aiSdk, options) {
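
package/dist/index.js carries the identical change set as index.cjs; the two files are the ESM and CommonJS builds of the same source. One detail worth isolating is the Anthropic wrapper's new input flattening, which tolerates both plain-string message content and Anthropic-style content-block arrays. A standalone sketch (the function name flattenInput is illustrative; the body mirrors the code added to recordError2):

// Illustrative extraction of the inputText logic in recordError2:
// string content passes through; for block arrays, each block keeps
// its text, falling back to the block type (e.g. "image") so that
// non-text blocks still leave a trace in the flattened input.
function flattenInput(messages) {
  return messages?.map((m) => {
    if (typeof m.content === "string") return m.content;
    if (Array.isArray(m.content)) {
      return m.content.map((c) => c.text || c.type).filter(Boolean).join("\n");
    }
    return null;
  }).filter(Boolean).join("\n") || null;
}

// flattenInput([{ role: "user", content: [
//   { type: "image", source: {} },
//   { type: "text", text: "describe this" }
// ]}]) === "image\ndescribe this"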
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "observa-sdk",
-  "version": "0.0.16",
+  "version": "0.0.17",
   "description": "Enterprise-grade observability SDK for AI applications. Track and monitor LLM interactions with zero friction.",
   "type": "module",
   "main": "./dist/index.cjs",