observa-sdk 0.0.11 → 0.0.13

This diff shows the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between the released versions.
package/dist/index.cjs CHANGED
@@ -428,19 +428,65 @@ function recordError2(req, error, start, opts) {
 // src/instrumentation/vercel-ai.ts
 function extractProviderFromModel(model) {
   if (!model) return "unknown";
-  const parts = model.split("/");
-  if (parts.length > 1) {
-    return parts[0].toLowerCase();
+  if (typeof model === "object" && model !== null) {
+    if (model.providerId) {
+      return model.providerId.toLowerCase();
+    }
+    if (model.provider) {
+      return String(model.provider).toLowerCase();
+    }
+    if (model.modelId) {
+      const modelId = String(model.modelId).toLowerCase();
+      if (modelId.includes("gpt") || modelId.includes("openai")) {
+        return "openai";
+      }
+      if (modelId.includes("claude") || modelId.includes("anthropic")) {
+        return "anthropic";
+      }
+      if (modelId.includes("gemini") || modelId.includes("google")) {
+        return "google";
+      }
+    }
+    return "unknown";
   }
-  const modelLower = model.toLowerCase();
-  if (modelLower.includes("gpt") || modelLower.includes("openai")) {
-    return "openai";
+  if (typeof model === "string") {
+    const parts = model.split("/");
+    if (parts.length > 1) {
+      return parts[0].toLowerCase();
+    }
+    const modelLower = model.toLowerCase();
+    if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+      return "openai";
+    }
+    if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+      return "anthropic";
+    }
+    if (modelLower.includes("gemini") || modelLower.includes("google")) {
+      return "google";
+    }
   }
-  if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
-    return "anthropic";
+  return "unknown";
+}
+function extractModelIdentifier(model) {
+  if (!model) return "unknown";
+  if (typeof model === "object" && model !== null) {
+    if (model.modelId) {
+      return String(model.modelId);
+    }
+    if (model.providerId && model.modelId) {
+      return `${model.providerId}/${model.modelId}`;
+    }
+    if (model.providerId) {
+      return String(model.providerId);
+    }
+    try {
+      return JSON.stringify(model);
+    } catch {
+      return "unknown";
+    }
   }
-  if (modelLower.includes("gemini") || modelLower.includes("google")) {
-    return "google";
+  if (typeof model === "string") {
+    return model;
   }
   return "unknown";
 }
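
The rewrite above accounts for the Vercel AI SDK passing provider model objects rather than plain strings: the old code called model.split("/") unconditionally, which throws a TypeError when model is an object. A minimal sketch of the new helpers' behavior, assuming a model object that exposes the provider/modelId fields the code probes for (illustrative, not the SDK's full interface):

// Hypothetical model object; the shape mirrors the fields probed above.
const objectModel = { provider: "openai.chat", modelId: "gpt-4o" };
extractProviderFromModel(objectModel); // "openai.chat"
extractModelIdentifier(objectModel);   // "gpt-4o"

// Strings still work as before.
extractProviderFromModel("anthropic/claude-3-haiku"); // "anthropic" (prefix)
extractProviderFromModel("gemini-1.5-pro");           // "google" (substring match)
extractModelIdentifier("gemini-1.5-pro");             // returned as-is

// Note: modelId is checked before the combined providerId/modelId branch,
// so { providerId: "openai", modelId: "gpt-4o" } yields "gpt-4o", never
// "openai/gpt-4o" — the combined branch is unreachable as written.
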
@@ -449,15 +495,17 @@ async function traceGenerateText(originalFn, args, options) {
   const requestParams = args[0] || {};
   const model = requestParams.model || "unknown";
   const provider = extractProviderFromModel(model);
+  const modelIdentifier = extractModelIdentifier(model);
   try {
     const result = await originalFn(...args);
     const responseText = result.text || "";
     const usage = result.usage || {};
     const finishReason = result.finishReason || null;
     const responseId = result.response?.id || null;
+    const responseModel = result.model ? extractModelIdentifier(result.model) : modelIdentifier;
     recordTrace3(
       {
-        model,
+        model: modelIdentifier,
         prompt: requestParams.prompt || requestParams.messages || null,
         messages: requestParams.messages || null
       },
@@ -466,7 +514,7 @@ async function traceGenerateText(originalFn, args, options) {
         usage,
         finishReason,
         responseId,
-        model: result.model || model
+        model: responseModel
       },
       startTime,
       options,
@@ -479,7 +527,7 @@ async function traceGenerateText(originalFn, args, options) {
   } catch (error) {
     recordError3(
       {
-        model,
+        model: modelIdentifier,
         prompt: requestParams.prompt || requestParams.messages || null
       },
       error,
@@ -494,6 +542,7 @@ async function traceStreamText(originalFn, args, options) {
   const requestParams = args[0] || {};
   const model = requestParams.model || "unknown";
   const provider = extractProviderFromModel(model);
+  const modelIdentifier = extractModelIdentifier(model);
   try {
     const result = await originalFn(...args);
     if (result.textStream) {
@@ -502,7 +551,7 @@ async function traceStreamText(originalFn, args, options) {
         (fullResponse) => {
           recordTrace3(
             {
-              model,
+              model: modelIdentifier,
               prompt: requestParams.prompt || requestParams.messages || null,
               messages: requestParams.messages || null
             },
@@ -516,7 +565,7 @@ async function traceStreamText(originalFn, args, options) {
         },
         (err) => recordError3(
           {
-            model,
+            model: modelIdentifier,
            prompt: requestParams.prompt || requestParams.messages || null
           },
           err,
@@ -525,14 +574,14 @@ async function traceStreamText(originalFn, args, options) {
         ),
         "vercel-ai"
       );
-      return {
-        ...result,
-        textStream: wrappedStream
-      };
+      const wrappedResult = Object.create(Object.getPrototypeOf(result));
+      Object.assign(wrappedResult, result);
+      wrappedResult.textStream = wrappedStream;
+      return wrappedResult;
     }
     recordTrace3(
       {
-        model,
+        model: modelIdentifier,
         prompt: requestParams.prompt || requestParams.messages || null,
         messages: requestParams.messages || null
       },
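
The earlier return statement rebuilt the streamText result with an object spread, which copies only own enumerable properties: methods and accessors defined on the result's prototype (the usual case for class instances) are lost on the copy. The replacement clones the prototype first, then copies own properties. A self-contained sketch of the difference; the Result class here is illustrative, not the SDK's actual result type:

class Result {
  constructor() { this.textStream = "original"; }
  toText() { return "from prototype"; } // defined on Result.prototype
}
const result = new Result();

// Old approach: spread drops prototype members.
const spread = { ...result, textStream: "wrapped" };
typeof spread.toText; // "undefined" — method lost

// New approach: preserve the prototype, then copy own properties.
const wrapped = Object.create(Object.getPrototypeOf(result));
Object.assign(wrapped, result);
wrapped.textStream = "wrapped";
wrapped.toText(); // "from prototype" — still available
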
@@ -547,7 +596,7 @@ async function traceStreamText(originalFn, args, options) {
   } catch (error) {
     recordError3(
       {
-        model,
+        model: modelIdentifier,
         prompt: requestParams.prompt || requestParams.messages || null
       },
       error,
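
traceGenerateText above now also prefers the model reported on the response over the one requested, normalizing both through extractModelIdentifier. A hedged sketch of that fallback with stand-in result objects (the shapes are illustrative, not the SDK's actual response type):

const modelIdentifier = extractModelIdentifier("openai/gpt-4o-mini"); // requested

const pick = (result) =>
  result.model ? extractModelIdentifier(result.model) : modelIdentifier;

pick({ text: "...", model: "gpt-4o-mini-2024-07-18" }); // response model wins
pick({ text: "..." });                                  // falls back to "openai/gpt-4o-mini"

The package/dist/index.js build below carries the identical changes, offset by the differing line numbers of the ESM bundle.
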
package/dist/index.js CHANGED
@@ -408,19 +408,65 @@ function recordError2(req, error, start, opts) {
 // src/instrumentation/vercel-ai.ts
 function extractProviderFromModel(model) {
   if (!model) return "unknown";
-  const parts = model.split("/");
-  if (parts.length > 1) {
-    return parts[0].toLowerCase();
+  if (typeof model === "object" && model !== null) {
+    if (model.providerId) {
+      return model.providerId.toLowerCase();
+    }
+    if (model.provider) {
+      return String(model.provider).toLowerCase();
+    }
+    if (model.modelId) {
+      const modelId = String(model.modelId).toLowerCase();
+      if (modelId.includes("gpt") || modelId.includes("openai")) {
+        return "openai";
+      }
+      if (modelId.includes("claude") || modelId.includes("anthropic")) {
+        return "anthropic";
+      }
+      if (modelId.includes("gemini") || modelId.includes("google")) {
+        return "google";
+      }
+    }
+    return "unknown";
   }
-  const modelLower = model.toLowerCase();
-  if (modelLower.includes("gpt") || modelLower.includes("openai")) {
-    return "openai";
+  if (typeof model === "string") {
+    const parts = model.split("/");
+    if (parts.length > 1) {
+      return parts[0].toLowerCase();
+    }
+    const modelLower = model.toLowerCase();
+    if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+      return "openai";
+    }
+    if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+      return "anthropic";
+    }
+    if (modelLower.includes("gemini") || modelLower.includes("google")) {
+      return "google";
+    }
   }
-  if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
-    return "anthropic";
+  return "unknown";
+}
+function extractModelIdentifier(model) {
+  if (!model) return "unknown";
+  if (typeof model === "object" && model !== null) {
+    if (model.modelId) {
+      return String(model.modelId);
+    }
+    if (model.providerId && model.modelId) {
+      return `${model.providerId}/${model.modelId}`;
+    }
+    if (model.providerId) {
+      return String(model.providerId);
+    }
+    try {
+      return JSON.stringify(model);
+    } catch {
+      return "unknown";
+    }
   }
-  if (modelLower.includes("gemini") || modelLower.includes("google")) {
-    return "google";
+  if (typeof model === "string") {
+    return model;
   }
   return "unknown";
 }
@@ -429,15 +475,17 @@ async function traceGenerateText(originalFn, args, options) {
   const requestParams = args[0] || {};
   const model = requestParams.model || "unknown";
   const provider = extractProviderFromModel(model);
+  const modelIdentifier = extractModelIdentifier(model);
   try {
     const result = await originalFn(...args);
     const responseText = result.text || "";
     const usage = result.usage || {};
     const finishReason = result.finishReason || null;
     const responseId = result.response?.id || null;
+    const responseModel = result.model ? extractModelIdentifier(result.model) : modelIdentifier;
     recordTrace3(
       {
-        model,
+        model: modelIdentifier,
         prompt: requestParams.prompt || requestParams.messages || null,
         messages: requestParams.messages || null
       },
@@ -446,7 +494,7 @@ async function traceGenerateText(originalFn, args, options) {
         usage,
         finishReason,
         responseId,
-        model: result.model || model
+        model: responseModel
       },
       startTime,
       options,
@@ -459,7 +507,7 @@ async function traceGenerateText(originalFn, args, options) {
   } catch (error) {
     recordError3(
       {
-        model,
+        model: modelIdentifier,
         prompt: requestParams.prompt || requestParams.messages || null
       },
       error,
@@ -474,6 +522,7 @@ async function traceStreamText(originalFn, args, options) {
   const requestParams = args[0] || {};
   const model = requestParams.model || "unknown";
   const provider = extractProviderFromModel(model);
+  const modelIdentifier = extractModelIdentifier(model);
   try {
     const result = await originalFn(...args);
     if (result.textStream) {
@@ -482,7 +531,7 @@ async function traceStreamText(originalFn, args, options) {
         (fullResponse) => {
           recordTrace3(
             {
-              model,
+              model: modelIdentifier,
               prompt: requestParams.prompt || requestParams.messages || null,
               messages: requestParams.messages || null
             },
@@ -496,7 +545,7 @@ async function traceStreamText(originalFn, args, options) {
         },
         (err) => recordError3(
           {
-            model,
+            model: modelIdentifier,
            prompt: requestParams.prompt || requestParams.messages || null
           },
           err,
@@ -505,14 +554,14 @@ async function traceStreamText(originalFn, args, options) {
         ),
         "vercel-ai"
       );
-      return {
-        ...result,
-        textStream: wrappedStream
-      };
+      const wrappedResult = Object.create(Object.getPrototypeOf(result));
+      Object.assign(wrappedResult, result);
+      wrappedResult.textStream = wrappedStream;
+      return wrappedResult;
     }
     recordTrace3(
       {
-        model,
+        model: modelIdentifier,
         prompt: requestParams.prompt || requestParams.messages || null,
         messages: requestParams.messages || null
       },
@@ -527,7 +576,7 @@ async function traceStreamText(originalFn, args, options) {
   } catch (error) {
     recordError3(
       {
-        model,
+        model: modelIdentifier,
         prompt: requestParams.prompt || requestParams.messages || null
       },
       error,
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "observa-sdk",
-  "version": "0.0.11",
+  "version": "0.0.13",
   "description": "Enterprise-grade observability SDK for AI applications. Track and monitor LLM interactions with zero friction.",
   "type": "module",
   "main": "./dist/index.cjs",