observa-sdk 0.0.10 → 0.0.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -88,6 +88,70 @@ for await (const chunk of stream) {
  }
  ```

+ ### Auto-Capture with Anthropic
+
+ Works the same way with Anthropic:
+
+ ```typescript
+ import { init } from "observa-sdk";
+ import Anthropic from "@anthropic-ai/sdk";
+
+ const observa = init({
+   apiKey: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
+ });
+
+ const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
+
+ // Wrap with Observa (automatic tracing)
+ const wrappedAnthropic = observa.observeAnthropic(anthropic, {
+   name: 'my-app',
+   userId: 'user_123',
+ });
+
+ // Use wrapped client - automatically tracked!
+ const response = await wrappedAnthropic.messages.create({
+   model: 'claude-3-opus-20240229',
+   max_tokens: 1024,
+   messages: [{ role: 'user', content: 'Hello!' }],
+ });
+ ```
+
+ ### Auto-Capture with Vercel AI SDK
+
+ Vercel AI SDK is a unified SDK that works with multiple providers:
+
+ ```typescript
+ import { init } from "observa-sdk";
+ import { generateText, streamText } from "ai";
+ import { openai } from "@ai-sdk/openai";
+
+ const observa = init({
+   apiKey: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
+ });
+
+ // Wrap Vercel AI SDK functions (automatic tracing)
+ const ai = observa.observeVercelAI({ generateText, streamText }, {
+   name: 'my-app',
+   userId: 'user_123',
+ });
+
+ // Use wrapped functions - automatically tracked!
+ const result = await ai.generateText({
+   model: openai('gpt-4'),
+   prompt: 'Hello!',
+ });
+
+ // Streaming also works automatically
+ const stream = await ai.streamText({
+   model: openai('gpt-4'),
+   prompt: 'Tell me a joke',
+ });
+
+ for await (const chunk of stream.textStream) {
+   process.stdout.write(chunk);
+ }
+ ```
+
  ### Legacy Manual Tracking

  For more control, you can still use the manual `track()` method:
@@ -290,6 +354,55 @@ const response = await wrapped.messages.create({
  });
  ```

+ ### `observa.observeVercelAI(aiSdk, options?)`
+
+ Wrap Vercel AI SDK functions (`generateText`, `streamText`) with automatic tracing. Vercel AI SDK is a unified SDK that works with multiple providers (OpenAI, Anthropic, Google, etc.).
+
+ **Parameters:**
+ - `aiSdk` (required): Object containing Vercel AI SDK functions (e.g., `{ generateText, streamText }`)
+ - `options` (optional):
+   - `name` (optional): Application/service name
+   - `tags` (optional): Array of tags
+   - `userId` (optional): User identifier
+   - `sessionId` (optional): Session identifier
+   - `redact` (optional): Function to sanitize data before sending to Observa
+
+ **Returns**: Wrapped AI SDK object with the same functions (use them exactly like the original functions)
+
+ **Example:**
+ ```typescript
+ import { generateText, streamText } from 'ai';
+ import { openai } from '@ai-sdk/openai';
+
+ const ai = observa.observeVercelAI({ generateText, streamText }, {
+   name: 'my-app',
+   userId: 'user_123',
+   redact: (data) => {
+     // Sanitize sensitive data
+     if (data?.prompt) {
+       return { ...data, prompt: '[REDACTED]' };
+     }
+     return data;
+   }
+ });
+
+ // Use wrapped functions - automatically tracked!
+ const result = await ai.generateText({
+   model: openai('gpt-4'),
+   prompt: 'Hello!',
+ });
+
+ // Streaming also works automatically
+ const stream = await ai.streamText({
+   model: openai('gpt-4'),
+   prompt: 'Tell me a joke',
+ });
+
+ for await (const chunk of stream.textStream) {
+   process.stdout.write(chunk);
+ }
+ ```
+
  ### `observa.startTrace(options)`

  Start a new trace for manual trace management. Returns the trace ID.
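
The README changes above use `@ai-sdk/openai` throughout. Because `observeVercelAI` only intercepts the `generateText`/`streamText` entry points, other Vercel AI SDK providers should flow through the same tracing path. A minimal sketch with the Anthropic provider, assuming `@ai-sdk/anthropic` is installed and an `OBSERVA_API_KEY` environment variable is set (neither appears in the package README):

```typescript
import { init } from "observa-sdk";
import { generateText, streamText } from "ai";
import { anthropic } from "@ai-sdk/anthropic"; // assumed provider package, not shown in the README

const observa = init({ apiKey: process.env.OBSERVA_API_KEY! });

// Same wrapper as in the README examples; only the provider changes.
const ai = observa.observeVercelAI({ generateText, streamText }, {
  name: "my-app",
  userId: "user_123",
});

// An Anthropic model goes through the same automatic tracing.
const result = await ai.generateText({
  model: anthropic("claude-3-5-sonnet-20240620"),
  prompt: "Hello!",
});
console.log(result.text);
```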
package/dist/index.cjs CHANGED
@@ -104,11 +104,30 @@ async function* wrapStream(stream, onComplete, onError, provider = "openai") {
        if (text && typeof text === "string") {
          tokenCount += estimateTokens(text);
        }
+     } else if (provider === "vercel-ai") {
+       if (typeof chunk === "string") {
+         tokenCount += estimateTokens(chunk);
+       } else if (chunk?.textDelta) {
+         tokenCount += estimateTokens(chunk.textDelta);
+       }
      }
      chunks.push(chunk);
      yield chunk;
    }
-   const fullResponse = provider === "openai" ? reconstructOpenAIResponse(chunks) : reconstructAnthropicResponse(chunks);
+   let fullResponse;
+   if (provider === "openai") {
+     fullResponse = reconstructOpenAIResponse(chunks);
+   } else if (provider === "anthropic") {
+     fullResponse = reconstructAnthropicResponse(chunks);
+   } else if (provider === "vercel-ai") {
+     const fullText = chunks.map((chunk) => typeof chunk === "string" ? chunk : chunk?.textDelta || "").join("");
+     fullResponse = {
+       text: fullText,
+       tokenCount
+     };
+   } else {
+     fullResponse = { chunks, tokenCount };
+   }
    Promise.resolve().then(() => {
      try {
        onComplete({
@@ -406,6 +425,265 @@ function recordError2(req, error, start, opts) {
    }
  }

+ // src/instrumentation/vercel-ai.ts
+ function extractProviderFromModel(model) {
+   if (!model) return "unknown";
+   if (typeof model === "object" && model !== null) {
+     if (model.providerId) {
+       return model.providerId.toLowerCase();
+     }
+     if (model.provider) {
+       return String(model.provider).toLowerCase();
+     }
+     if (model.modelId) {
+       const modelId = String(model.modelId).toLowerCase();
+       if (modelId.includes("gpt") || modelId.includes("openai")) {
+         return "openai";
+       }
+       if (modelId.includes("claude") || modelId.includes("anthropic")) {
+         return "anthropic";
+       }
+       if (modelId.includes("gemini") || modelId.includes("google")) {
+         return "google";
+       }
+     }
+     return "unknown";
+   }
+   if (typeof model === "string") {
+     const parts = model.split("/");
+     if (parts.length > 1) {
+       return parts[0].toLowerCase();
+     }
+     const modelLower = model.toLowerCase();
+     if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+       return "openai";
+     }
+     if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+       return "anthropic";
+     }
+     if (modelLower.includes("gemini") || modelLower.includes("google")) {
+       return "google";
+     }
+   }
+   return "unknown";
+ }
+ function extractModelIdentifier(model) {
+   if (!model) return "unknown";
+   if (typeof model === "object" && model !== null) {
+     if (model.modelId) {
+       return String(model.modelId);
+     }
+     if (model.providerId && model.modelId) {
+       return `${model.providerId}/${model.modelId}`;
+     }
+     if (model.providerId) {
+       return String(model.providerId);
+     }
+     try {
+       return JSON.stringify(model);
+     } catch {
+       return "unknown";
+     }
+   }
+   if (typeof model === "string") {
+     return model;
+   }
+   return "unknown";
+ }
+ async function traceGenerateText(originalFn, args, options) {
+   const startTime = Date.now();
+   const requestParams = args[0] || {};
+   const model = requestParams.model || "unknown";
+   const provider = extractProviderFromModel(model);
+   const modelIdentifier = extractModelIdentifier(model);
+   try {
+     const result = await originalFn(...args);
+     const responseText = result.text || "";
+     const usage = result.usage || {};
+     const finishReason = result.finishReason || null;
+     const responseId = result.response?.id || null;
+     const responseModel = result.model ? extractModelIdentifier(result.model) : modelIdentifier;
+     recordTrace3(
+       {
+         model: modelIdentifier,
+         prompt: requestParams.prompt || requestParams.messages || null,
+         messages: requestParams.messages || null
+       },
+       {
+         text: responseText,
+         usage,
+         finishReason,
+         responseId,
+         model: responseModel
+       },
+       startTime,
+       options,
+       null,
+       // No streaming for generateText
+       null,
+       provider
+     );
+     return result;
+   } catch (error) {
+     recordError3(
+       {
+         model: modelIdentifier,
+         prompt: requestParams.prompt || requestParams.messages || null
+       },
+       error,
+       startTime,
+       options
+     );
+     throw error;
+   }
+ }
+ async function traceStreamText(originalFn, args, options) {
+   const startTime = Date.now();
+   const requestParams = args[0] || {};
+   const model = requestParams.model || "unknown";
+   const provider = extractProviderFromModel(model);
+   const modelIdentifier = extractModelIdentifier(model);
+   try {
+     const result = await originalFn(...args);
+     if (result.textStream) {
+       const wrappedStream = wrapStream(
+         result.textStream,
+         (fullResponse) => {
+           recordTrace3(
+             {
+               model: modelIdentifier,
+               prompt: requestParams.prompt || requestParams.messages || null,
+               messages: requestParams.messages || null
+             },
+             fullResponse,
+             startTime,
+             options,
+             fullResponse.timeToFirstToken,
+             fullResponse.streamingDuration,
+             provider
+           );
+         },
+         (err) => recordError3(
+           {
+             model: modelIdentifier,
+             prompt: requestParams.prompt || requestParams.messages || null
+           },
+           err,
+           startTime,
+           options
+         ),
+         "vercel-ai"
+       );
+       return {
+         ...result,
+         textStream: wrappedStream
+       };
+     }
+     recordTrace3(
+       {
+         model: modelIdentifier,
+         prompt: requestParams.prompt || requestParams.messages || null,
+         messages: requestParams.messages || null
+       },
+       result,
+       startTime,
+       options,
+       null,
+       null,
+       provider
+     );
+     return result;
+   } catch (error) {
+     recordError3(
+       {
+         model: modelIdentifier,
+         prompt: requestParams.prompt || requestParams.messages || null
+       },
+       error,
+       startTime,
+       options
+     );
+     throw error;
+   }
+ }
+ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration, provider) {
+   const duration = Date.now() - start;
+   try {
+     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+     const sanitizedRes = opts?.redact ? opts.redact(res) : req;
+     if (opts?.observa) {
+       let inputText = null;
+       if (sanitizedReq.prompt) {
+         inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+       } else if (sanitizedReq.messages) {
+         inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+       }
+       const outputText = sanitizedRes.text || sanitizedRes.content || null;
+       const usage = sanitizedRes.usage || {};
+       const inputTokens = usage.promptTokens || usage.inputTokens || null;
+       const outputTokens = usage.completionTokens || usage.outputTokens || null;
+       const totalTokens = usage.totalTokens || null;
+       opts.observa.trackLLMCall({
+         model: sanitizedReq.model || sanitizedRes.model || "unknown",
+         input: inputText,
+         output: outputText,
+         inputMessages: sanitizedReq.messages || null,
+         outputMessages: sanitizedRes.messages || null,
+         inputTokens,
+         outputTokens,
+         totalTokens,
+         latencyMs: duration,
+         timeToFirstTokenMs: timeToFirstToken || null,
+         streamingDurationMs: streamingDuration || null,
+         finishReason: sanitizedRes.finishReason || null,
+         responseId: sanitizedRes.responseId || sanitizedRes.id || null,
+         operationName: "generate_text",
+         providerName: provider || "vercel-ai",
+         responseModel: sanitizedRes.model || sanitizedReq.model || null,
+         temperature: sanitizedReq.temperature || null,
+         maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
+       });
+     }
+   } catch (e) {
+     console.error("[Observa] Failed to record trace", e);
+   }
+ }
+ function recordError3(req, error, start, opts) {
+   try {
+     console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
+     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+     if (opts?.observa) {
+       opts.observa.trackError({
+         errorType: error.name || "UnknownError",
+         errorMessage: error.message || "An unknown error occurred",
+         stackTrace: error.stack,
+         context: { request: sanitizedReq },
+         errorCategory: "llm_error"
+       });
+     }
+   } catch (e) {
+   }
+ }
+ function observeVercelAI(aiSdk, options) {
+   try {
+     const wrapped = { ...aiSdk };
+     if (aiSdk.generateText && typeof aiSdk.generateText === "function") {
+       wrapped.generateText = async function(...args) {
+         return traceGenerateText(aiSdk.generateText.bind(aiSdk), args, options);
+       };
+     }
+     if (aiSdk.streamText && typeof aiSdk.streamText === "function") {
+       wrapped.streamText = async function(...args) {
+         return traceStreamText(aiSdk.streamText.bind(aiSdk), args, options);
+       };
+     }
+     return wrapped;
+   } catch (error) {
+     console.error("[Observa] Failed to wrap Vercel AI SDK:", error);
+     return aiSdk;
+   }
+ }
+
  // src/index.ts
  var contextModule = null;
  try {
@@ -1384,6 +1662,38 @@ var Observa = class {
        return client;
      }
    }
+   /**
+    * Observe Vercel AI SDK - wraps generateText and streamText functions
+    *
+    * @param aiSdk - Vercel AI SDK module (imported from 'ai')
+    * @param options - Observation options (name, tags, userId, sessionId, redact)
+    * @returns Wrapped AI SDK with automatic tracing
+    *
+    * @example
+    * ```typescript
+    * import { generateText, streamText } from 'ai';
+    * const observa = init({ apiKey: '...' });
+    *
+    * const ai = observa.observeVercelAI({ generateText, streamText }, {
+    *   name: 'my-app',
+    *   redact: (data) => ({ ...data, prompt: '[REDACTED]' })
+    * });
+    *
+    * // Use wrapped functions - automatically tracked!
+    * const result = await ai.generateText({
+    *   model: 'openai/gpt-4',
+    *   prompt: 'Hello!'
+    * });
+    * ```
+    */
+   observeVercelAI(aiSdk, options) {
+     try {
+       return observeVercelAI(aiSdk, { ...options, observa: this });
+     } catch (error) {
+       console.error("[Observa] Failed to load Vercel AI SDK wrapper:", error);
+       return aiSdk;
+     }
+   }
    async track(event, action, options) {
      if (this.sampleRate < 1 && Math.random() > this.sampleRate) {
        return action();
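
The `extractProviderFromModel` helper bundled above infers a provider name from either a Vercel AI SDK model object (`providerId`, `provider`, `modelId`) or a plain model string. A standalone sketch of the string branch with sample inputs, for reference only (the helper is internal to the bundle, not an exported API):

```typescript
// Mirrors the string branch of extractProviderFromModel from the hunk above.
function providerFromModelId(model: string): string {
  const parts = model.split("/");
  if (parts.length > 1) return parts[0].toLowerCase(); // "openai/gpt-4" -> "openai"
  const m = model.toLowerCase();
  if (m.includes("gpt") || m.includes("openai")) return "openai";
  if (m.includes("claude") || m.includes("anthropic")) return "anthropic";
  if (m.includes("gemini") || m.includes("google")) return "google";
  return "unknown";
}

console.log(providerFromModelId("openai/gpt-4"));           // "openai"
console.log(providerFromModelId("claude-3-opus-20240229")); // "anthropic"
console.log(providerFromModelId("gemini-1.5-pro"));         // "google"
console.log(providerFromModelId("mistral-large"));          // "unknown"
```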
package/dist/index.d.cts CHANGED
@@ -328,6 +328,41 @@ declare class Observa {
      sessionId?: string;
      redact?: (data: any) => any;
    }): any;
+   /**
+    * Observe Vercel AI SDK - wraps generateText and streamText functions
+    *
+    * @param aiSdk - Vercel AI SDK module (imported from 'ai')
+    * @param options - Observation options (name, tags, userId, sessionId, redact)
+    * @returns Wrapped AI SDK with automatic tracing
+    *
+    * @example
+    * ```typescript
+    * import { generateText, streamText } from 'ai';
+    * const observa = init({ apiKey: '...' });
+    *
+    * const ai = observa.observeVercelAI({ generateText, streamText }, {
+    *   name: 'my-app',
+    *   redact: (data) => ({ ...data, prompt: '[REDACTED]' })
+    * });
+    *
+    * // Use wrapped functions - automatically tracked!
+    * const result = await ai.generateText({
+    *   model: 'openai/gpt-4',
+    *   prompt: 'Hello!'
+    * });
+    * ```
+    */
+   observeVercelAI(aiSdk: {
+     generateText?: any;
+     streamText?: any;
+     [key: string]: any;
+   }, options?: {
+     name?: string;
+     tags?: string[];
+     userId?: string;
+     sessionId?: string;
+     redact?: (data: any) => any;
+   }): any;
    track(event: TrackEventInput, action: () => Promise<Response>, options?: {
      trackBlocking?: boolean;
    }): Promise<any>;
package/dist/index.d.ts CHANGED
@@ -328,6 +328,41 @@ declare class Observa {
      sessionId?: string;
      redact?: (data: any) => any;
    }): any;
+   /**
+    * Observe Vercel AI SDK - wraps generateText and streamText functions
+    *
+    * @param aiSdk - Vercel AI SDK module (imported from 'ai')
+    * @param options - Observation options (name, tags, userId, sessionId, redact)
+    * @returns Wrapped AI SDK with automatic tracing
+    *
+    * @example
+    * ```typescript
+    * import { generateText, streamText } from 'ai';
+    * const observa = init({ apiKey: '...' });
+    *
+    * const ai = observa.observeVercelAI({ generateText, streamText }, {
+    *   name: 'my-app',
+    *   redact: (data) => ({ ...data, prompt: '[REDACTED]' })
+    * });
+    *
+    * // Use wrapped functions - automatically tracked!
+    * const result = await ai.generateText({
+    *   model: 'openai/gpt-4',
+    *   prompt: 'Hello!'
+    * });
+    * ```
+    */
+   observeVercelAI(aiSdk: {
+     generateText?: any;
+     streamText?: any;
+     [key: string]: any;
+   }, options?: {
+     name?: string;
+     tags?: string[];
+     userId?: string;
+     sessionId?: string;
+     redact?: (data: any) => any;
+   }): any;
    track(event: TrackEventInput, action: () => Promise<Response>, options?: {
      trackBlocking?: boolean;
    }): Promise<any>;
package/dist/index.js CHANGED
@@ -84,11 +84,30 @@ async function* wrapStream(stream, onComplete, onError, provider = "openai") {
        if (text && typeof text === "string") {
          tokenCount += estimateTokens(text);
        }
+     } else if (provider === "vercel-ai") {
+       if (typeof chunk === "string") {
+         tokenCount += estimateTokens(chunk);
+       } else if (chunk?.textDelta) {
+         tokenCount += estimateTokens(chunk.textDelta);
+       }
      }
      chunks.push(chunk);
      yield chunk;
    }
-   const fullResponse = provider === "openai" ? reconstructOpenAIResponse(chunks) : reconstructAnthropicResponse(chunks);
+   let fullResponse;
+   if (provider === "openai") {
+     fullResponse = reconstructOpenAIResponse(chunks);
+   } else if (provider === "anthropic") {
+     fullResponse = reconstructAnthropicResponse(chunks);
+   } else if (provider === "vercel-ai") {
+     const fullText = chunks.map((chunk) => typeof chunk === "string" ? chunk : chunk?.textDelta || "").join("");
+     fullResponse = {
+       text: fullText,
+       tokenCount
+     };
+   } else {
+     fullResponse = { chunks, tokenCount };
+   }
    Promise.resolve().then(() => {
      try {
        onComplete({
@@ -386,6 +405,265 @@ function recordError2(req, error, start, opts) {
    }
  }

+ // src/instrumentation/vercel-ai.ts
+ function extractProviderFromModel(model) {
+   if (!model) return "unknown";
+   if (typeof model === "object" && model !== null) {
+     if (model.providerId) {
+       return model.providerId.toLowerCase();
+     }
+     if (model.provider) {
+       return String(model.provider).toLowerCase();
+     }
+     if (model.modelId) {
+       const modelId = String(model.modelId).toLowerCase();
+       if (modelId.includes("gpt") || modelId.includes("openai")) {
+         return "openai";
+       }
+       if (modelId.includes("claude") || modelId.includes("anthropic")) {
+         return "anthropic";
+       }
+       if (modelId.includes("gemini") || modelId.includes("google")) {
+         return "google";
+       }
+     }
+     return "unknown";
+   }
+   if (typeof model === "string") {
+     const parts = model.split("/");
+     if (parts.length > 1) {
+       return parts[0].toLowerCase();
+     }
+     const modelLower = model.toLowerCase();
+     if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+       return "openai";
+     }
+     if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+       return "anthropic";
+     }
+     if (modelLower.includes("gemini") || modelLower.includes("google")) {
+       return "google";
+     }
+   }
+   return "unknown";
+ }
+ function extractModelIdentifier(model) {
+   if (!model) return "unknown";
+   if (typeof model === "object" && model !== null) {
+     if (model.modelId) {
+       return String(model.modelId);
+     }
+     if (model.providerId && model.modelId) {
+       return `${model.providerId}/${model.modelId}`;
+     }
+     if (model.providerId) {
+       return String(model.providerId);
+     }
+     try {
+       return JSON.stringify(model);
+     } catch {
+       return "unknown";
+     }
+   }
+   if (typeof model === "string") {
+     return model;
+   }
+   return "unknown";
+ }
+ async function traceGenerateText(originalFn, args, options) {
+   const startTime = Date.now();
+   const requestParams = args[0] || {};
+   const model = requestParams.model || "unknown";
+   const provider = extractProviderFromModel(model);
+   const modelIdentifier = extractModelIdentifier(model);
+   try {
+     const result = await originalFn(...args);
+     const responseText = result.text || "";
+     const usage = result.usage || {};
+     const finishReason = result.finishReason || null;
+     const responseId = result.response?.id || null;
+     const responseModel = result.model ? extractModelIdentifier(result.model) : modelIdentifier;
+     recordTrace3(
+       {
+         model: modelIdentifier,
+         prompt: requestParams.prompt || requestParams.messages || null,
+         messages: requestParams.messages || null
+       },
+       {
+         text: responseText,
+         usage,
+         finishReason,
+         responseId,
+         model: responseModel
+       },
+       startTime,
+       options,
+       null,
+       // No streaming for generateText
+       null,
+       provider
+     );
+     return result;
+   } catch (error) {
+     recordError3(
+       {
+         model: modelIdentifier,
+         prompt: requestParams.prompt || requestParams.messages || null
+       },
+       error,
+       startTime,
+       options
+     );
+     throw error;
+   }
+ }
+ async function traceStreamText(originalFn, args, options) {
+   const startTime = Date.now();
+   const requestParams = args[0] || {};
+   const model = requestParams.model || "unknown";
+   const provider = extractProviderFromModel(model);
+   const modelIdentifier = extractModelIdentifier(model);
+   try {
+     const result = await originalFn(...args);
+     if (result.textStream) {
+       const wrappedStream = wrapStream(
+         result.textStream,
+         (fullResponse) => {
+           recordTrace3(
+             {
+               model: modelIdentifier,
+               prompt: requestParams.prompt || requestParams.messages || null,
+               messages: requestParams.messages || null
+             },
+             fullResponse,
+             startTime,
+             options,
+             fullResponse.timeToFirstToken,
+             fullResponse.streamingDuration,
+             provider
+           );
+         },
+         (err) => recordError3(
+           {
+             model: modelIdentifier,
+             prompt: requestParams.prompt || requestParams.messages || null
+           },
+           err,
+           startTime,
+           options
+         ),
+         "vercel-ai"
+       );
+       return {
+         ...result,
+         textStream: wrappedStream
+       };
+     }
+     recordTrace3(
+       {
+         model: modelIdentifier,
+         prompt: requestParams.prompt || requestParams.messages || null,
+         messages: requestParams.messages || null
+       },
+       result,
+       startTime,
+       options,
+       null,
+       null,
+       provider
+     );
+     return result;
+   } catch (error) {
+     recordError3(
+       {
+         model: modelIdentifier,
+         prompt: requestParams.prompt || requestParams.messages || null
+       },
+       error,
+       startTime,
+       options
+     );
+     throw error;
+   }
+ }
+ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration, provider) {
+   const duration = Date.now() - start;
+   try {
+     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+     const sanitizedRes = opts?.redact ? opts.redact(res) : req;
+     if (opts?.observa) {
+       let inputText = null;
+       if (sanitizedReq.prompt) {
+         inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+       } else if (sanitizedReq.messages) {
+         inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+       }
+       const outputText = sanitizedRes.text || sanitizedRes.content || null;
+       const usage = sanitizedRes.usage || {};
+       const inputTokens = usage.promptTokens || usage.inputTokens || null;
+       const outputTokens = usage.completionTokens || usage.outputTokens || null;
+       const totalTokens = usage.totalTokens || null;
+       opts.observa.trackLLMCall({
+         model: sanitizedReq.model || sanitizedRes.model || "unknown",
+         input: inputText,
+         output: outputText,
+         inputMessages: sanitizedReq.messages || null,
+         outputMessages: sanitizedRes.messages || null,
+         inputTokens,
+         outputTokens,
+         totalTokens,
+         latencyMs: duration,
+         timeToFirstTokenMs: timeToFirstToken || null,
+         streamingDurationMs: streamingDuration || null,
+         finishReason: sanitizedRes.finishReason || null,
+         responseId: sanitizedRes.responseId || sanitizedRes.id || null,
+         operationName: "generate_text",
+         providerName: provider || "vercel-ai",
+         responseModel: sanitizedRes.model || sanitizedReq.model || null,
+         temperature: sanitizedReq.temperature || null,
+         maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
+       });
+     }
+   } catch (e) {
+     console.error("[Observa] Failed to record trace", e);
+   }
+ }
+ function recordError3(req, error, start, opts) {
+   try {
+     console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
+     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+     if (opts?.observa) {
+       opts.observa.trackError({
+         errorType: error.name || "UnknownError",
+         errorMessage: error.message || "An unknown error occurred",
+         stackTrace: error.stack,
+         context: { request: sanitizedReq },
+         errorCategory: "llm_error"
+       });
+     }
+   } catch (e) {
+   }
+ }
+ function observeVercelAI(aiSdk, options) {
+   try {
+     const wrapped = { ...aiSdk };
+     if (aiSdk.generateText && typeof aiSdk.generateText === "function") {
+       wrapped.generateText = async function(...args) {
+         return traceGenerateText(aiSdk.generateText.bind(aiSdk), args, options);
+       };
+     }
+     if (aiSdk.streamText && typeof aiSdk.streamText === "function") {
+       wrapped.streamText = async function(...args) {
+         return traceStreamText(aiSdk.streamText.bind(aiSdk), args, options);
+       };
+     }
+     return wrapped;
+   } catch (error) {
+     console.error("[Observa] Failed to wrap Vercel AI SDK:", error);
+     return aiSdk;
+   }
+ }
+
  // src/index.ts
  var contextModule = null;
  try {
@@ -1364,6 +1642,38 @@ var Observa = class {
        return client;
      }
    }
+   /**
+    * Observe Vercel AI SDK - wraps generateText and streamText functions
+    *
+    * @param aiSdk - Vercel AI SDK module (imported from 'ai')
+    * @param options - Observation options (name, tags, userId, sessionId, redact)
+    * @returns Wrapped AI SDK with automatic tracing
+    *
+    * @example
+    * ```typescript
+    * import { generateText, streamText } from 'ai';
+    * const observa = init({ apiKey: '...' });
+    *
+    * const ai = observa.observeVercelAI({ generateText, streamText }, {
+    *   name: 'my-app',
+    *   redact: (data) => ({ ...data, prompt: '[REDACTED]' })
+    * });
+    *
+    * // Use wrapped functions - automatically tracked!
+    * const result = await ai.generateText({
+    *   model: 'openai/gpt-4',
+    *   prompt: 'Hello!'
+    * });
+    * ```
+    */
+   observeVercelAI(aiSdk, options) {
+     try {
+       return observeVercelAI(aiSdk, { ...options, observa: this });
+     } catch (error) {
+       console.error("[Observa] Failed to load Vercel AI SDK wrapper:", error);
+       return aiSdk;
+     }
+   }
    async track(event, action, options) {
      if (this.sampleRate < 1 && Math.random() > this.sampleRate) {
        return action();
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "observa-sdk",
-   "version": "0.0.10",
+   "version": "0.0.12",
    "description": "Enterprise-grade observability SDK for AI applications. Track and monitor LLM interactions with zero friction.",
    "type": "module",
    "main": "./dist/index.cjs",
@@ -34,5 +34,12 @@
    "license": "MIT",
    "publishConfig": {
      "access": "public"
+   },
+   "dependencies": {
+     "observa-sdk": "^0.0.10"
+   },
+   "devDependencies": {
+     "tsup": "^8.5.1",
+     "typescript": "^5.9.3"
    }
  }