observa-sdk 0.0.10 → 0.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -88,6 +88,70 @@ for await (const chunk of stream) {
  }
  ```
 
+ ### Auto-Capture with Anthropic
+
+ Works the same way with Anthropic:
+
+ ```typescript
+ import { init } from "observa-sdk";
+ import Anthropic from "@anthropic-ai/sdk";
+
+ const observa = init({
+   apiKey: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
+ });
+
+ const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
+
+ // Wrap with Observa (automatic tracing)
+ const wrappedAnthropic = observa.observeAnthropic(anthropic, {
+   name: 'my-app',
+   userId: 'user_123',
+ });
+
+ // Use wrapped client - automatically tracked!
+ const response = await wrappedAnthropic.messages.create({
+   model: 'claude-3-opus-20240229',
+   max_tokens: 1024,
+   messages: [{ role: 'user', content: 'Hello!' }],
+ });
+ ```
+
+ ### Auto-Capture with Vercel AI SDK
+
+ Vercel AI SDK is a unified SDK that works with multiple providers:
+
+ ```typescript
+ import { init } from "observa-sdk";
+ import { generateText, streamText } from "ai";
+ import { openai } from "@ai-sdk/openai";
+
+ const observa = init({
+   apiKey: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...",
+ });
+
+ // Wrap Vercel AI SDK functions (automatic tracing)
+ const ai = observa.observeVercelAI({ generateText, streamText }, {
+   name: 'my-app',
+   userId: 'user_123',
+ });
+
+ // Use wrapped functions - automatically tracked!
+ const result = await ai.generateText({
+   model: openai('gpt-4'),
+   prompt: 'Hello!',
+ });
+
+ // Streaming also works automatically
+ const stream = await ai.streamText({
+   model: openai('gpt-4'),
+   prompt: 'Tell me a joke',
+ });
+
+ for await (const chunk of stream.textStream) {
+   process.stdout.write(chunk);
+ }
+ ```
+
  ### Legacy Manual Tracking
 
  For more control, you can still use the manual `track()` method:
@@ -290,6 +354,55 @@ const response = await wrapped.messages.create({
  });
  ```
 
+ ### `observa.observeVercelAI(aiSdk, options?)`
+
+ Wrap Vercel AI SDK functions (`generateText`, `streamText`) with automatic tracing. Vercel AI SDK is a unified SDK that works with multiple providers (OpenAI, Anthropic, Google, etc.).
+
+ **Parameters:**
+ - `aiSdk` (required): Object containing Vercel AI SDK functions (e.g., `{ generateText, streamText }`)
+ - `options` (optional):
+   - `name` (optional): Application/service name
+   - `tags` (optional): Array of tags
+   - `userId` (optional): User identifier
+   - `sessionId` (optional): Session identifier
+   - `redact` (optional): Function to sanitize data before sending to Observa
+
+ **Returns**: Wrapped AI SDK object with the same functions (use them exactly like the original functions)
+
+ **Example:**
+ ```typescript
+ import { generateText, streamText } from 'ai';
+ import { openai } from '@ai-sdk/openai';
+
+ const ai = observa.observeVercelAI({ generateText, streamText }, {
+   name: 'my-app',
+   userId: 'user_123',
+   redact: (data) => {
+     // Sanitize sensitive data
+     if (data?.prompt) {
+       return { ...data, prompt: '[REDACTED]' };
+     }
+     return data;
+   }
+ });
+
+ // Use wrapped functions - automatically tracked!
+ const result = await ai.generateText({
+   model: openai('gpt-4'),
+   prompt: 'Hello!',
+ });
+
+ // Streaming also works automatically
+ const stream = await ai.streamText({
+   model: openai('gpt-4'),
+   prompt: 'Tell me a joke',
+ });
+
+ for await (const chunk of stream.textStream) {
+   process.stdout.write(chunk);
+ }
+ ```
+
  ### `observa.startTrace(options)`
 
  Start a new trace for manual trace management. Returns the trace ID.
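
The `observeVercelAI` parameter list above also documents `tags` and `sessionId`, which none of the bundled README examples pass. A minimal sketch of supplying every documented option, assuming the options behave as described in that list; the tag values and session ID below are illustrative placeholders, not SDK defaults:

```typescript
import { init } from "observa-sdk";
import { generateText, streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const observa = init({ apiKey: "<your-observa-api-key>" });

// Same wrapper call as in the README example, with the remaining
// documented options filled in (tags and sessionId values are illustrative).
const ai = observa.observeVercelAI(
  { generateText, streamText },
  {
    name: "my-app",
    tags: ["prod", "chat"], // "Array of tags" per the parameter list
    userId: "user_123",
    sessionId: "session_456", // "Session identifier" per the parameter list
    redact: (data) => (data?.prompt ? { ...data, prompt: "[REDACTED]" } : data),
  }
);

const result = await ai.generateText({
  model: openai("gpt-4"),
  prompt: "Hello!",
});
console.log(result.text);
```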
package/dist/index.cjs CHANGED
@@ -104,11 +104,30 @@ async function* wrapStream(stream, onComplete, onError, provider = "openai") {
        if (text && typeof text === "string") {
          tokenCount += estimateTokens(text);
        }
+     } else if (provider === "vercel-ai") {
+       if (typeof chunk === "string") {
+         tokenCount += estimateTokens(chunk);
+       } else if (chunk?.textDelta) {
+         tokenCount += estimateTokens(chunk.textDelta);
+       }
      }
      chunks.push(chunk);
      yield chunk;
    }
-   const fullResponse = provider === "openai" ? reconstructOpenAIResponse(chunks) : reconstructAnthropicResponse(chunks);
+   let fullResponse;
+   if (provider === "openai") {
+     fullResponse = reconstructOpenAIResponse(chunks);
+   } else if (provider === "anthropic") {
+     fullResponse = reconstructAnthropicResponse(chunks);
+   } else if (provider === "vercel-ai") {
+     const fullText = chunks.map((chunk) => typeof chunk === "string" ? chunk : chunk?.textDelta || "").join("");
+     fullResponse = {
+       text: fullText,
+       tokenCount
+     };
+   } else {
+     fullResponse = { chunks, tokenCount };
+   }
    Promise.resolve().then(() => {
      try {
        onComplete({
@@ -406,6 +425,216 @@ function recordError2(req, error, start, opts) {
    }
  }
 
+ // src/instrumentation/vercel-ai.ts
+ function extractProviderFromModel(model) {
+   if (!model) return "unknown";
+   const parts = model.split("/");
+   if (parts.length > 1) {
+     return parts[0].toLowerCase();
+   }
+   const modelLower = model.toLowerCase();
+   if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+     return "openai";
+   }
+   if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+     return "anthropic";
+   }
+   if (modelLower.includes("gemini") || modelLower.includes("google")) {
+     return "google";
+   }
+   return "unknown";
+ }
+ async function traceGenerateText(originalFn, args, options) {
+   const startTime = Date.now();
+   const requestParams = args[0] || {};
+   const model = requestParams.model || "unknown";
+   const provider = extractProviderFromModel(model);
+   try {
+     const result = await originalFn(...args);
+     const responseText = result.text || "";
+     const usage = result.usage || {};
+     const finishReason = result.finishReason || null;
+     const responseId = result.response?.id || null;
+     recordTrace3(
+       {
+         model,
+         prompt: requestParams.prompt || requestParams.messages || null,
+         messages: requestParams.messages || null
+       },
+       {
+         text: responseText,
+         usage,
+         finishReason,
+         responseId,
+         model: result.model || model
+       },
+       startTime,
+       options,
+       null,
+       // No streaming for generateText
+       null,
+       provider
+     );
+     return result;
+   } catch (error) {
+     recordError3(
+       {
+         model,
+         prompt: requestParams.prompt || requestParams.messages || null
+       },
+       error,
+       startTime,
+       options
+     );
+     throw error;
+   }
+ }
+ async function traceStreamText(originalFn, args, options) {
+   const startTime = Date.now();
+   const requestParams = args[0] || {};
+   const model = requestParams.model || "unknown";
+   const provider = extractProviderFromModel(model);
+   try {
+     const result = await originalFn(...args);
+     if (result.textStream) {
+       const wrappedStream = wrapStream(
+         result.textStream,
+         (fullResponse) => {
+           recordTrace3(
+             {
+               model,
+               prompt: requestParams.prompt || requestParams.messages || null,
+               messages: requestParams.messages || null
+             },
+             fullResponse,
+             startTime,
+             options,
+             fullResponse.timeToFirstToken,
+             fullResponse.streamingDuration,
+             provider
+           );
+         },
+         (err) => recordError3(
+           {
+             model,
+             prompt: requestParams.prompt || requestParams.messages || null
+           },
+           err,
+           startTime,
+           options
+         ),
+         "vercel-ai"
+       );
+       return {
+         ...result,
+         textStream: wrappedStream
+       };
+     }
+     recordTrace3(
+       {
+         model,
+         prompt: requestParams.prompt || requestParams.messages || null,
+         messages: requestParams.messages || null
+       },
+       result,
+       startTime,
+       options,
+       null,
+       null,
+       provider
+     );
+     return result;
+   } catch (error) {
+     recordError3(
+       {
+         model,
+         prompt: requestParams.prompt || requestParams.messages || null
+       },
+       error,
+       startTime,
+       options
+     );
+     throw error;
+   }
+ }
+ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration, provider) {
+   const duration = Date.now() - start;
+   try {
+     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+     const sanitizedRes = opts?.redact ? opts.redact(res) : req;
+     if (opts?.observa) {
+       let inputText = null;
+       if (sanitizedReq.prompt) {
+         inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+       } else if (sanitizedReq.messages) {
+         inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+       }
+       const outputText = sanitizedRes.text || sanitizedRes.content || null;
+       const usage = sanitizedRes.usage || {};
+       const inputTokens = usage.promptTokens || usage.inputTokens || null;
+       const outputTokens = usage.completionTokens || usage.outputTokens || null;
+       const totalTokens = usage.totalTokens || null;
+       opts.observa.trackLLMCall({
+         model: sanitizedReq.model || sanitizedRes.model || "unknown",
+         input: inputText,
+         output: outputText,
+         inputMessages: sanitizedReq.messages || null,
+         outputMessages: sanitizedRes.messages || null,
+         inputTokens,
+         outputTokens,
+         totalTokens,
+         latencyMs: duration,
+         timeToFirstTokenMs: timeToFirstToken || null,
+         streamingDurationMs: streamingDuration || null,
+         finishReason: sanitizedRes.finishReason || null,
+         responseId: sanitizedRes.responseId || sanitizedRes.id || null,
+         operationName: "generate_text",
+         providerName: provider || "vercel-ai",
+         responseModel: sanitizedRes.model || sanitizedReq.model || null,
+         temperature: sanitizedReq.temperature || null,
+         maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
+       });
+     }
+   } catch (e) {
+     console.error("[Observa] Failed to record trace", e);
+   }
+ }
+ function recordError3(req, error, start, opts) {
+   try {
+     console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
+     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+     if (opts?.observa) {
+       opts.observa.trackError({
+         errorType: error.name || "UnknownError",
+         errorMessage: error.message || "An unknown error occurred",
+         stackTrace: error.stack,
+         context: { request: sanitizedReq },
+         errorCategory: "llm_error"
+       });
+     }
+   } catch (e) {
+   }
+ }
+ function observeVercelAI(aiSdk, options) {
+   try {
+     const wrapped = { ...aiSdk };
+     if (aiSdk.generateText && typeof aiSdk.generateText === "function") {
+       wrapped.generateText = async function(...args) {
+         return traceGenerateText(aiSdk.generateText.bind(aiSdk), args, options);
+       };
+     }
+     if (aiSdk.streamText && typeof aiSdk.streamText === "function") {
+       wrapped.streamText = async function(...args) {
+         return traceStreamText(aiSdk.streamText.bind(aiSdk), args, options);
+       };
+     }
+     return wrapped;
+   } catch (error) {
+     console.error("[Observa] Failed to wrap Vercel AI SDK:", error);
+     return aiSdk;
+   }
+ }
+
  // src/index.ts
  var contextModule = null;
  try {
@@ -1384,6 +1613,38 @@ var Observa = class {
        return client;
      }
    }
+   /**
+    * Observe Vercel AI SDK - wraps generateText and streamText functions
+    *
+    * @param aiSdk - Vercel AI SDK module (imported from 'ai')
+    * @param options - Observation options (name, tags, userId, sessionId, redact)
+    * @returns Wrapped AI SDK with automatic tracing
+    *
+    * @example
+    * ```typescript
+    * import { generateText, streamText } from 'ai';
+    * const observa = init({ apiKey: '...' });
+    *
+    * const ai = observa.observeVercelAI({ generateText, streamText }, {
+    *   name: 'my-app',
+    *   redact: (data) => ({ ...data, prompt: '[REDACTED]' })
+    * });
+    *
+    * // Use wrapped functions - automatically tracked!
+    * const result = await ai.generateText({
+    *   model: 'openai/gpt-4',
+    *   prompt: 'Hello!'
+    * });
+    * ```
+    */
+   observeVercelAI(aiSdk, options) {
+     try {
+       return observeVercelAI(aiSdk, { ...options, observa: this });
+     } catch (error) {
+       console.error("[Observa] Failed to load Vercel AI SDK wrapper:", error);
+       return aiSdk;
+     }
+   }
    async track(event, action, options) {
      if (this.sampleRate < 1 && Math.random() > this.sampleRate) {
        return action();
package/dist/index.d.cts CHANGED
@@ -328,6 +328,41 @@ declare class Observa {
          sessionId?: string;
          redact?: (data: any) => any;
      }): any;
+     /**
+      * Observe Vercel AI SDK - wraps generateText and streamText functions
+      *
+      * @param aiSdk - Vercel AI SDK module (imported from 'ai')
+      * @param options - Observation options (name, tags, userId, sessionId, redact)
+      * @returns Wrapped AI SDK with automatic tracing
+      *
+      * @example
+      * ```typescript
+      * import { generateText, streamText } from 'ai';
+      * const observa = init({ apiKey: '...' });
+      *
+      * const ai = observa.observeVercelAI({ generateText, streamText }, {
+      *   name: 'my-app',
+      *   redact: (data) => ({ ...data, prompt: '[REDACTED]' })
+      * });
+      *
+      * // Use wrapped functions - automatically tracked!
+      * const result = await ai.generateText({
+      *   model: 'openai/gpt-4',
+      *   prompt: 'Hello!'
+      * });
+      * ```
+      */
+     observeVercelAI(aiSdk: {
+         generateText?: any;
+         streamText?: any;
+         [key: string]: any;
+     }, options?: {
+         name?: string;
+         tags?: string[];
+         userId?: string;
+         sessionId?: string;
+         redact?: (data: any) => any;
+     }): any;
      track(event: TrackEventInput, action: () => Promise<Response>, options?: {
          trackBlocking?: boolean;
      }): Promise<any>;
package/dist/index.d.ts CHANGED
@@ -328,6 +328,41 @@ declare class Observa {
          sessionId?: string;
          redact?: (data: any) => any;
      }): any;
+     /**
+      * Observe Vercel AI SDK - wraps generateText and streamText functions
+      *
+      * @param aiSdk - Vercel AI SDK module (imported from 'ai')
+      * @param options - Observation options (name, tags, userId, sessionId, redact)
+      * @returns Wrapped AI SDK with automatic tracing
+      *
+      * @example
+      * ```typescript
+      * import { generateText, streamText } from 'ai';
+      * const observa = init({ apiKey: '...' });
+      *
+      * const ai = observa.observeVercelAI({ generateText, streamText }, {
+      *   name: 'my-app',
+      *   redact: (data) => ({ ...data, prompt: '[REDACTED]' })
+      * });
+      *
+      * // Use wrapped functions - automatically tracked!
+      * const result = await ai.generateText({
+      *   model: 'openai/gpt-4',
+      *   prompt: 'Hello!'
+      * });
+      * ```
+      */
+     observeVercelAI(aiSdk: {
+         generateText?: any;
+         streamText?: any;
+         [key: string]: any;
+     }, options?: {
+         name?: string;
+         tags?: string[];
+         userId?: string;
+         sessionId?: string;
+         redact?: (data: any) => any;
+     }): any;
      track(event: TrackEventInput, action: () => Promise<Response>, options?: {
          trackBlocking?: boolean;
      }): Promise<any>;
package/dist/index.js CHANGED
@@ -84,11 +84,30 @@ async function* wrapStream(stream, onComplete, onError, provider = "openai") {
        if (text && typeof text === "string") {
          tokenCount += estimateTokens(text);
        }
+     } else if (provider === "vercel-ai") {
+       if (typeof chunk === "string") {
+         tokenCount += estimateTokens(chunk);
+       } else if (chunk?.textDelta) {
+         tokenCount += estimateTokens(chunk.textDelta);
+       }
      }
      chunks.push(chunk);
      yield chunk;
    }
-   const fullResponse = provider === "openai" ? reconstructOpenAIResponse(chunks) : reconstructAnthropicResponse(chunks);
+   let fullResponse;
+   if (provider === "openai") {
+     fullResponse = reconstructOpenAIResponse(chunks);
+   } else if (provider === "anthropic") {
+     fullResponse = reconstructAnthropicResponse(chunks);
+   } else if (provider === "vercel-ai") {
+     const fullText = chunks.map((chunk) => typeof chunk === "string" ? chunk : chunk?.textDelta || "").join("");
+     fullResponse = {
+       text: fullText,
+       tokenCount
+     };
+   } else {
+     fullResponse = { chunks, tokenCount };
+   }
    Promise.resolve().then(() => {
      try {
        onComplete({
@@ -386,6 +405,216 @@ function recordError2(req, error, start, opts) {
    }
  }
 
+ // src/instrumentation/vercel-ai.ts
+ function extractProviderFromModel(model) {
+   if (!model) return "unknown";
+   const parts = model.split("/");
+   if (parts.length > 1) {
+     return parts[0].toLowerCase();
+   }
+   const modelLower = model.toLowerCase();
+   if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+     return "openai";
+   }
+   if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+     return "anthropic";
+   }
+   if (modelLower.includes("gemini") || modelLower.includes("google")) {
+     return "google";
+   }
+   return "unknown";
+ }
+ async function traceGenerateText(originalFn, args, options) {
+   const startTime = Date.now();
+   const requestParams = args[0] || {};
+   const model = requestParams.model || "unknown";
+   const provider = extractProviderFromModel(model);
+   try {
+     const result = await originalFn(...args);
+     const responseText = result.text || "";
+     const usage = result.usage || {};
+     const finishReason = result.finishReason || null;
+     const responseId = result.response?.id || null;
+     recordTrace3(
+       {
+         model,
+         prompt: requestParams.prompt || requestParams.messages || null,
+         messages: requestParams.messages || null
+       },
+       {
+         text: responseText,
+         usage,
+         finishReason,
+         responseId,
+         model: result.model || model
+       },
+       startTime,
+       options,
+       null,
+       // No streaming for generateText
+       null,
+       provider
+     );
+     return result;
+   } catch (error) {
+     recordError3(
+       {
+         model,
+         prompt: requestParams.prompt || requestParams.messages || null
+       },
+       error,
+       startTime,
+       options
+     );
+     throw error;
+   }
+ }
+ async function traceStreamText(originalFn, args, options) {
+   const startTime = Date.now();
+   const requestParams = args[0] || {};
+   const model = requestParams.model || "unknown";
+   const provider = extractProviderFromModel(model);
+   try {
+     const result = await originalFn(...args);
+     if (result.textStream) {
+       const wrappedStream = wrapStream(
+         result.textStream,
+         (fullResponse) => {
+           recordTrace3(
+             {
+               model,
+               prompt: requestParams.prompt || requestParams.messages || null,
+               messages: requestParams.messages || null
+             },
+             fullResponse,
+             startTime,
+             options,
+             fullResponse.timeToFirstToken,
+             fullResponse.streamingDuration,
+             provider
+           );
+         },
+         (err) => recordError3(
+           {
+             model,
+             prompt: requestParams.prompt || requestParams.messages || null
+           },
+           err,
+           startTime,
+           options
+         ),
+         "vercel-ai"
+       );
+       return {
+         ...result,
+         textStream: wrappedStream
+       };
+     }
+     recordTrace3(
+       {
+         model,
+         prompt: requestParams.prompt || requestParams.messages || null,
+         messages: requestParams.messages || null
+       },
+       result,
+       startTime,
+       options,
+       null,
+       null,
+       provider
+     );
+     return result;
+   } catch (error) {
+     recordError3(
+       {
+         model,
+         prompt: requestParams.prompt || requestParams.messages || null
+       },
+       error,
+       startTime,
+       options
+     );
+     throw error;
+   }
+ }
+ function recordTrace3(req, res, start, opts, timeToFirstToken, streamingDuration, provider) {
+   const duration = Date.now() - start;
+   try {
+     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+     const sanitizedRes = opts?.redact ? opts.redact(res) : req;
+     if (opts?.observa) {
+       let inputText = null;
+       if (sanitizedReq.prompt) {
+         inputText = typeof sanitizedReq.prompt === "string" ? sanitizedReq.prompt : JSON.stringify(sanitizedReq.prompt);
+       } else if (sanitizedReq.messages) {
+         inputText = sanitizedReq.messages.map((m) => m.content || m.text || "").filter(Boolean).join("\n");
+       }
+       const outputText = sanitizedRes.text || sanitizedRes.content || null;
+       const usage = sanitizedRes.usage || {};
+       const inputTokens = usage.promptTokens || usage.inputTokens || null;
+       const outputTokens = usage.completionTokens || usage.outputTokens || null;
+       const totalTokens = usage.totalTokens || null;
+       opts.observa.trackLLMCall({
+         model: sanitizedReq.model || sanitizedRes.model || "unknown",
+         input: inputText,
+         output: outputText,
+         inputMessages: sanitizedReq.messages || null,
+         outputMessages: sanitizedRes.messages || null,
+         inputTokens,
+         outputTokens,
+         totalTokens,
+         latencyMs: duration,
+         timeToFirstTokenMs: timeToFirstToken || null,
+         streamingDurationMs: streamingDuration || null,
+         finishReason: sanitizedRes.finishReason || null,
+         responseId: sanitizedRes.responseId || sanitizedRes.id || null,
+         operationName: "generate_text",
+         providerName: provider || "vercel-ai",
+         responseModel: sanitizedRes.model || sanitizedReq.model || null,
+         temperature: sanitizedReq.temperature || null,
+         maxTokens: sanitizedReq.maxTokens || sanitizedReq.max_tokens || null
+       });
+     }
+   } catch (e) {
+     console.error("[Observa] Failed to record trace", e);
+   }
+ }
+ function recordError3(req, error, start, opts) {
+   try {
+     console.error("[Observa] \u26A0\uFE0F Error Captured:", error.message);
+     const sanitizedReq = opts?.redact ? opts.redact(req) : req;
+     if (opts?.observa) {
+       opts.observa.trackError({
+         errorType: error.name || "UnknownError",
+         errorMessage: error.message || "An unknown error occurred",
+         stackTrace: error.stack,
+         context: { request: sanitizedReq },
+         errorCategory: "llm_error"
+       });
+     }
+   } catch (e) {
+   }
+ }
+ function observeVercelAI(aiSdk, options) {
+   try {
+     const wrapped = { ...aiSdk };
+     if (aiSdk.generateText && typeof aiSdk.generateText === "function") {
+       wrapped.generateText = async function(...args) {
+         return traceGenerateText(aiSdk.generateText.bind(aiSdk), args, options);
+       };
+     }
+     if (aiSdk.streamText && typeof aiSdk.streamText === "function") {
+       wrapped.streamText = async function(...args) {
+         return traceStreamText(aiSdk.streamText.bind(aiSdk), args, options);
+       };
+     }
+     return wrapped;
+   } catch (error) {
+     console.error("[Observa] Failed to wrap Vercel AI SDK:", error);
+     return aiSdk;
+   }
+ }
+
  // src/index.ts
  var contextModule = null;
  try {
@@ -1364,6 +1593,38 @@ var Observa = class {
        return client;
      }
    }
+   /**
+    * Observe Vercel AI SDK - wraps generateText and streamText functions
+    *
+    * @param aiSdk - Vercel AI SDK module (imported from 'ai')
+    * @param options - Observation options (name, tags, userId, sessionId, redact)
+    * @returns Wrapped AI SDK with automatic tracing
+    *
+    * @example
+    * ```typescript
+    * import { generateText, streamText } from 'ai';
+    * const observa = init({ apiKey: '...' });
+    *
+    * const ai = observa.observeVercelAI({ generateText, streamText }, {
+    *   name: 'my-app',
+    *   redact: (data) => ({ ...data, prompt: '[REDACTED]' })
+    * });
+    *
+    * // Use wrapped functions - automatically tracked!
+    * const result = await ai.generateText({
+    *   model: 'openai/gpt-4',
+    *   prompt: 'Hello!'
+    * });
+    * ```
+    */
+   observeVercelAI(aiSdk, options) {
+     try {
+       return observeVercelAI(aiSdk, { ...options, observa: this });
+     } catch (error) {
+       console.error("[Observa] Failed to load Vercel AI SDK wrapper:", error);
+       return aiSdk;
+     }
+   }
    async track(event, action, options) {
      if (this.sampleRate < 1 && Math.random() > this.sampleRate) {
        return action();
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "observa-sdk",
-   "version": "0.0.10",
+   "version": "0.0.11",
    "description": "Enterprise-grade observability SDK for AI applications. Track and monitor LLM interactions with zero friction.",
    "type": "module",
    "main": "./dist/index.cjs",
@@ -34,5 +34,12 @@
    "license": "MIT",
    "publishConfig": {
      "access": "public"
+   },
+   "dependencies": {
+     "observa-sdk": "^0.0.10"
+   },
+   "devDependencies": {
+     "tsup": "^8.5.1",
+     "typescript": "^5.9.3"
    }
  }