observa-sdk 0.0.8 → 0.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -408,7 +408,78 @@ var Observa = class {
  return this.currentTraceId;
  }
  /**
- * Track a tool call
+ * Track an LLM call with full OTEL support
+ * CRITICAL: This is the primary method for tracking LLM calls with all SOTA parameters
+ */
+ trackLLMCall(options) {
+ const spanId = crypto.randomUUID();
+ let providerName = options.providerName;
+ if (!providerName && options.model) {
+ const modelLower = options.model.toLowerCase();
+ if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+ providerName = "openai";
+ } else if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+ providerName = "anthropic";
+ } else if (modelLower.includes("gemini") || modelLower.includes("google")) {
+ providerName = "google";
+ } else if (modelLower.includes("vertex")) {
+ providerName = "gcp.vertex_ai";
+ } else if (modelLower.includes("bedrock") || modelLower.includes("aws")) {
+ providerName = "aws.bedrock";
+ }
+ }
+ const operationName = options.operationName || "chat";
+ this.addEvent({
+ event_type: "llm_call",
+ span_id: spanId,
+ attributes: {
+ llm_call: {
+ model: options.model,
+ input: options.input || null,
+ output: options.output || null,
+ input_tokens: options.inputTokens || null,
+ output_tokens: options.outputTokens || null,
+ total_tokens: options.totalTokens || null,
+ latency_ms: options.latencyMs,
+ time_to_first_token_ms: options.timeToFirstTokenMs || null,
+ streaming_duration_ms: options.streamingDurationMs || null,
+ finish_reason: options.finishReason || null,
+ response_id: options.responseId || null,
+ system_fingerprint: options.systemFingerprint || null,
+ cost: options.cost || null,
+ temperature: options.temperature || null,
+ max_tokens: options.maxTokens || null,
+ // TIER 1: OTEL Semantic Conventions
+ operation_name: operationName,
+ provider_name: providerName || null,
+ response_model: options.responseModel || null,
+ // TIER 2: Sampling parameters
+ top_k: options.topK || null,
+ top_p: options.topP || null,
+ frequency_penalty: options.frequencyPenalty || null,
+ presence_penalty: options.presencePenalty || null,
+ stop_sequences: options.stopSequences || null,
+ seed: options.seed || null,
+ // TIER 2: Structured cost tracking
+ input_cost: options.inputCost || null,
+ output_cost: options.outputCost || null,
+ // TIER 1: Structured message objects
+ input_messages: options.inputMessages || null,
+ output_messages: options.outputMessages || null,
+ system_instructions: options.systemInstructions || null,
+ // TIER 2: Server metadata
+ server_address: options.serverAddress || null,
+ server_port: options.serverPort || null,
+ // TIER 2: Conversation grouping
+ conversation_id_otel: options.conversationIdOtel || null,
+ choice_count: options.choiceCount || null
+ }
+ }
+ });
+ return spanId;
+ }
+ /**
+ * Track a tool call with OTEL standardization
  */
  trackToolCall(options) {
  const spanId = crypto.randomUUID();
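A minimal usage sketch of the new `trackLLMCall` method, assuming a named `Observa` export and an already-initialized client (neither the export shape nor initialization appears in this diff); the model, token counts, and costs below are illustrative:

```typescript
// Sketch only; `Observa` export name and client initialization are assumed.
import type { Observa } from 'observa-sdk';
declare const observa: Observa;

const llmSpanId = observa.trackLLMCall({
  model: 'claude-3-5-sonnet',      // provider_name is inferred as "anthropic" when omitted
  input: 'Summarize the release notes',
  output: '0.0.9 adds OTEL-aligned tracking methods.',
  inputTokens: 52,
  outputTokens: 24,
  latencyMs: 1240,                 // required alongside `model`
  temperature: 0.2,
  topP: 0.9,
  inputCost: 0.00016,              // illustrative TIER 2 structured costs
  outputCost: 0.00036,
  conversationIdOtel: 'conv-123',
});
```

Per the implementation above, only `model` and `latencyMs` are required; every omitted option is written as `null` on the emitted `llm_call` attributes, and `operationName` defaults to `"chat"`.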
@@ -422,14 +493,21 @@ var Observa = class {
  result: options.result || null,
  result_status: options.resultStatus,
  latency_ms: options.latencyMs,
- error_message: options.errorMessage || null
+ error_message: options.errorMessage || null,
+ // TIER 2: OTEL Tool Standardization
+ operation_name: options.operationName || "execute_tool",
+ tool_type: options.toolType || null,
+ tool_description: options.toolDescription || null,
+ tool_call_id: options.toolCallId || null,
+ error_type: options.errorType || null,
+ error_category: options.errorCategory || null
  }
  }
  });
  return spanId;
  }
  /**
- * Track a retrieval operation
+ * Track a retrieval operation with vector metadata enrichment
  */
  trackRetrieval(options) {
  const spanId = crypto.randomUUID();
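A sketch of the extended `trackToolCall` options, again assuming a named `Observa` export and an initialized client; the tool name, description, and call ID are illustrative:

```typescript
// Sketch only; `Observa` export name and client initialization are assumed.
import type { Observa } from 'observa-sdk';
declare const observa: Observa;

const toolSpanId = observa.trackToolCall({
  toolName: 'search_docs',
  resultStatus: 'success',
  latencyMs: 85,
  // New OTEL standardization fields in 0.0.9:
  toolType: 'function',
  toolDescription: 'Keyword search over product documentation',
  toolCallId: 'call_abc123',
});
```

When `operationName` is omitted it defaults to `"execute_tool"` in the emitted attributes.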
@@ -443,14 +521,23 @@ var Observa = class {
  k: options.k || null,
  top_k: options.k || null,
  similarity_scores: options.similarityScores || null,
- latency_ms: options.latencyMs
+ latency_ms: options.latencyMs,
+ // TIER 2: Retrieval enrichment
+ retrieval_context: options.retrievalContext || null,
+ embedding_model: options.embeddingModel || null,
+ embedding_dimensions: options.embeddingDimensions || null,
+ vector_metric: options.vectorMetric || null,
+ rerank_score: options.rerankScore || null,
+ fusion_method: options.fusionMethod || null,
+ deduplication_removed_count: options.deduplicationRemovedCount || null,
+ quality_score: options.qualityScore || null
  }
  }
  });
  return spanId;
  }
  /**
- * Track an error with stack trace support
+ * Track an error with structured error classification
  */
  trackError(options) {
  const spanId = crypto.randomUUID();
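A sketch of `trackRetrieval` with the new enrichment fields; document IDs, scores, and the embedding model are illustrative, and the `observa` setup is assumed as before:

```typescript
// Sketch only; `Observa` export name and client initialization are assumed.
import type { Observa } from 'observa-sdk';
declare const observa: Observa;

const retrievalSpanId = observa.trackRetrieval({
  contextIds: ['doc-1', 'doc-7'],
  k: 5,
  similarityScores: [0.91, 0.84],
  latencyMs: 42,
  // New enrichment fields in 0.0.9:
  embeddingModel: 'text-embedding-3-small',
  embeddingDimensions: 1536,
  vectorMetric: 'cosine',
  rerankScore: 0.88,
  deduplicationRemovedCount: 2,
});
```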
@@ -466,7 +553,10 @@ var Observa = class {
  error_type: options.errorType,
  error_message: options.errorMessage,
  stack_trace: stackTrace || null,
- context: options.context || null
+ context: options.context || null,
+ // TIER 2: Structured error classification
+ error_category: options.errorCategory || null,
+ error_code: options.errorCode || null
  }
  }
  });
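A sketch of `trackError` with the new classification fields; the taxonomy values are illustrative (both fields are free-form strings per the type declarations), and the `observa` setup is assumed:

```typescript
// Sketch only; `Observa` export name and client initialization are assumed.
import type { Observa } from 'observa-sdk';
declare const observa: Observa;

try {
  throw new Error('upstream request timed out'); // illustrative failure
} catch (err) {
  observa.trackError({
    errorType: 'UpstreamTimeout',
    errorMessage: (err as Error).message,
    error: err as Error,
    context: { route: '/v1/chat' },
    // New classification fields in 0.0.9:
    errorCategory: 'timeout',
    errorCode: 'ETIMEDOUT',
  });
}
```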
@@ -520,6 +610,114 @@ var Observa = class {
  });
  return spanId;
  }
+ /**
+ * Track an embedding operation (TIER 1: Critical)
+ */
+ trackEmbedding(options) {
+ const spanId = crypto.randomUUID();
+ let providerName = options.providerName;
+ if (!providerName && options.model) {
+ const modelLower = options.model.toLowerCase();
+ if (modelLower.includes("text-embedding") || modelLower.includes("openai")) {
+ providerName = "openai";
+ } else if (modelLower.includes("textembedding") || modelLower.includes("google")) {
+ providerName = "google";
+ } else if (modelLower.includes("vertex")) {
+ providerName = "gcp.vertex_ai";
+ }
+ }
+ this.addEvent({
+ event_type: "embedding",
+ span_id: spanId,
+ attributes: {
+ embedding: {
+ model: options.model,
+ dimension_count: options.dimensionCount || null,
+ encoding_formats: options.encodingFormats || null,
+ input_tokens: options.inputTokens || null,
+ output_tokens: options.outputTokens || null,
+ latency_ms: options.latencyMs,
+ cost: options.cost || null,
+ input_text: options.inputText || null,
+ input_hash: options.inputHash || null,
+ embeddings: options.embeddings || null,
+ embeddings_hash: options.embeddingsHash || null,
+ operation_name: options.operationName || "embeddings",
+ provider_name: providerName || null
+ }
+ }
+ });
+ return spanId;
+ }
+ /**
+ * Track a vector database operation (TIER 3)
+ */
+ trackVectorDbOperation(options) {
+ const spanId = crypto.randomUUID();
+ this.addEvent({
+ event_type: "vector_db_operation",
+ span_id: spanId,
+ attributes: {
+ vector_db_operation: {
+ operation_type: options.operationType,
+ index_name: options.indexName || null,
+ index_version: options.indexVersion || null,
+ vector_dimensions: options.vectorDimensions || null,
+ vector_metric: options.vectorMetric || null,
+ results_count: options.resultsCount || null,
+ scores: options.scores || null,
+ latency_ms: options.latencyMs,
+ cost: options.cost || null,
+ api_version: options.apiVersion || null,
+ provider_name: options.providerName || null
+ }
+ }
+ });
+ return spanId;
+ }
+ /**
+ * Track a cache operation (TIER 3)
+ */
+ trackCacheOperation(options) {
+ const spanId = crypto.randomUUID();
+ this.addEvent({
+ event_type: "cache_operation",
+ span_id: spanId,
+ attributes: {
+ cache_operation: {
+ cache_backend: options.cacheBackend || null,
+ cache_key: options.cacheKey || null,
+ cache_namespace: options.cacheNamespace || null,
+ hit_status: options.hitStatus,
+ latency_ms: options.latencyMs,
+ saved_cost: options.savedCost || null,
+ ttl: options.ttl || null,
+ eviction_info: options.evictionInfo || null
+ }
+ }
+ });
+ return spanId;
+ }
+ /**
+ * Track agent creation (TIER 3)
+ */
+ trackAgentCreate(options) {
+ const spanId = crypto.randomUUID();
+ this.addEvent({
+ event_type: "agent_create",
+ span_id: spanId,
+ attributes: {
+ agent_create: {
+ agent_name: options.agentName,
+ agent_config: options.agentConfig || null,
+ tools_bound: options.toolsBound || null,
+ model_config: options.modelConfig || null,
+ operation_name: options.operationName || "create_agent"
+ }
+ }
+ });
+ return spanId;
+ }
  /**
  * Execute a function within a span context (for nested operations)
  * This allows tool calls to be nested under LLM calls, etc.
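A combined sketch of the four methods added in this hunk, assuming the same `Observa` export and initialized client; index names, keys, costs, and tool lists are illustrative:

```typescript
// Sketch only; `Observa` export name and client initialization are assumed.
import type { Observa } from 'observa-sdk';
declare const observa: Observa;

// TIER 1: embedding call (provider_name is inferred from the model string when omitted)
observa.trackEmbedding({
  model: 'text-embedding-3-small', // matches "text-embedding" → provider "openai"
  dimensionCount: 1536,
  inputTokens: 21,
  latencyMs: 60,
});

// TIER 3: vector database operation
observa.trackVectorDbOperation({
  operationType: 'vector_search',
  indexName: 'docs-v2',
  vectorMetric: 'cosine',
  resultsCount: 5,
  latencyMs: 18,
});

// TIER 3: cache lookup
observa.trackCacheOperation({
  cacheBackend: 'redis',
  cacheKey: 'prompt:abc123',
  hitStatus: 'hit',
  latencyMs: 2,
  savedCost: 0.0031,
});

// TIER 3: agent creation
observa.trackAgentCreate({
  agentName: 'support-agent',
  toolsBound: ['search_docs', 'create_ticket'],
  modelConfig: { model: 'gpt-4o', temperature: 0.3 },
});
```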
@@ -620,6 +818,21 @@ var Observa = class {
  });
  if (trace.model) {
  const llmSpanId = crypto.randomUUID();
+ let providerName = null;
+ if (trace.model) {
+ const modelLower = trace.model.toLowerCase();
+ if (modelLower.includes("gpt") || modelLower.includes("openai")) {
+ providerName = "openai";
+ } else if (modelLower.includes("claude") || modelLower.includes("anthropic")) {
+ providerName = "anthropic";
+ } else if (modelLower.includes("gemini") || modelLower.includes("google")) {
+ providerName = "google";
+ } else if (modelLower.includes("vertex")) {
+ providerName = "gcp.vertex_ai";
+ } else if (modelLower.includes("bedrock") || modelLower.includes("aws")) {
+ providerName = "aws.bedrock";
+ }
+ }
  events.push({
  ...baseEvent,
  span_id: llmSpanId,
@@ -640,8 +853,13 @@ var Observa = class {
  finish_reason: trace.finishReason || null,
  response_id: trace.responseId || null,
  system_fingerprint: trace.systemFingerprint || null,
- cost: null
+ cost: null,
  // Cost calculation handled by backend
+ // TIER 1: OTEL Semantic Conventions (auto-inferred)
+ operation_name: "chat",
+ // Default for legacy track() method
+ provider_name: providerName
+ // Other OTEL fields can be added via trackLLMCall() method
  }
  }
  });
@@ -733,6 +951,64 @@ var Observa = class {
  }
  await this.flush();
  }
+ /**
+ * Observe OpenAI client - wraps client with automatic tracing
+ *
+ * @param client - OpenAI client instance
+ * @param options - Observation options (name, tags, userId, sessionId, redact)
+ * @returns Wrapped OpenAI client
+ *
+ * @example
+ * ```typescript
+ * import OpenAI from 'openai';
+ * const openai = new OpenAI({ apiKey: '...' });
+ * const wrapped = observa.observeOpenAI(openai, {
+ * name: 'my-app',
+ * redact: (data) => ({ ...data, messages: '[REDACTED]' })
+ * });
+ * ```
+ */
+ observeOpenAI(client, options) {
+ try {
+ const requireFn = globalThis.require || ((module2) => {
+ throw new Error("require is not available");
+ });
+ const { observeOpenAI: observeOpenAIFn } = requireFn("./instrumentation/openai");
+ return observeOpenAIFn(client, { ...options, observa: this });
+ } catch (error) {
+ console.error("[Observa] Failed to load OpenAI wrapper:", error);
+ return client;
+ }
+ }
+ /**
+ * Observe Anthropic client - wraps client with automatic tracing
+ *
+ * @param client - Anthropic client instance
+ * @param options - Observation options (name, tags, userId, sessionId, redact)
+ * @returns Wrapped Anthropic client
+ *
+ * @example
+ * ```typescript
+ * import Anthropic from '@anthropic-ai/sdk';
+ * const anthropic = new Anthropic({ apiKey: '...' });
+ * const wrapped = observa.observeAnthropic(anthropic, {
+ * name: 'my-app',
+ * redact: (data) => ({ ...data, messages: '[REDACTED]' })
+ * });
+ * ```
+ */
+ observeAnthropic(client, options) {
+ try {
+ const requireFn = globalThis.require || ((module2) => {
+ throw new Error("require is not available");
+ });
+ const { observeAnthropic: observeAnthropicFn } = requireFn("./instrumentation/anthropic");
+ return observeAnthropicFn(client, { ...options, observa: this });
+ } catch (error) {
+ console.error("[Observa] Failed to load Anthropic wrapper:", error);
+ return client;
+ }
+ }
  async track(event, action, options) {
  if (this.sampleRate < 1 && Math.random() > this.sampleRate) {
  return action();
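Both wrappers resolve their instrumentation module through `globalThis.require` at call time and fall back to returning the original client when that fails, so in environments without `require` the call degrades to a no-op rather than throwing. A usage sketch, assuming the wrapper preserves the OpenAI client's call surface and that `observa` is an initialized instance:

```typescript
// Sketch only; `Observa` export name, client initialization, and surface
// preservation by the wrapper are assumptions.
import OpenAI from 'openai';
import type { Observa } from 'observa-sdk';
declare const observa: Observa;

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
const wrapped = observa.observeOpenAI(openai, { name: 'my-app' });

// Existing call sites stay unchanged; tracing happens inside the wrapper.
const completion = await wrapped.chat.completions.create({
  model: 'gpt-4o-mini',                              // illustrative
  messages: [{ role: 'user', content: 'Hello' }],
});
```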
package/dist/index.d.cts CHANGED
@@ -63,7 +63,64 @@ declare class Observa {
  userId?: string;
  }): string;
  /**
- * Track a tool call
+ * Track an LLM call with full OTEL support
+ * CRITICAL: This is the primary method for tracking LLM calls with all SOTA parameters
+ */
+ trackLLMCall(options: {
+ model: string;
+ input?: string | null;
+ output?: string | null;
+ inputTokens?: number | null;
+ outputTokens?: number | null;
+ totalTokens?: number | null;
+ latencyMs: number;
+ timeToFirstTokenMs?: number | null;
+ streamingDurationMs?: number | null;
+ finishReason?: string | null;
+ responseId?: string | null;
+ systemFingerprint?: string | null;
+ cost?: number | null;
+ temperature?: number | null;
+ maxTokens?: number | null;
+ operationName?: "chat" | "text_completion" | "generate_content" | string | null;
+ providerName?: string | null;
+ responseModel?: string | null;
+ topK?: number | null;
+ topP?: number | null;
+ frequencyPenalty?: number | null;
+ presencePenalty?: number | null;
+ stopSequences?: string[] | null;
+ seed?: number | null;
+ inputCost?: number | null;
+ outputCost?: number | null;
+ inputMessages?: Array<{
+ role: string;
+ content?: string | any;
+ parts?: Array<{
+ type: string;
+ content: any;
+ }>;
+ }> | null;
+ outputMessages?: Array<{
+ role: string;
+ content?: string | any;
+ parts?: Array<{
+ type: string;
+ content: any;
+ }>;
+ finish_reason?: string;
+ }> | null;
+ systemInstructions?: Array<{
+ type: string;
+ content: string | any;
+ }> | null;
+ serverAddress?: string | null;
+ serverPort?: number | null;
+ conversationIdOtel?: string | null;
+ choiceCount?: number | null;
+ }): string;
+ /**
+ * Track a tool call with OTEL standardization
  */
  trackToolCall(options: {
  toolName: string;
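A sketch of the TIER 1 structured message fields using the shapes declared above; the roles, parts, and content are illustrative, and the `Observa` export plus the initialized `observa` instance are assumed:

```typescript
// Sketch only; `Observa` export name and client initialization are assumed.
import type { Observa } from 'observa-sdk';
declare const observa: Observa;

observa.trackLLMCall({
  model: 'gemini-1.5-pro',   // provider_name inferred as "google" when omitted
  latencyMs: 900,
  systemInstructions: [{ type: 'text', content: 'You are a helpful assistant.' }],
  inputMessages: [
    { role: 'user', parts: [{ type: 'text', content: 'What changed in 0.0.9?' }] },
  ],
  outputMessages: [
    { role: 'assistant', content: 'New OTEL-aligned tracking methods.', finish_reason: 'stop' },
  ],
});
```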
@@ -72,9 +129,15 @@ declare class Observa {
  resultStatus: "success" | "error" | "timeout";
  latencyMs: number;
  errorMessage?: string;
+ operationName?: "execute_tool" | string | null;
+ toolType?: "function" | "extension" | "datastore" | string | null;
+ toolDescription?: string | null;
+ toolCallId?: string | null;
+ errorType?: string | null;
+ errorCategory?: string | null;
  }): string;
  /**
- * Track a retrieval operation
+ * Track a retrieval operation with vector metadata enrichment
  */
  trackRetrieval(options: {
  contextIds?: string[];
@@ -82,9 +145,17 @@ declare class Observa {
  k?: number;
  similarityScores?: number[];
  latencyMs: number;
+ retrievalContext?: string | null;
+ embeddingModel?: string | null;
+ embeddingDimensions?: number | null;
+ vectorMetric?: "cosine" | "euclidean" | "dot_product" | string | null;
+ rerankScore?: number | null;
+ fusionMethod?: string | null;
+ deduplicationRemovedCount?: number | null;
+ qualityScore?: number | null;
  }): string;
  /**
- * Track an error with stack trace support
+ * Track an error with structured error classification
  */
  trackError(options: {
  errorType: string;
@@ -92,6 +163,8 @@ declare class Observa {
  stackTrace?: string;
  context?: Record<string, any>;
  error?: Error;
+ errorCategory?: string | null;
+ errorCode?: string | null;
  }): string;
  /**
  * Track user feedback
@@ -119,6 +192,63 @@ declare class Observa {
  finalOutput?: string;
  outputLength?: number;
  }): string;
+ /**
+ * Track an embedding operation (TIER 1: Critical)
+ */
+ trackEmbedding(options: {
+ model: string;
+ dimensionCount?: number | null;
+ encodingFormats?: string[] | null;
+ inputTokens?: number | null;
+ outputTokens?: number | null;
+ latencyMs: number;
+ cost?: number | null;
+ inputText?: string | null;
+ inputHash?: string | null;
+ embeddings?: number[][] | null;
+ embeddingsHash?: string | null;
+ operationName?: "embeddings" | string | null;
+ providerName?: string | null;
+ }): string;
+ /**
+ * Track a vector database operation (TIER 3)
+ */
+ trackVectorDbOperation(options: {
+ operationType: "vector_search" | "index_upsert" | "delete" | string;
+ indexName?: string | null;
+ indexVersion?: string | null;
+ vectorDimensions?: number | null;
+ vectorMetric?: "cosine" | "euclidean" | "dot_product" | string | null;
+ resultsCount?: number | null;
+ scores?: number[] | null;
+ latencyMs: number;
+ cost?: number | null;
+ apiVersion?: string | null;
+ providerName?: string | null;
+ }): string;
+ /**
+ * Track a cache operation (TIER 3)
+ */
+ trackCacheOperation(options: {
+ cacheBackend?: "redis" | "in_memory" | "memcached" | string | null;
+ cacheKey?: string | null;
+ cacheNamespace?: string | null;
+ hitStatus: "hit" | "miss";
+ latencyMs: number;
+ savedCost?: number | null;
+ ttl?: number | null;
+ evictionInfo?: Record<string, any> | null;
+ }): string;
+ /**
+ * Track agent creation (TIER 3)
+ */
+ trackAgentCreate(options: {
+ agentName: string;
+ agentConfig?: Record<string, any> | null;
+ toolsBound?: string[] | null;
+ modelConfig?: Record<string, any> | null;
+ operationName?: "create_agent" | string | null;
+ }): string;
  /**
  * Execute a function within a span context (for nested operations)
  * This allows tool calls to be nested under LLM calls, etc.
@@ -150,6 +280,54 @@ declare class Observa {
  * Cleanup (call when shutting down)
  */
  end(): Promise<void>;
+ /**
+ * Observe OpenAI client - wraps client with automatic tracing
+ *
+ * @param client - OpenAI client instance
+ * @param options - Observation options (name, tags, userId, sessionId, redact)
+ * @returns Wrapped OpenAI client
+ *
+ * @example
+ * ```typescript
+ * import OpenAI from 'openai';
+ * const openai = new OpenAI({ apiKey: '...' });
+ * const wrapped = observa.observeOpenAI(openai, {
+ * name: 'my-app',
+ * redact: (data) => ({ ...data, messages: '[REDACTED]' })
+ * });
+ * ```
+ */
+ observeOpenAI(client: any, options?: {
+ name?: string;
+ tags?: string[];
+ userId?: string;
+ sessionId?: string;
+ redact?: (data: any) => any;
+ }): any;
+ /**
+ * Observe Anthropic client - wraps client with automatic tracing
+ *
+ * @param client - Anthropic client instance
+ * @param options - Observation options (name, tags, userId, sessionId, redact)
+ * @returns Wrapped Anthropic client
+ *
+ * @example
+ * ```typescript
+ * import Anthropic from '@anthropic-ai/sdk';
+ * const anthropic = new Anthropic({ apiKey: '...' });
+ * const wrapped = observa.observeAnthropic(anthropic, {
+ * name: 'my-app',
+ * redact: (data) => ({ ...data, messages: '[REDACTED]' })
+ * });
+ * ```
+ */
+ observeAnthropic(client: any, options?: {
+ name?: string;
+ tags?: string[];
+ userId?: string;
+ sessionId?: string;
+ redact?: (data: any) => any;
+ }): any;
  track(event: TrackEventInput, action: () => Promise<Response>, options?: {
  trackBlocking?: boolean;
  }): Promise<any>;
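The Anthropic counterpart, again assuming the wrapper preserves the client's call surface; the redact callback mirrors the JSDoc example above, and the model and message content are illustrative:

```typescript
// Sketch only; `Observa` export name, client initialization, and surface
// preservation by the wrapper are assumptions.
import Anthropic from '@anthropic-ai/sdk';
import type { Observa } from 'observa-sdk';
declare const observa: Observa;

const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
const wrapped = observa.observeAnthropic(anthropic, {
  name: 'my-app',
  redact: (data) => ({ ...data, messages: '[REDACTED]' }),
});

const message = await wrapped.messages.create({
  model: 'claude-3-5-sonnet-latest',                 // illustrative
  max_tokens: 256,
  messages: [{ role: 'user', content: 'Hello' }],
});
```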