@elasticdash/tracing 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,2090 @@
1
+ import * as _opentelemetry_api from '@opentelemetry/api';
2
+ import { Span, TimeInput, Attributes, TracerProvider, SpanContext } from '@opentelemetry/api';
3
+ import { OpenAiUsage } from '@elasticdash/core';
4
+ export { LangfuseOtelSpanAttributes, PropagateAttributesParams, propagateAttributes } from '@elasticdash/core';
5
+
6
+ /**
7
+ * Types of observations that can be created in Langfuse.
8
+ *
9
+ * - `span`: General-purpose observations for tracking operations, functions, or logical units of work
10
+ * - `generation`: Specialized observations for LLM calls with model parameters, usage, and costs
11
+ * - `event`: Point-in-time occurrences or log entries within a trace (specialized variants: `embedding`, `agent`, `tool`, `chain`, `retriever`, `evaluator`, `guardrail`)
12
+ *
13
+ * @public
14
+ */
15
+ type LangfuseObservationType = "span" | "generation" | "event" | "embedding" | "agent" | "tool" | "chain" | "retriever" | "evaluator" | "guardrail";
16
+ /**
17
+ * Severity levels for observations in Langfuse.
18
+ *
19
+ * Used to categorize the importance or severity of observations:
20
+ * - `DEBUG`: Detailed diagnostic information
21
+ * - `DEFAULT`: Normal operation information
22
+ * - `WARNING`: Potentially problematic situations
23
+ * - `ERROR`: Error conditions that need attention
24
+ *
25
+ * @public
26
+ */
27
+ type ObservationLevel = "DEBUG" | "DEFAULT" | "WARNING" | "ERROR";
28
+ /**
29
+ * Attributes for Langfuse span observations.
30
+ *
31
+ * Spans are used to track operations, functions, or logical units of work.
32
+ * They can contain other spans, generations, or events as children.
33
+ *
34
+ * @public
35
+ */
36
+ type LangfuseSpanAttributes = {
37
+ /** Input data for the operation being tracked */
38
+ input?: unknown;
39
+ /** Output data from the operation */
40
+ output?: unknown;
41
+ /** Additional metadata as key-value pairs */
42
+ metadata?: Record<string, unknown>;
43
+ /** Severity level of the observation */
44
+ level?: ObservationLevel;
45
+ /** Human-readable status message */
46
+ statusMessage?: string;
47
+ /** Version identifier for the code/model being tracked */
48
+ version?: string;
49
+ /** Environment where the operation is running (e.g., 'production', 'staging') */
50
+ environment?: string;
51
+ };
52
+ /**
53
+ * Attributes for Langfuse generation observations.
54
+ *
55
+ * Generations are specialized observations for tracking LLM interactions,
56
+ * including model parameters, usage metrics, costs, and prompt information.
57
+ *
58
+ * @public
59
+ */
60
+ type LangfuseGenerationAttributes = LangfuseSpanAttributes & {
61
+ /** Timestamp when the model started generating completion */
62
+ completionStartTime?: Date;
63
+ /** Name of the language model used (e.g., 'gpt-4', 'claude-3') */
64
+ model?: string;
65
+ /** Parameters passed to the model (temperature, max_tokens, etc.) */
66
+ modelParameters?: {
67
+ [key: string]: string | number;
68
+ };
69
+ /** Token usage and other model-specific usage metrics */
70
+ usageDetails?: {
71
+ [key: string]: number;
72
+ } | OpenAiUsage;
73
+ /** Cost breakdown for the generation (totalCost, etc.) */
74
+ costDetails?: {
75
+ [key: string]: number;
76
+ };
77
+ /** Information about the prompt used from Langfuse prompt management */
78
+ prompt?: {
79
+ /** Name of the prompt template */
80
+ name: string;
81
+ /** Version number of the prompt template */
82
+ version: number;
83
+ /** Whether this is a fallback prompt due to retrieval failure */
84
+ isFallback: boolean;
85
+ };
86
+ };
87
+ type LangfuseEventAttributes = LangfuseSpanAttributes;
88
+ type LangfuseAgentAttributes = LangfuseSpanAttributes;
89
+ type LangfuseToolAttributes = LangfuseSpanAttributes;
90
+ type LangfuseChainAttributes = LangfuseSpanAttributes;
91
+ type LangfuseRetrieverAttributes = LangfuseSpanAttributes;
92
+ type LangfuseEvaluatorAttributes = LangfuseSpanAttributes;
93
+ type LangfuseGuardrailAttributes = LangfuseSpanAttributes;
94
+ type LangfuseEmbeddingAttributes = LangfuseGenerationAttributes;
95
+ /**
96
+ * Intersection type combining all Langfuse observation attribute types.
97
+ *
98
+ * This type is used when you need to accept any type of observation attributes.
99
+ *
100
+ * @public
101
+ */
102
+ type LangfuseObservationAttributes = LangfuseSpanAttributes & LangfuseGenerationAttributes & LangfuseEventAttributes & LangfuseAgentAttributes & LangfuseToolAttributes & LangfuseChainAttributes & LangfuseRetrieverAttributes & LangfuseEvaluatorAttributes & LangfuseGuardrailAttributes;
103
+ /**
104
+ * Attributes for Langfuse traces.
105
+ *
106
+ * Traces are the top-level containers that group related observations together.
107
+ * They represent a complete workflow, request, or user interaction.
108
+ *
109
+ * @public
110
+ */
111
+ type LangfuseTraceAttributes = {
112
+ /** Human-readable name for the trace */
113
+ name?: string;
114
+ /** Identifier for the user associated with this trace */
115
+ userId?: string;
116
+ /** Session identifier for grouping related traces */
117
+ sessionId?: string;
118
+ /** Version identifier for the code/application */
119
+ version?: string;
120
+ /** Release identifier for deployment tracking */
121
+ release?: string;
122
+ /** Input data that initiated the trace */
123
+ input?: unknown;
124
+ /** Final output data from the trace */
125
+ output?: unknown;
126
+ /** Additional metadata for the trace */
127
+ metadata?: unknown;
128
+ /** Tags for categorizing and filtering traces */
129
+ tags?: string[];
130
+ /** Whether this trace should be publicly visible */
131
+ public?: boolean;
132
+ /** Environment where the trace was captured */
133
+ environment?: string;
134
+ };
135
+
136
+ /**
137
+ * Union type representing any Langfuse observation wrapper.
138
+ *
139
+ * This type encompasses all observation types supported by Langfuse, providing
140
+ * a unified interface for handling different kinds of traced operations. It's
141
+ * particularly useful for generic functions that work with any observation type.
142
+ *
143
+ * ## Included Types
144
+ * - **LangfuseSpan**: General-purpose operations and workflows
145
+ * - **LangfuseGeneration**: LLM calls and AI model interactions
146
+ * - **LangfuseEmbedding**: Text embedding and vector operations
147
+ * - **LangfuseAgent**: AI agent workflows with tool usage
148
+ * - **LangfuseTool**: Individual tool calls and API requests
149
+ * - **LangfuseChain**: Multi-step processes and pipelines
150
+ * - **LangfuseRetriever**: Document retrieval and search operations
151
+ * - **LangfuseEvaluator**: Quality assessment and scoring
152
+ * - **LangfuseGuardrail**: Safety checks and content filtering
153
+ * - **LangfuseEvent**: Point-in-time occurrences and log entries
154
+ *
155
+ * @example
156
+ * ```typescript
157
+ * // Function accepting any observation type
158
+ * function logObservation(obs: LangfuseObservation) {
159
+ * console.log(`Observation ${obs.id} in trace ${obs.traceId}`);
160
+ *
161
+ * // All observations have common methods
162
+ * obs.updateTrace({ tags: ['logged'] });
163
+ * obs.end();
164
+ * }
165
+ *
166
+ * // Works with any observation type
167
+ * const span = startObservation('test-span');
168
+ * const generation = startObservation('llm-call', {}, { asType: 'generation' });
169
+ * const agent = startObservation('ai-agent', {}, { asType: 'agent' });
170
+ *
171
+ * logObservation(span);
172
+ * logObservation(generation);
173
+ * logObservation(agent);
174
+ * ```
175
+ *
176
+ * @public
177
+ */
178
+ type LangfuseObservation = LangfuseSpan | LangfuseGeneration | LangfuseEvent | LangfuseAgent | LangfuseTool | LangfuseChain | LangfuseRetriever | LangfuseEvaluator | LangfuseGuardrail | LangfuseEmbedding;
179
+ /**
180
+ * Parameters for creating a Langfuse observation wrapper.
181
+ *
182
+ * @internal
183
+ */
184
+ type LangfuseObservationParams = {
185
+ otelSpan: Span;
186
+ type: LangfuseObservationType;
187
+ attributes?: LangfuseSpanAttributes | LangfuseGenerationAttributes | LangfuseEventAttributes;
188
+ };
189
+ /**
190
+ * Base class for all Langfuse observation wrappers providing unified functionality.
191
+ *
192
+ * This abstract class serves as the foundation for all observation types in Langfuse,
193
+ * encapsulating common operations and properties shared across spans, generations,
194
+ * events, and specialized observation types like agents, tools, and chains.
195
+ *
196
+ * ## Core Capabilities
197
+ * - **OpenTelemetry Integration**: Wraps OTEL spans with Langfuse-specific functionality
198
+ * - **Unique Identification**: Provides span ID and trace ID for correlation
199
+ * - **Lifecycle Management**: Handles observation creation, updates, and completion
200
+ * - **Trace Context**: Enables updating trace-level attributes from any observation
201
+ * - **Hierarchical Structure**: Supports creating nested child observations
202
+ * - **Type Safety**: Ensures type-safe attribute handling based on observation type
203
+ *
204
+ * ## Common Properties
205
+ * - `id`: Unique identifier for this observation (OpenTelemetry span ID)
206
+ * - `traceId`: Identifier of the parent trace containing this observation
207
+ * - `otelSpan`: Direct access to the underlying OpenTelemetry span
208
+ * - `type`: The observation type (span, generation, event, etc.)
209
+ *
210
+ * ## Common Methods
211
+ * - `end()`: Marks the observation as complete with optional timestamp
212
+ * - `updateTrace()`: Sets trace-level attributes like user ID, session ID, tags
213
+ * - `startObservation()`: Creates child observations with inherited context
214
+ *
215
+ * @example
216
+ * ```typescript
217
+ * // All observation types share these common capabilities
218
+ * const observation: LangfuseObservation = startObservation('my-operation');
219
+ *
220
+ * // Common properties available on all observations
221
+ * console.log(`Observation ID: ${observation.id}`);
222
+ * console.log(`Trace ID: ${observation.traceId}`);
223
+ * console.log(`Type: ${observation.type}`);
224
+ *
225
+ * // Common methods available on all observations
226
+ * observation.updateTrace({
227
+ * userId: 'user-123',
228
+ * sessionId: 'session-456',
229
+ * tags: ['production', 'api-v2']
230
+ * });
231
+ *
232
+ * // Create child observations
233
+ * const child = observation.startObservation('child-operation', {
234
+ * input: { step: 'processing' }
235
+ * });
236
+ *
237
+ * // End observations
238
+ * child.end();
239
+ * observation.end();
240
+ * ```
241
+ *
242
+ * @internal
243
+ */
244
+ declare abstract class LangfuseBaseObservation {
245
+ /** The underlying OpenTelemetry span */
246
+ readonly otelSpan: Span;
247
+ /** The type of this observation (span, generation, event, etc.) */
248
+ readonly type: LangfuseObservationType;
249
+ /** The span ID from the OpenTelemetry span context */
250
+ id: string;
251
+ /** The trace ID from the OpenTelemetry span context */
252
+ traceId: string;
253
+ constructor(params: LangfuseObservationParams);
254
+ /** Gets the Langfuse OpenTelemetry tracer instance */
255
+ protected get tracer(): _opentelemetry_api.Tracer;
256
+ /**
257
+ * Ends the observation, marking it as complete.
258
+ *
259
+ * @param endTime - Optional end time, defaults to current time
260
+ */
261
+ end(endTime?: TimeInput): void;
262
+ updateOtelSpanAttributes(attributes: LangfuseObservationAttributes): void;
263
+ /**
264
+ * Updates the parent trace with new attributes.
265
+ */
266
+ updateTrace(attributes: LangfuseTraceAttributes): this;
267
+ /**
268
+ * Creates a new child observation within this observation's context with full type safety.
269
+ *
270
+ * This method enables hierarchical tracing by creating child observations that inherit
271
+ * the parent's trace context. It supports all observation types with automatic TypeScript
272
+ * type inference based on the `asType` parameter, ensuring compile-time safety for
273
+ * attributes and return types.
274
+ *
275
+ * ## Hierarchy & Context
276
+ * - Child observations automatically inherit the parent's trace ID and span context
277
+ * - Creates proper parent-child relationships in the trace structure
278
+ * - Enables distributed tracing across nested operations
279
+ * - Maintains correlation between related operations
280
+ *
281
+ * ## Type Safety
282
+ * - Return type is automatically inferred from `asType` parameter
283
+ * - Attributes parameter is type-checked based on observation type
284
+ * - Compile-time validation prevents type mismatches
285
+ * - Full IntelliSense support for observation-specific attributes
286
+ *
287
+ * @param name - Descriptive name for the child observation
288
+ * @param attributes - Type-specific attributes (varies by observation type)
289
+ * @param options - Configuration including observation type (defaults to 'span')
290
+ * @returns Strongly-typed observation instance based on `asType`
291
+ *
292
+ * @example
293
+ * ```typescript
294
+ * // Within any observation (span, generation, agent, etc.)
295
+ * const parentObservation = startObservation('ai-workflow');
296
+ *
297
+ * // Create child span (default)
298
+ * const dataProcessing = parentObservation.startObservation('data-processing', {
299
+ * input: { userId: '123', dataSize: 1024 },
300
+ * metadata: { processor: 'fast-lane', version: '2.1' }
301
+ * }); // Returns LangfuseSpan
302
+ *
303
+ * // Create child generation with full LLM attributes
304
+ * const llmCall = parentObservation.startObservation('openai-gpt-4', {
305
+ * input: [{ role: 'system', content: 'You are a helpful assistant' },
306
+ * { role: 'user', content: 'Explain machine learning' }],
307
+ * model: 'gpt-4-turbo',
308
+ * modelParameters: {
309
+ * temperature: 0.7,
310
+ * maxTokens: 500,
311
+ * topP: 1.0
312
+ * },
313
+ * metadata: { priority: 'high', timeout: 30000 }
314
+ * }, { asType: 'generation' }); // Returns LangfuseGeneration
315
+ *
316
+ * // Create child agent for complex reasoning
317
+ * const reasoningAgent = parentObservation.startObservation('reasoning-agent', {
318
+ * input: {
319
+ * task: 'Analyze market trends',
320
+ * context: 'Q4 2024 financial data'
321
+ * },
322
+ * metadata: {
323
+ * model: 'gpt-4',
324
+ * tools: ['calculator', 'web-search', 'data-analysis'],
325
+ * maxIterations: 5
326
+ * }
327
+ * }, { asType: 'agent' }); // Returns LangfuseAgent
328
+ *
329
+ * // Create child tool for external API calls
330
+ * const apiCall = reasoningAgent.startObservation('market-data-api', {
331
+ * input: {
332
+ * endpoint: '/market/trends',
333
+ * params: { symbol: 'AAPL', period: '1Y' }
334
+ * },
335
+ * metadata: {
336
+ * provider: 'alpha-vantage',
337
+ * rateLimit: 5,
338
+ * timeout: 10000
339
+ * }
340
+ * }, { asType: 'tool' }); // Returns LangfuseTool
341
+ *
342
+ * // Create child retriever for document search
343
+ * const docSearch = parentObservation.startObservation('document-retrieval', {
344
+ * input: {
345
+ * query: 'sustainable energy solutions',
346
+ * filters: { year: '2024', category: 'research' },
347
+ * topK: 10
348
+ * },
349
+ * metadata: {
350
+ * vectorStore: 'pinecone',
351
+ * embeddingModel: 'text-embedding-ada-002',
352
+ * similarity: 'cosine'
353
+ * }
354
+ * }, { asType: 'retriever' }); // Returns LangfuseRetriever
355
+ *
356
+ * // Create child evaluator for quality assessment
357
+ * const qualityCheck = parentObservation.startObservation('response-evaluator', {
358
+ * input: {
359
+ * response: llmCall.output?.content,
360
+ * reference: 'Expected high-quality explanation',
361
+ * criteria: ['accuracy', 'clarity', 'completeness']
362
+ * },
363
+ * metadata: {
364
+ * evaluator: 'custom-bert-scorer',
365
+ * threshold: 0.8,
366
+ * metrics: ['bleu', 'rouge', 'semantic-similarity']
367
+ * }
368
+ * }, { asType: 'evaluator' }); // Returns LangfuseEvaluator
369
+ *
370
+ * // Create child guardrail for safety checking
371
+ * const safetyCheck = parentObservation.startObservation('content-guardrail', {
372
+ * input: {
373
+ * text: llmCall.output?.content,
374
+ * policies: ['no-harmful-content', 'no-personal-info', 'professional-tone']
375
+ * },
376
+ * metadata: {
377
+ * guardrailVersion: 'v2.1',
378
+ * strictMode: true,
379
+ * confidence: 0.95
380
+ * }
381
+ * }, { asType: 'guardrail' }); // Returns LangfuseGuardrail
382
+ *
383
+ * // Create child embedding for vector generation
384
+ * const textEmbedding = parentObservation.startObservation('text-embedder', {
385
+ * input: {
386
+ * texts: ['Document summary', 'Key insights', 'Conclusions'],
387
+ * batchSize: 3
388
+ * },
389
+ * model: 'text-embedding-ada-002',
390
+ * metadata: {
391
+ * dimensions: 1536,
392
+ * normalization: 'l2',
393
+ * purpose: 'semantic-search'
394
+ * }
395
+ * }, { asType: 'embedding' }); // Returns LangfuseEmbedding
396
+ *
397
+ * // Create child event for point-in-time logging
398
+ * const userAction = parentObservation.startObservation('user-interaction', {
399
+ * input: {
400
+ * action: 'button-click',
401
+ * element: 'generate-report',
402
+ * timestamp: new Date().toISOString()
403
+ * },
404
+ * level: 'DEFAULT',
405
+ * metadata: {
406
+ * sessionId: 'sess_123',
407
+ * userId: 'user_456',
408
+ * browser: 'Chrome 120.0'
409
+ * }
410
+ * }, { asType: 'event' }); // Returns LangfuseEvent (auto-ended)
411
+ *
412
+ * // Chain operations - each child inherits context
413
+ * dataProcessing.update({ output: { processed: true, records: 1000 } });
414
+ * dataProcessing.end();
415
+ *
416
+ * llmCall.update({
417
+ * output: { role: 'assistant', content: 'Machine learning is...' },
418
+ * usageDetails: { promptTokens: 25, completionTokens: 150 }
419
+ * });
420
+ * llmCall.end();
421
+ *
422
+ * parentObservation.update({
423
+ * output: {
424
+ * workflowCompleted: true,
425
+ * childOperations: 8,
426
+ * totalDuration: Date.now() - startTime
427
+ * }
428
+ * });
429
+ * parentObservation.end();
430
+ * ```
431
+ *
432
+ * @see {@link startObservation} for creating root-level observations
433
+ * @see {@link startActiveObservation} for function-scoped child observations
434
+ */
435
+ startObservation(name: string, attributes: LangfuseGenerationAttributes, options: {
436
+ asType: "generation";
437
+ }): LangfuseGeneration;
438
+ startObservation(name: string, attributes: LangfuseEventAttributes, options: {
439
+ asType: "event";
440
+ }): LangfuseEvent;
441
+ startObservation(name: string, attributes: LangfuseAgentAttributes, options: {
442
+ asType: "agent";
443
+ }): LangfuseAgent;
444
+ startObservation(name: string, attributes: LangfuseToolAttributes, options: {
445
+ asType: "tool";
446
+ }): LangfuseTool;
447
+ startObservation(name: string, attributes: LangfuseChainAttributes, options: {
448
+ asType: "chain";
449
+ }): LangfuseChain;
450
+ startObservation(name: string, attributes: LangfuseRetrieverAttributes, options: {
451
+ asType: "retriever";
452
+ }): LangfuseRetriever;
453
+ startObservation(name: string, attributes: LangfuseEvaluatorAttributes, options: {
454
+ asType: "evaluator";
455
+ }): LangfuseEvaluator;
456
+ startObservation(name: string, attributes: LangfuseGuardrailAttributes, options: {
457
+ asType: "guardrail";
458
+ }): LangfuseGuardrail;
459
+ startObservation(name: string, attributes: LangfuseEmbeddingAttributes, options: {
460
+ asType: "embedding";
461
+ }): LangfuseEmbedding;
462
+ startObservation(name: string, attributes?: LangfuseSpanAttributes, options?: {
463
+ asType?: "span";
464
+ }): LangfuseSpan;
465
+ }
466
+ type LangfuseSpanParams = {
467
+ otelSpan: Span;
468
+ attributes?: LangfuseSpanAttributes;
469
+ };
470
+ /**
471
+ * General-purpose observation wrapper for tracking operations, functions, and workflows.
472
+ *
473
+ * LangfuseSpan is the default and most versatile observation type, designed for tracing
474
+ * any operation that has a defined start and end time. It serves as the foundation for
475
+ * building hierarchical traces and can contain any other observation type as children.
476
+ *
477
+ * ## Primary Use Cases
478
+ * - **Business Logic**: User workflows, order processing, data transformations
479
+ * - **API Operations**: REST endpoint handling, database queries, external service calls
480
+ * - **System Operations**: File I/O, network requests, background jobs
481
+ * - **Pipeline Steps**: Data processing stages, validation steps, orchestration
482
+ * - **Application Functions**: Any measurable operation in your application
483
+ *
484
+ * ## Key Features
485
+ * - **Hierarchical Structure**: Can contain child observations of any type
486
+ * - **Flexible Attributes**: Supports arbitrary input, output, and metadata
487
+ * - **Duration Tracking**: Automatically measures execution time from start to end
488
+ * - **Status Monitoring**: Tracks success/failure states and error conditions
489
+ * - **Context Propagation**: Maintains trace context for distributed operations
490
+ *
491
+ * ## Span Lifecycle
492
+ * 1. **Creation**: Span starts automatically when created
493
+ * 2. **Updates**: Add input data, intermediate state, metadata as needed
494
+ * 3. **Child Operations**: Create nested observations for sub-operations
495
+ * 4. **Completion**: Update with final output and call `.end()` to finish
496
+ *
497
+ * @example
498
+ * ```typescript
499
+ * // Basic span tracking
500
+ * const span = startObservation('user-authentication', {
501
+ * input: { username: 'john_doe', method: 'oauth' },
502
+ * metadata: { provider: 'google' }
503
+ * });
504
+ *
505
+ * try {
506
+ * const user = await authenticateUser(credentials);
507
+ * span.update({
508
+ * output: { userId: user.id, success: true }
509
+ * });
510
+ * } catch (error) {
511
+ * span.update({
512
+ * level: 'ERROR',
513
+ * output: { success: false, error: error.message }
514
+ * });
515
+ * } finally {
516
+ * span.end();
517
+ * }
518
+ *
519
+ * // Nested operations
520
+ * const workflow = startObservation('order-processing', {
521
+ * input: { orderId: 'ord_123' }
522
+ * });
523
+ *
524
+ * const validation = workflow.startObservation('validation', {
525
+ * input: { items: cartItems }
526
+ * });
527
+ * validation.update({ output: { valid: true } });
528
+ * validation.end();
529
+ *
530
+ * const payment = workflow.startObservation('payment', {
531
+ * input: { amount: 100 }
532
+ * });
533
+ * payment.update({ output: { status: 'completed' } });
534
+ * payment.end();
535
+ *
536
+ * workflow.update({
537
+ * output: { status: 'confirmed', steps: 2 }
538
+ * });
539
+ * workflow.end();
540
+ * ```
541
+ *
542
+ * @see {@link startObservation} - Factory function for creating spans
543
+ * @see {@link startActiveObservation} - Function-scoped span creation
544
+ * @see {@link LangfuseGeneration} - For LLM and AI model interactions
545
+ * @see {@link LangfuseEvent} - For point-in-time occurrences
546
+ *
547
+ * @public
548
+ */
549
+ declare class LangfuseSpan extends LangfuseBaseObservation {
550
+ constructor(params: LangfuseSpanParams);
551
+ /**
552
+ * Updates this span with new attributes.
553
+ *
554
+ * @param attributes - Span attributes to set
555
+ * @returns This span for method chaining
556
+ *
557
+ * @example
558
+ * ```typescript
559
+ * span.update({
560
+ * output: { result: 'success' },
561
+ * level: 'DEFAULT',
562
+ * metadata: { duration: 150 }
563
+ * });
564
+ * ```
565
+ */
566
+ update(attributes: LangfuseSpanAttributes): LangfuseSpan;
567
+ }
568
+ type LangfuseAgentParams = {
569
+ otelSpan: Span;
570
+ attributes?: LangfuseAgentAttributes;
571
+ };
572
+ /**
573
+ * Specialized observation wrapper for tracking AI agent workflows and autonomous operations.
574
+ *
575
+ * LangfuseAgent is designed for observing intelligent agent systems that combine reasoning,
576
+ * tool usage, memory management, and decision-making in autonomous workflows. It captures
577
+ * the complex multi-step nature of agent operations, including planning, execution, and
578
+ * self-correction cycles typical in advanced AI agent architectures.
579
+ *
580
+ * ## Primary Use Cases
581
+ * - **Autonomous AI Agents**: ReAct, AutoGPT, LangGraph agent implementations
582
+ * - **Tool-Using Agents**: Function calling agents with external API access
583
+ * - **Multi-Step Reasoning**: Chain-of-thought, tree-of-thought agent workflows
584
+ * - **Planning Agents**: Goal decomposition and task planning systems
585
+ * - **Conversational Agents**: Multi-turn dialogue agents with memory
586
+ * - **Code Generation Agents**: AI assistants that write, test, and debug code
587
+ *
588
+ * ## Key Features
589
+ * - **Multi-Step Tracking**: Captures entire agent workflow from planning to execution
590
+ * - **Tool Integration**: Records all tool calls and their results within agent context
591
+ * - **Decision Logic**: Tracks reasoning steps, decisions, and strategy adaptations
592
+ * - **Memory Management**: Observes how agents maintain and use context across steps
593
+ * - **Error Recovery**: Monitors how agents handle failures and adapt their approach
594
+ * - **Performance Metrics**: Measures agent efficiency, success rates, and resource usage
595
+ *
596
+ * ## Agent-Specific Patterns
597
+ * - **Planning Phase**: Initial goal analysis and strategy formulation
598
+ * - **Execution Loop**: Iterative action-observation-reasoning cycles
599
+ * - **Tool Selection**: Dynamic tool choice based on context and goals
600
+ * - **Self-Correction**: Error detection and strategy adjustment
601
+ * - **Memory Updates**: Context retention and knowledge accumulation
602
+ * - **Final Synthesis**: Result compilation and quality assessment
603
+ *
604
+ * @example
605
+ * ```typescript
606
+ * // Basic agent workflow
607
+ * const agent = startObservation('research-agent', {
608
+ * input: {
609
+ * task: 'Research renewable energy trends',
610
+ * tools: ['web-search', 'summarizer'],
611
+ * maxIterations: 3
612
+ * }
613
+ * }, { asType: 'agent' });
614
+ *
615
+ * // Agent uses tools and makes decisions
616
+ * const searchTool = agent.startObservation('web-search', {
617
+ * input: { query: 'renewable energy 2024' }
618
+ * }, { asType: 'tool' });
619
+ *
620
+ * const results = await webSearch('renewable energy 2024');
621
+ * searchTool.update({ output: results });
622
+ * searchTool.end();
623
+ *
624
+ * // Agent generates final response
625
+ * const generation = agent.startObservation('synthesize-findings', {
626
+ * input: results,
627
+ * model: 'gpt-4'
628
+ * }, { asType: 'generation' });
629
+ *
630
+ * const response = await llm.generate(results);
631
+ * generation.update({ output: response });
632
+ * generation.end();
633
+ *
634
+ * agent.update({
635
+ * output: {
636
+ * completed: true,
637
+ * toolsUsed: 1,
638
+ * finalResponse: response
639
+ * }
640
+ * });
641
+ * agent.end();
642
+ * ```
643
+ *
644
+ * @see {@link startObservation} with `{ asType: 'agent' }` - Factory function
645
+ * @see {@link startActiveObservation} with `{ asType: 'agent' }` - Function-scoped agent
646
+ * @see {@link LangfuseTool} - For individual tool executions within agents
647
+ * @see {@link LangfuseChain} - For structured multi-step workflows
648
+ *
649
+ * @public
650
+ */
651
+ declare class LangfuseAgent extends LangfuseBaseObservation {
652
+ constructor(params: LangfuseAgentParams);
653
+ /**
654
+ * Updates this agent observation with new attributes.
655
+ *
656
+ * @param attributes - Agent attributes to set
657
+ * @returns This agent for method chaining
658
+ *
659
+ * @example
660
+ * ```typescript
661
+ * agent.update({
662
+ * output: {
663
+ * taskCompleted: true,
664
+ * iterationsUsed: 5,
665
+ * toolsInvoked: ['web-search', 'calculator', 'summarizer'],
666
+ * finalResult: 'Research completed with high confidence'
667
+ * },
668
+ * metadata: {
669
+ * efficiency: 0.85,
670
+ * qualityScore: 0.92,
671
+ * resourcesConsumed: { tokens: 15000, apiCalls: 12 }
672
+ * }
673
+ * });
674
+ * ```
675
+ */
676
+ update(attributes: LangfuseAgentAttributes): LangfuseAgent;
677
+ }
678
+ type LangfuseToolParams = {
679
+ otelSpan: Span;
680
+ attributes?: LangfuseToolAttributes;
681
+ };
682
+ /**
683
+ * Specialized observation wrapper for tracking individual tool calls and external API interactions.
684
+ *
685
+ * LangfuseTool is designed for observing discrete tool invocations within agent workflows,
686
+ * function calling scenarios, or standalone API integrations. It captures the input parameters,
687
+ * execution results, performance metrics, and error conditions of tool operations, making it
688
+ * essential for debugging tool reliability and optimizing tool selection strategies.
689
+ *
690
+ * ## Primary Use Cases
691
+ * - **Function Calling**: OpenAI function calls, Anthropic tool use, Claude function calling
692
+ * - **External APIs**: REST API calls, GraphQL queries, database operations
693
+ * - **System Tools**: File operations, shell commands, system integrations
694
+ * - **Data Processing Tools**: Calculators, converters, validators, parsers
695
+ * - **Search Tools**: Web search, vector search, document retrieval
696
+ * - **Content Tools**: Image generation, text processing, format conversion
697
+ *
698
+ * ## Key Features
699
+ * - **Parameter Tracking**: Complete input parameter logging and validation
700
+ * - **Result Capture**: Full output data and metadata from tool execution
701
+ * - **Performance Monitoring**: Execution time, success rates, retry attempts
702
+ * - **Error Analysis**: Detailed error tracking with context and recovery info
703
+ * - **Usage Analytics**: Frequency, patterns, and efficiency metrics
704
+ * - **Integration Health**: API status, rate limits, and service availability
705
+ *
706
+ * ## Tool-Specific Patterns
707
+ * - **Input Validation**: Parameter checking and sanitization before execution
708
+ * - **Execution Monitoring**: Real-time performance and status tracking
709
+ * - **Result Processing**: Output validation, transformation, and formatting
710
+ * - **Error Handling**: Retry logic, fallbacks, and graceful degradation
711
+ * - **Caching Integration**: Result caching and cache hit/miss tracking
712
+ * - **Rate Limiting**: Request throttling and quota management
713
+ *
714
+ * @example
715
+ * ```typescript
716
+ * // Web search tool
717
+ * const searchTool = startObservation('web-search', {
718
+ * input: {
719
+ * query: 'latest AI developments',
720
+ * maxResults: 10
721
+ * },
722
+ * metadata: { provider: 'google-api' }
723
+ * }, { asType: 'tool' });
724
+ *
725
+ * try {
726
+ * const results = await webSearch('latest AI developments');
727
+ *
728
+ * searchTool.update({
729
+ * output: {
730
+ * results: results,
731
+ * count: results.length
732
+ * },
733
+ * metadata: {
734
+ * latency: 1200,
735
+ * cacheHit: false
736
+ * }
737
+ * });
738
+ * } catch (error) {
739
+ * searchTool.update({
740
+ * level: 'ERROR',
741
+ * statusMessage: 'Search failed',
742
+ * output: { error: error.message }
743
+ * });
744
+ * } finally {
745
+ * searchTool.end();
746
+ * }
747
+ *
748
+ * // Database query tool
749
+ * const dbTool = startObservation('db-query', {
750
+ * input: {
751
+ * query: 'SELECT * FROM users WHERE active = true',
752
+ * timeout: 30000
753
+ * }
754
+ * }, { asType: 'tool' });
755
+ *
756
+ * const result = await db.query('SELECT * FROM users WHERE active = true');
757
+ * dbTool.update({
758
+ * output: { rowCount: result.length },
759
+ * metadata: { executionTime: 150 }
760
+ * });
761
+ * dbTool.end();
762
+ * ```
763
+ *
764
+ * @see {@link startObservation} with `{ asType: 'tool' }` - Factory function
765
+ * @see {@link startActiveObservation} with `{ asType: 'tool' }` - Function-scoped tool
766
+ * @see {@link LangfuseAgent} - For agent workflows that use multiple tools
767
+ * @see {@link LangfuseChain} - For orchestrated tool sequences
768
+ *
769
+ * @public
770
+ */
771
+ declare class LangfuseTool extends LangfuseBaseObservation {
772
+ /**
+ * Creates a tool observation wrapper around an existing OTel span.
+ * Prefer {@link startObservation} with `{ asType: 'tool' }` over direct construction.
+ *
+ * @param params - Backing OTel span and optional initial tool attributes
+ */
+ constructor(params: LangfuseToolParams);
773
+ /**
774
+ * Updates this tool observation with new attributes.
775
+ *
776
+ * @param attributes - Tool attributes to set
777
+ * @returns This tool for method chaining
778
+ *
779
+ * @example
780
+ * ```typescript
781
+ * tool.update({
782
+ * output: {
783
+ * result: searchResults,
784
+ * count: searchResults.length,
785
+ * relevanceScore: 0.89,
786
+ * executionTime: 1250
787
+ * },
788
+ * metadata: {
789
+ * cacheHit: false,
790
+ * apiCost: 0.025,
791
+ * rateLimitRemaining: 950
792
+ * }
793
+ * });
794
+ * ```
795
+ */
796
+ update(attributes: LangfuseToolAttributes): LangfuseTool;
797
+ }
798
+ /**
+ * Parameters for creating a Langfuse chain.
+ *
+ * @internal
+ */
+ type LangfuseChainParams = {
799
+ /** OpenTelemetry span backing this observation */
+ otelSpan: Span;
800
+ /** Initial chain attributes to set on the span */
+ attributes?: LangfuseChainAttributes;
801
+ };
802
+ /**
803
+ * Specialized observation wrapper for tracking structured multi-step workflows and process chains.
804
+ *
805
+ * LangfuseChain is designed for observing sequential, parallel, or conditional workflow orchestration
806
+ * where multiple operations are coordinated to achieve a larger goal. It captures the flow of data
807
+ * between steps, manages dependencies, tracks progress through complex pipelines, and provides
808
+ * insights into workflow performance and reliability patterns.
809
+ *
810
+ * ## Primary Use Cases
811
+ * - **Data Processing Pipelines**: ETL processes, data transformation workflows
812
+ * - **Business Process Automation**: Order processing, approval workflows, document processing
813
+ * - **LangChain Integration**: LangChain chain execution and orchestration
814
+ * - **RAG Pipelines**: Document retrieval → context preparation → generation → post-processing
815
+ * - **Multi-Model Workflows**: Preprocessing → model inference → post-processing → validation
816
+ * - **Content Production**: Research → drafting → review → editing → publishing
817
+ *
818
+ * ## Key Features
819
+ * - **Step Orchestration**: Sequential, parallel, and conditional step execution tracking
820
+ * - **Data Flow Management**: Input/output tracking between pipeline steps
821
+ * - **Dependency Resolution**: Manages complex step dependencies and prerequisites
822
+ * - **Progress Monitoring**: Real-time workflow progress and completion status
823
+ * - **Error Propagation**: Handles failures, retries, and recovery across workflow steps
824
+ * - **Performance Analytics**: Bottleneck identification and optimization insights
825
+ *
826
+ * ## Chain-Specific Patterns
827
+ * - **Pipeline Setup**: Workflow definition, step configuration, and dependency mapping
828
+ * - **Sequential Execution**: Step-by-step processing with state management
829
+ * - **Parallel Processing**: Concurrent step execution with synchronization
830
+ * - **Conditional Logic**: Dynamic branching based on intermediate results
831
+ * - **Error Recovery**: Failure handling, rollback, and alternative path execution
832
+ * - **Result Aggregation**: Combining outputs from multiple workflow branches
833
+ *
834
+ * @example
835
+ * ```typescript
836
+ * // RAG processing chain
837
+ * const ragChain = startObservation('rag-chain', {
838
+ * input: {
839
+ * query: 'What is renewable energy?',
840
+ * steps: ['retrieval', 'generation']
841
+ * }
842
+ * }, { asType: 'chain' });
843
+ *
844
+ * // Step 1: Document retrieval
845
+ * const retrieval = ragChain.startObservation('document-retrieval', {
846
+ * input: { query: 'renewable energy' }
847
+ * }, { asType: 'retriever' });
848
+ *
849
+ * const docs = await vectorSearch('renewable energy');
850
+ * retrieval.update({ output: { documents: docs, count: docs.length } });
851
+ * retrieval.end();
852
+ *
853
+ * // Step 2: Generate response
854
+ * const generation = ragChain.startObservation('response-generation', {
855
+ * input: {
856
+ * query: 'What is renewable energy?',
857
+ * context: docs
858
+ * },
859
+ * model: 'gpt-4'
860
+ * }, { asType: 'generation' });
861
+ *
862
+ * const response = await llm.generate({
863
+ * prompt: buildPrompt('What is renewable energy?', docs)
864
+ * });
865
+ *
866
+ * generation.update({ output: response });
867
+ * generation.end();
868
+ *
869
+ * ragChain.update({
870
+ * output: {
871
+ * finalResponse: response,
872
+ * stepsCompleted: 2,
873
+ * documentsUsed: docs.length
874
+ * }
875
+ * });
876
+ * ragChain.end();
877
+ * ```
878
+ *
879
+ * @see {@link startObservation} with `{ asType: 'chain' }` - Factory function
880
+ * @see {@link startActiveObservation} with `{ asType: 'chain' }` - Function-scoped chain
881
+ * @see {@link LangfuseSpan} - For individual workflow steps
882
+ * @see {@link LangfuseAgent} - For intelligent workflow orchestration
883
+ *
884
+ * @public
885
+ */
886
+ declare class LangfuseChain extends LangfuseBaseObservation {
887
+ /**
+ * Creates a chain observation wrapper around an existing OTel span.
+ * Prefer {@link startObservation} with `{ asType: 'chain' }` over direct construction.
+ *
+ * @param params - Backing OTel span and optional initial chain attributes
+ */
+ constructor(params: LangfuseChainParams);
888
+ /**
889
+ * Updates this chain observation with new attributes.
890
+ *
891
+ * @param attributes - Chain attributes to set
892
+ * @returns This chain for method chaining
893
+ *
894
+ * @example
895
+ * ```typescript
896
+ * chain.update({
897
+ * output: {
898
+ * stepsCompleted: 5,
899
+ * stepsSuccessful: 4,
900
+ * finalResult: processedData,
901
+ * pipelineEfficiency: 0.87
902
+ * },
903
+ * metadata: {
904
+ * bottleneckStep: 'data-validation',
905
+ * parallelizationOpportunities: ['step-2', 'step-3'],
906
+ * optimizationSuggestions: ['cache-intermediate-results']
907
+ * }
908
+ * });
909
+ * ```
910
+ */
911
+ update(attributes: LangfuseChainAttributes): LangfuseChain;
912
+ }
913
+ /**
+ * Parameters for creating a Langfuse retriever.
+ *
+ * @internal
+ */
+ type LangfuseRetrieverParams = {
914
+ /** OpenTelemetry span backing this observation */
+ otelSpan: Span;
915
+ /** Initial retriever attributes to set on the span */
+ attributes?: LangfuseRetrieverAttributes;
916
+ };
917
+ /**
918
+ * Specialized observation wrapper for tracking document retrieval and search operations.
919
+ *
920
+ * LangfuseRetriever is designed for observing information retrieval systems that search,
921
+ * filter, and rank content from various data sources. It captures search queries, retrieval
922
+ * strategies, result quality metrics, and performance characteristics of search operations,
923
+ * making it essential for RAG systems, knowledge bases, and content discovery workflows.
924
+ *
925
+ * ## Primary Use Cases
926
+ * - **Vector Search**: Semantic similarity search using embeddings and vector databases
927
+ * - **Document Retrieval**: Full-text search, keyword matching, and document filtering
928
+ * - **Knowledge Base Query**: FAQ systems, help documentation, and knowledge management
929
+ * - **RAG Systems**: Retrieval step in retrieval-augmented generation pipelines
930
+ * - **Recommendation Systems**: Content recommendations and similarity-based suggestions
931
+ * - **Data Mining**: Information extraction and content discovery from large datasets
932
+ *
933
+ * ## Key Features
934
+ * - **Query Analysis**: Input query processing, expansion, and optimization tracking
935
+ * - **Search Strategy**: Retrieval algorithms, ranking functions, and filtering criteria
936
+ * - **Result Quality**: Relevance scores, diversity metrics, and retrieval effectiveness
937
+ * - **Performance Metrics**: Search latency, index size, and throughput measurements
938
+ * - **Source Tracking**: Data source attribution and content provenance information
939
+ * - **Ranking Intelligence**: Personalization, context awareness, and result optimization
940
+ *
941
+ * @example
942
+ * ```typescript
943
+ * // Vector search retrieval
944
+ * const retriever = startObservation('vector-search', {
945
+ * input: {
946
+ * query: 'machine learning applications',
947
+ * topK: 10,
948
+ * similarityThreshold: 0.7
949
+ * },
950
+ * metadata: {
951
+ * vectorDB: 'pinecone',
952
+ * embeddingModel: 'text-embedding-ada-002'
953
+ * }
954
+ * }, { asType: 'retriever' });
955
+ *
956
+ * const results = await vectorDB.search({
957
+ * query: 'machine learning applications',
958
+ * topK: 10,
959
+ * threshold: 0.7
960
+ * });
961
+ *
962
+ * retriever.update({
963
+ * output: {
964
+ * documents: results,
965
+ * count: results.length,
966
+ * avgSimilarity: 0.89
967
+ * },
968
+ * metadata: {
969
+ * searchLatency: 150,
970
+ * cacheHit: false
971
+ * }
972
+ * });
973
+ * retriever.end();
974
+ * ```
975
+ *
976
+ * @see {@link startObservation} with `{ asType: 'retriever' }` - Factory function
977
+ * @see {@link LangfuseChain} - For multi-step RAG pipelines
978
+ * @see {@link LangfuseEmbedding} - For embedding generation used in vector search
979
+ *
980
+ * @public
981
+ */
982
+ declare class LangfuseRetriever extends LangfuseBaseObservation {
983
+ /**
+ * Creates a retriever observation wrapper around an existing OTel span.
+ * Prefer {@link startObservation} with `{ asType: 'retriever' }` over direct construction.
+ *
+ * @param params - Backing OTel span and optional initial retriever attributes
+ */
+ constructor(params: LangfuseRetrieverParams);
984
+ /**
985
+ * Updates this retriever observation with new attributes.
986
+ *
987
+ * @param attributes - Retriever attributes to set
988
+ * @returns This retriever for method chaining
989
+ */
990
+ update(attributes: LangfuseRetrieverAttributes): LangfuseRetriever;
991
+ }
992
+ /**
+ * Parameters for creating a Langfuse evaluator.
+ *
+ * @internal
+ */
+ type LangfuseEvaluatorParams = {
993
+ /** OpenTelemetry span backing this observation */
+ otelSpan: Span;
994
+ /** Initial evaluator attributes to set on the span */
+ attributes?: LangfuseEvaluatorAttributes;
995
+ };
996
+ /**
997
+ * Specialized observation wrapper for tracking quality assessment and evaluation operations.
998
+ *
999
+ * LangfuseEvaluator is designed for observing evaluation systems that assess, score, and
1000
+ * validate the quality of AI outputs, content, or system performance. It captures evaluation
1001
+ * criteria, scoring methodologies, benchmark comparisons, and quality metrics, making it
1002
+ * essential for AI system validation, content moderation, and performance monitoring.
1003
+ *
1004
+ * ## Primary Use Cases
1005
+ * - **LLM Output Evaluation**: Response quality, factual accuracy, and relevance assessment
1006
+ * - **Content Quality Assessment**: Writing quality, tone analysis, and style validation
1007
+ * - **Automated Testing**: System performance validation and regression testing
1008
+ * - **Bias Detection**: Fairness evaluation and bias identification in AI systems
1009
+ * - **Safety Evaluation**: Content safety, harm detection, and compliance checking
1010
+ * - **Benchmark Comparison**: Performance comparison against reference standards
1011
+ *
1012
+ * ## Key Features
1013
+ * - **Multi-Criteria Scoring**: Comprehensive evaluation across multiple quality dimensions
1014
+ * - **Automated Assessment**: AI-powered evaluation using specialized models and algorithms
1015
+ * - **Human Evaluation**: Integration with human reviewers and expert assessment
1016
+ * - **Benchmark Tracking**: Performance comparison against established baselines
1017
+ * - **Quality Metrics**: Detailed scoring with confidence intervals and reliability measures
1018
+ * - **Trend Analysis**: Quality tracking over time with improvement recommendations
1019
+ *
1020
+ * @example
1021
+ * ```typescript
1022
+ * // Response quality evaluation
1023
+ * const evaluator = startObservation('response-quality-eval', {
1024
+ * input: {
1025
+ * response: 'Machine learning is a subset of artificial intelligence...',
1026
+ * criteria: ['accuracy', 'completeness', 'clarity']
1027
+ * }
1028
+ * }, { asType: 'evaluator' });
1029
+ *
1030
+ * const evaluation = await evaluateResponse({
1031
+ * response: 'Machine learning is a subset of artificial intelligence...',
1032
+ * criteria: ['accuracy', 'completeness', 'clarity']
1033
+ * });
1034
+ *
1035
+ * evaluator.update({
1036
+ * output: {
1037
+ * overallScore: 0.87,
1038
+ * criteriaScores: {
1039
+ * accuracy: 0.92,
1040
+ * completeness: 0.85,
1041
+ * clarity: 0.90
1042
+ * },
1043
+ * passed: true
1044
+ * }
1045
+ * });
1046
+ * evaluator.end();
1047
+ * ```
1048
+ *
1049
+ * @see {@link startObservation} with `{ asType: 'evaluator' }` - Factory function
1050
+ * @see {@link LangfuseGeneration} - For LLM outputs being evaluated
1051
+ * @see {@link LangfuseGuardrail} - For safety and compliance enforcement
1052
+ *
1053
+ * @public
1054
+ */
1055
+ declare class LangfuseEvaluator extends LangfuseBaseObservation {
1056
+ /**
+ * Creates an evaluator observation wrapper around an existing OTel span.
+ * Prefer {@link startObservation} with `{ asType: 'evaluator' }` over direct construction.
+ *
+ * @param params - Backing OTel span and optional initial evaluator attributes
+ */
+ constructor(params: LangfuseEvaluatorParams);
1057
+ /**
1058
+ * Updates this evaluator observation with new attributes.
1059
+ *
1060
+ * @param attributes - Evaluator attributes to set
1061
+ * @returns This evaluator for method chaining
1062
+ */
1063
+ update(attributes: LangfuseEvaluatorAttributes): LangfuseEvaluator;
1064
+ }
1065
+ /**
+ * Parameters for creating a Langfuse guardrail.
+ *
+ * @internal
+ */
+ type LangfuseGuardrailParams = {
1066
+ /** OpenTelemetry span backing this observation */
+ otelSpan: Span;
1067
+ /** Initial guardrail attributes to set on the span */
+ attributes?: LangfuseGuardrailAttributes;
1068
+ };
1069
+ /**
1070
+ * Specialized observation wrapper for tracking safety checks and compliance enforcement.
1071
+ *
1072
+ * LangfuseGuardrail is designed for observing safety and compliance systems that prevent,
1073
+ * detect, and mitigate harmful, inappropriate, or policy-violating content and behaviors
1074
+ * in AI applications. It captures safety policies, violation detection, risk assessment,
1075
+ * and mitigation actions, ensuring responsible AI deployment and regulatory compliance.
1076
+ *
1077
+ * ## Primary Use Cases
1078
+ * - **Content Moderation**: Harmful content detection and filtering in user inputs/outputs
1079
+ * - **Safety Enforcement**: PII detection, toxicity filtering, and inappropriate content blocking
1080
+ * - **Compliance Monitoring**: Regulatory compliance, industry standards, and policy enforcement
1081
+ * - **Bias Mitigation**: Fairness checks and bias prevention in AI decision-making
1082
+ * - **Privacy Protection**: Data privacy safeguards and sensitive information redaction
1083
+ * - **Behavioral Monitoring**: User behavior analysis and anomaly detection
1084
+ *
1085
+ * ## Key Features
1086
+ * - **Multi-Policy Enforcement**: Simultaneous checking against multiple safety policies
1087
+ * - **Risk Assessment**: Quantitative risk scoring with confidence intervals
1088
+ * - **Real-Time Detection**: Low-latency safety checks for interactive applications
1089
+ * - **Context Awareness**: Contextual safety evaluation considering user and application context
1090
+ * - **Mitigation Actions**: Automatic content blocking, filtering, and redaction capabilities
1091
+ * - **Audit Trail**: Comprehensive logging for compliance and safety incident investigation
1092
+ *
1093
+ * @example
1094
+ * ```typescript
1095
+ * // Content safety guardrail
1096
+ * const guardrail = startObservation('content-safety-check', {
1097
+ * input: {
1098
+ * content: userMessage,
1099
+ * policies: ['no-toxicity', 'no-hate-speech'],
1100
+ * strictMode: false
1101
+ * }
1102
+ * }, { asType: 'guardrail' });
1103
+ *
1104
+ * const safetyCheck = await checkContentSafety({
1105
+ * text: userMessage,
1106
+ * policies: ['no-toxicity', 'no-hate-speech']
1107
+ * });
1108
+ *
1109
+ * guardrail.update({
1110
+ * output: {
1111
+ * safe: safetyCheck.safe,
1112
+ * riskScore: 0.15,
1113
+ * violations: [],
1114
+ * action: 'allow'
1115
+ * }
1116
+ * });
1117
+ * guardrail.end();
1118
+ * ```
1119
+ *
1120
+ * @see {@link startObservation} with `{ asType: 'guardrail' }` - Factory function
1121
+ * @see {@link LangfuseEvaluator} - For detailed quality and safety assessment
1122
+ * @see {@link LangfuseGeneration} - For protecting LLM outputs with guardrails
1123
+ *
1124
+ * @public
1125
+ */
1126
+ declare class LangfuseGuardrail extends LangfuseBaseObservation {
1127
+ /**
+ * Creates a guardrail observation wrapper around an existing OTel span.
+ * Prefer {@link startObservation} with `{ asType: 'guardrail' }` over direct construction.
+ *
+ * @param params - Backing OTel span and optional initial guardrail attributes
+ */
+ constructor(params: LangfuseGuardrailParams);
1128
+ /**
1129
+ * Updates this guardrail observation with new attributes.
1130
+ *
1131
+ * @param attributes - Guardrail attributes to set
1132
+ * @returns This guardrail for method chaining
1133
+ */
1134
+ update(attributes: LangfuseGuardrailAttributes): LangfuseGuardrail;
1135
+ }
1136
+ /**
1137
+ * Parameters for creating a Langfuse generation.
1138
+ *
1139
+ * @internal
1140
+ */
1141
+ type LangfuseGenerationParams = {
1142
+ /** OpenTelemetry span backing this observation */
+ otelSpan: Span;
1143
+ /** Initial generation attributes to set on the span */
+ attributes?: LangfuseGenerationAttributes;
1144
+ };
1145
+ /**
1146
+ * Specialized observation wrapper for tracking LLM interactions, AI model calls, and text generation.
1147
+ *
1148
+ * LangfuseGeneration is purpose-built for observing AI model interactions, providing rich
1149
+ * metadata capture for prompts, completions, model parameters, token usage, and costs.
1150
+ * It's the go-to observation type for any operation involving language models, chat APIs,
1151
+ * completion APIs, or other generative AI services.
1152
+ *
1153
+ * ## Primary Use Cases
1154
+ * - **LLM API Calls**: OpenAI, Anthropic, Cohere, Azure OpenAI, AWS Bedrock
1155
+ * - **Chat Completions**: Multi-turn conversations and dialogue systems
1156
+ * - **Text Generation**: Content creation, summarization, translation
1157
+ * - **Code Generation**: AI-powered code completion and generation
1158
+ * - **RAG Systems**: Generation step in retrieval-augmented generation
1159
+ * - **AI Agents**: LLM reasoning and decision-making within agent workflows
1160
+ *
1161
+ * ## Key Features
1162
+ * - **Rich LLM Metadata**: Model name, parameters, prompts, completions
1163
+ * - **Usage Tracking**: Token counts (prompt, completion, total)
1164
+ * - **Cost Monitoring**: Automatic cost calculation and tracking
1165
+ * - **Performance Metrics**: Latency, throughput, tokens per second
1166
+ * - **Prompt Engineering**: Version control for prompts and templates
1167
+ * - **Error Handling**: Rate limits, timeouts, model-specific errors
1168
+ *
1169
+ * ## Generation-Specific Attributes
1170
+ * - `model`: Model identifier (e.g., 'gpt-4-turbo', 'claude-3-sonnet')
1171
+ * - `modelParameters`: Temperature, max tokens, top-p, frequency penalty
1172
+ * - `input`: Prompt or message array for the model
1173
+ * - `output`: Model response, completion, or generated content
1174
+ * - `usageDetails`: Token consumption (prompt, completion, total)
1175
+ * - `costDetails`: Financial cost breakdown and pricing
1176
+ * - `prompt`: Structured prompt object with name, version, variables
1177
+ *
1178
+ * @example
1179
+ * ```typescript
1180
+ * // Basic LLM generation tracking
1181
+ * const generation = startObservation('openai-completion', {
1182
+ * model: 'gpt-4-turbo',
1183
+ * input: [
1184
+ * { role: 'system', content: 'You are a helpful assistant.' },
1185
+ * { role: 'user', content: 'Explain quantum computing' }
1186
+ * ],
1187
+ * modelParameters: {
1188
+ * temperature: 0.7,
1189
+ * maxTokens: 500
1190
+ * }
1191
+ * }, { asType: 'generation' });
1192
+ *
1193
+ * try {
1194
+ * const response = await openai.chat.completions.create({
1195
+ * model: 'gpt-4-turbo',
1196
+ * messages: [
1197
+ * { role: 'system', content: 'You are a helpful assistant.' },
1198
+ * { role: 'user', content: 'Explain quantum computing' }
1199
+ * ],
1200
+ * temperature: 0.7,
1201
+ * max_tokens: 500
1202
+ * });
1203
+ *
1204
+ * generation.update({
1205
+ * output: response.choices[0].message,
1206
+ * usageDetails: {
1207
+ * promptTokens: response.usage.prompt_tokens,
1208
+ * completionTokens: response.usage.completion_tokens,
1209
+ * totalTokens: response.usage.total_tokens
1210
+ * },
1211
+ * costDetails: {
1212
+ * totalCost: 0.025,
1213
+ * currency: 'USD'
1214
+ * }
1215
+ * });
1216
+ * } catch (error) {
1217
+ * generation.update({
1218
+ * level: 'ERROR',
1219
+ * statusMessage: `API error: ${error.message}`,
1220
+ * output: { error: error.message }
1221
+ * });
1222
+ * } finally {
1223
+ * generation.end();
1224
+ * }
1225
+ *
1226
+ * // RAG generation example
1227
+ * const ragGeneration = startObservation('rag-response', {
1228
+ * model: 'gpt-4',
1229
+ * input: [
1230
+ * { role: 'system', content: 'Answer based on provided context.' },
1231
+ * { role: 'user', content: `Context: ${context}\n\nQuestion: ${question}` }
1232
+ * ],
1233
+ * modelParameters: { temperature: 0.1 }
1234
+ * }, { asType: 'generation' });
1235
+ *
1236
+ * const response = await llm.generate({ prompt, context });
1237
+ * ragGeneration.update({
1238
+ * output: response,
1239
+ * metadata: { contextSources: 3 }
1240
+ * });
1241
+ * ragGeneration.end();
1242
+ * ```
1243
+ *
1244
+ * @see {@link startObservation} with `{ asType: 'generation' }` - Factory function
1245
+ * @see {@link startActiveObservation} with `{ asType: 'generation' }` - Function-scoped generation
1246
+ * @see {@link LangfuseSpan} - For general-purpose operations
1247
+ * @see {@link LangfuseEmbedding} - For text embedding and vector operations
1248
+ *
1249
+ * @public
1250
+ */
1251
+ declare class LangfuseGeneration extends LangfuseBaseObservation {
1252
+ /**
+ * Creates a generation observation wrapper around an existing OTel span.
+ * Prefer {@link startObservation} with `{ asType: 'generation' }` over direct construction.
+ *
+ * @param params - Backing OTel span and optional initial generation attributes
+ */
+ constructor(params: LangfuseGenerationParams);
1253
+ /**
+ * Updates this generation observation with new attributes.
+ *
+ * @param attributes - Generation attributes to set (e.g. output, usageDetails, costDetails)
+ * @returns This generation for method chaining
+ */
+ update(attributes: LangfuseGenerationAttributes): LangfuseGeneration;
1254
+ }
1255
+ /**
+ * Parameters for creating a Langfuse embedding.
+ *
+ * @internal
+ */
+ type LangfuseEmbeddingParams = {
1256
+ /** OpenTelemetry span backing this observation */
+ otelSpan: Span;
1257
+ /** Initial embedding attributes to set on the span */
+ attributes?: LangfuseEmbeddingAttributes;
1258
+ };
1259
+ /**
1260
+ * Specialized observation wrapper for tracking text embedding and vector generation operations.
1261
+ *
1262
+ * LangfuseEmbedding is designed for observing embedding model interactions that convert
1263
+ * text, images, or other content into high-dimensional vector representations. It captures
1264
+ * embedding model parameters, input preprocessing, vector characteristics, and performance
1265
+ * metrics, making it essential for semantic search, RAG systems, and similarity-based applications.
1266
+ *
1267
+ * ## Primary Use Cases
1268
+ * - **Text Embeddings**: Converting text to vectors for semantic search and similarity
1269
+ * - **Document Indexing**: Creating vector representations for large document collections
1270
+ * - **Semantic Search**: Enabling similarity-based search and content discovery
1271
+ * - **RAG Preparation**: Embedding documents and queries for retrieval-augmented generation
1272
+ * - **Clustering Analysis**: Grouping similar content using vector representations
1273
+ * - **Recommendation Systems**: Content similarity for personalized recommendations
1274
+ *
1275
+ * ## Key Features
1276
+ * - **Model Tracking**: Embedding model selection, version, and parameter monitoring
1277
+ * - **Input Processing**: Text preprocessing, tokenization, and normalization tracking
1278
+ * - **Vector Analysis**: Dimensionality, magnitude, and quality metrics for generated embeddings
1279
+ * - **Batch Processing**: Efficient handling of multiple texts in single embedding operations
1280
+ * - **Performance Monitoring**: Embedding generation speed, cost tracking, and efficiency metrics
1281
+ * - **Quality Assessment**: Vector quality evaluation and embedding effectiveness measurement
1282
+ *
1283
+ * @example
1284
+ * ```typescript
1285
+ * // Text embedding generation
1286
+ * const embedding = startObservation('text-embedder', {
1287
+ * input: {
1288
+ * texts: [
1289
+ * 'Machine learning is a subset of AI',
1290
+ * 'Deep learning uses neural networks'
1291
+ * ],
1292
+ * batchSize: 2
1293
+ * },
1294
+ * model: 'text-embedding-ada-002'
1295
+ * }, { asType: 'embedding' });
1296
+ *
1297
+ * const embedResult = await generateEmbeddings({
1298
+ * texts: [
1299
+ * 'Machine learning is a subset of AI',
1300
+ * 'Deep learning uses neural networks'
1301
+ * ],
1302
+ * model: 'text-embedding-ada-002'
1303
+ * });
1304
+ *
1305
+ * embedding.update({
1306
+ * output: {
1307
+ * embeddings: embedResult.vectors,
1308
+ * count: embedResult.vectors.length,
1309
+ * dimensions: 1536
1310
+ * },
1311
+ * usageDetails: {
1312
+ * totalTokens: embedResult.tokenCount
1313
+ * },
1314
+ * metadata: {
1315
+ * processingTime: 340
1316
+ * }
1317
+ * });
1318
+ * embedding.end();
1319
+ * ```
1320
+ *
1321
+ * @see {@link startObservation} with `{ asType: 'embedding' }` - Factory function
1322
+ * @see {@link LangfuseRetriever} - For using embeddings in vector search
1323
+ * @see {@link LangfuseGeneration} - For LLM operations that may use embeddings
1324
+ *
1325
+ * @public
1326
+ */
1327
+ declare class LangfuseEmbedding extends LangfuseBaseObservation {
1328
+ /**
+ * Creates an embedding observation wrapper around an existing OTel span.
+ * Prefer {@link startObservation} with `{ asType: 'embedding' }` over direct construction.
+ *
+ * @param params - Backing OTel span and optional initial embedding attributes
+ */
+ constructor(params: LangfuseEmbeddingParams);
1329
+ /**
1330
+ * Updates this embedding observation with new attributes.
1331
+ *
1332
+ * @param attributes - Embedding attributes to set
1333
+ * @returns This embedding for method chaining
1334
+ */
1335
+ update(attributes: LangfuseEmbeddingAttributes): LangfuseEmbedding;
1336
+ }
1337
+ /**
1338
+ * Parameters for creating a Langfuse event.
1339
+ *
1340
+ * @internal
1341
+ */
1342
+ type LangfuseEventParams = {
1343
+ /** OpenTelemetry span backing this observation */
+ otelSpan: Span;
1344
+ /** Initial event attributes to set on the span */
+ attributes?: LangfuseEventAttributes;
1345
+ /** Point in time at which the event occurred */
+ timestamp: TimeInput;
1346
+ };
1347
+ /**
1348
+ * Langfuse event wrapper for point-in-time observations.
1349
+ *
1350
+ * Events represent instantaneous occurrences or log entries within a trace.
1351
+ * Unlike spans and generations, they don't have duration and are automatically
1352
+ * ended when created.
1353
+ *
1354
+ * @public
1355
+ */
1356
+ declare class LangfuseEvent extends LangfuseBaseObservation {
1357
+ /**
+ * Creates an event observation wrapper around an existing OTel span.
+ * Events are point-in-time observations and are automatically ended when created.
+ * Prefer {@link startObservation} with `{ asType: 'event' }` over direct construction.
+ *
+ * @param params - Backing OTel span, optional attributes, and the event timestamp
+ */
+ constructor(params: LangfuseEventParams);
1358
+ }
1359
+
1360
+ /**
1361
+ * Creates OpenTelemetry attributes from Langfuse trace attributes.
1362
+ *
1363
+ * Converts user-friendly trace attributes into the internal OpenTelemetry
1364
+ * attribute format required by the span processor.
1365
+ *
1366
+ * @param attributes - Langfuse trace attributes to convert
1367
+ * @returns OpenTelemetry attributes object with non-null values
1368
+ *
1369
+ * @example
1370
+ * ```typescript
1371
+ * import { createTraceAttributes } from '@elasticdash/tracing';
1372
+ *
1373
+ * const otelAttributes = createTraceAttributes({
1374
+ * name: 'user-checkout-flow',
1375
+ * userId: 'user-123',
1376
+ * sessionId: 'session-456',
1377
+ * tags: ['checkout', 'payment'],
1378
+ * metadata: { version: '2.1.0' }
1379
+ * });
1380
+ *
1381
+ * span.setAttributes(otelAttributes);
1382
+ * ```
1383
+ *
1384
+ * @public
1385
+ */
1386
+ declare function createTraceAttributes({ name, userId, sessionId, version, release, input, output, metadata, tags, environment, public: isPublic, }?: LangfuseTraceAttributes): Attributes;
1387
+ /**
+ * Creates OpenTelemetry attributes from Langfuse observation attributes.
+ *
+ * Counterpart of {@link createTraceAttributes} for observations: converts
+ * user-friendly observation attributes into the internal OpenTelemetry
+ * attribute format for the given observation type.
+ *
+ * @param type - The observation type the attributes belong to (e.g. 'span', 'generation')
+ * @param attributes - Langfuse observation attributes to convert
+ * @returns OpenTelemetry attributes object
+ */
+ declare function createObservationAttributes(type: LangfuseObservationType, attributes: LangfuseObservationAttributes): Attributes;
1388
+
1389
+ /**
1390
+ * Sets an isolated TracerProvider for Langfuse tracing operations.
1391
+ *
1392
+ * This allows Langfuse to use its own TracerProvider instance, separate from
1393
+ * the global OpenTelemetry TracerProvider. This is useful for avoiding conflicts
1394
+ * with other OpenTelemetry instrumentation in the application.
1395
+ *
1396
+ * ⚠️ **Limitation: Span Context Sharing**
1397
+ *
1398
+ * While this function isolates span processing and export, it does NOT provide
1399
+ * complete trace isolation. OpenTelemetry context (trace IDs, parent spans) is
1400
+ * still shared between the global and isolated providers. This means:
1401
+ *
1402
+ * - Spans created with the isolated provider inherit trace IDs from global spans
1403
+ * - Spans created with the isolated provider inherit parent relationships from global spans
1404
+ * - This can result in spans from different providers being part of the same logical trace
1405
+ *
1406
+ * **Why this happens:**
1407
+ * OpenTelemetry uses a global context propagation mechanism that operates at the
1408
+ * JavaScript runtime level, independent of individual TracerProvider instances.
1409
+ * The context (containing trace ID, span ID) flows through async boundaries and
1410
+ * is inherited by all spans created within that context, regardless of which
1411
+ * TracerProvider creates them.
1412
+ *
1413
+ * @example
1414
+ * ```typescript
1415
+ * import { NodeTracerProvider } from '@opentelemetry/sdk-trace-node';
1416
+ * import { LangfuseSpanProcessor } from '@elasticdash/otel';
1417
+ * import { setLangfuseTracerProvider } from '@elasticdash/tracing';
1418
+ *
1419
+ * // Create provider with span processors in constructor
1420
+ * const provider = new NodeTracerProvider({
1421
+ * spanProcessors: [new LangfuseSpanProcessor()]
1422
+ * });
1423
+ *
1424
+ * setLangfuseTracerProvider(provider);
1425
+ *
1426
+ * // Note: Spans created with getLangfuseTracer() may still inherit
1427
+ * // context from spans created with the global tracer
1428
+ * ```
1429
+ *
1430
+ * @param provider - The TracerProvider instance to use, or null to clear the isolated provider
1431
+ * @public
1432
+ */
1433
+ declare function setLangfuseTracerProvider(provider: TracerProvider | null): void;
1434
+ /**
1435
+ * Gets the TracerProvider for Langfuse tracing operations.
1436
+ *
1437
+ * Returns the isolated TracerProvider if one has been set via setLangfuseTracerProvider(),
1438
+ * otherwise falls back to the global OpenTelemetry TracerProvider.
1439
+ *
1440
+ * @example
1441
+ * ```typescript
1442
+ * import { getLangfuseTracerProvider } from '@elasticdash/tracing';
1443
+ *
1444
+ * const provider = getLangfuseTracerProvider();
1445
+ * const tracer = provider.getTracer('my-tracer', '1.0.0');
1446
+ * ```
1447
+ *
1448
+ * @returns The TracerProvider instance to use for Langfuse tracing
1449
+ * @public
1450
+ */
1451
+ declare function getLangfuseTracerProvider(): TracerProvider;
1452
+ /**
1453
+ * Gets the OpenTelemetry tracer instance for Langfuse.
1454
+ *
1455
+ * This function returns a tracer specifically configured for Langfuse
1456
+ * with the correct tracer name and version. Used internally by all
1457
+ * Langfuse tracing functions to ensure consistent trace creation.
1458
+ *
1459
+ * @returns The Langfuse OpenTelemetry tracer instance
1460
+ *
1461
+ * @example
1462
+ * ```typescript
1463
+ * import { getLangfuseTracer } from '@elasticdash/tracing';
1464
+ *
1465
+ * const tracer = getLangfuseTracer();
1466
+ * const span = tracer.startSpan('my-operation');
1467
+ * ```
1468
+ *
1469
+ * @public
1470
+ */
1471
+ declare function getLangfuseTracer(): _opentelemetry_api.Tracer;
1472
+
1473
+ /**
1474
+ * Options for starting observations (spans, generations, events).
1475
+ *
1476
+ * @public
1477
+ */
1478
+ type StartObservationOptions = {
1479
+ /** Custom start time for the observation */
1480
+ startTime?: Date;
1481
+ /** Parent span context to attach this observation to */
1482
+ parentSpanContext?: SpanContext;
1483
+ };
1484
+ /**
1485
+ * Options for starting an observation that is set as active in the current context.
1486
+ *
1487
+ * Extends StartObservationOptions with additional context-specific configuration.
1488
+ *
1489
+ * @public
1490
+ */
1491
+ type StartActiveObservationContext = StartObservationOptions & {
1492
+ /** Whether to automatically end the observation when exiting the context. Default is true */
1493
+ endOnExit?: boolean;
1494
+ };
1495
+ /**
1496
+ * Options for startObservation function.
1497
+ *
1498
+ * @public
1499
+ */
1500
+ type StartObservationOpts = StartObservationOptions & {
1501
+ /** Type of observation to create. Defaults to 'span' */
1502
+ asType?: LangfuseObservationType;
1503
+ };
1504
+ /**
1505
+ * Options for startActiveObservation function.
1506
+ *
1507
+ * @public
1508
+ */
1509
+ type StartActiveObservationOpts = StartActiveObservationContext & {
1510
+ /** Type of observation to create. Defaults to 'span' */
1511
+ asType?: LangfuseObservationType;
1512
+ };
1513
+ /**
+ * Starts a new Langfuse observation of the given type.
+ *
+ * The `asType` option selects the observation kind and the return type of the
+ * overload: 'generation', 'event', 'agent', 'tool', 'chain', 'retriever',
+ * 'evaluator', 'guardrail', 'embedding', or 'span' (the default). Except for
+ * events — which are ended automatically on creation — the caller is
+ * responsible for ending the returned observation via `.end()`.
+ *
+ * @param name - Name of the observation
+ * @param attributes - Initial attributes for the chosen observation type
+ * @param options - Start options, including `asType` and optional `startTime` / `parentSpanContext`
+ * @returns The type-specific observation wrapper (e.g. LangfuseGeneration for 'generation')
+ *
+ * @public
+ */
+ declare function startObservation(name: string, attributes: LangfuseGenerationAttributes, options: StartObservationOpts & {
1514
+ asType: "generation";
1515
+ }): LangfuseGeneration;
1516
+ declare function startObservation(name: string, attributes: LangfuseEventAttributes, options: StartObservationOpts & {
1517
+ asType: "event";
1518
+ }): LangfuseEvent;
1519
+ declare function startObservation(name: string, attributes: LangfuseAgentAttributes, options: StartObservationOpts & {
1520
+ asType: "agent";
1521
+ }): LangfuseAgent;
1522
+ declare function startObservation(name: string, attributes: LangfuseToolAttributes, options: StartObservationOpts & {
1523
+ asType: "tool";
1524
+ }): LangfuseTool;
1525
+ declare function startObservation(name: string, attributes: LangfuseChainAttributes, options: StartObservationOpts & {
1526
+ asType: "chain";
1527
+ }): LangfuseChain;
1528
+ declare function startObservation(name: string, attributes: LangfuseRetrieverAttributes, options: StartObservationOpts & {
1529
+ asType: "retriever";
1530
+ }): LangfuseRetriever;
1531
+ declare function startObservation(name: string, attributes: LangfuseEvaluatorAttributes, options: StartObservationOpts & {
1532
+ asType: "evaluator";
1533
+ }): LangfuseEvaluator;
1534
+ declare function startObservation(name: string, attributes: LangfuseGuardrailAttributes, options: StartObservationOpts & {
1535
+ asType: "guardrail";
1536
+ }): LangfuseGuardrail;
1537
+ declare function startObservation(name: string, attributes: LangfuseEmbeddingAttributes, options: StartObservationOpts & {
1538
+ asType: "embedding";
1539
+ }): LangfuseEmbedding;
1540
+ declare function startObservation(name: string, attributes?: LangfuseSpanAttributes, options?: StartObservationOpts & {
1541
+ asType?: "span";
1542
+ }): LangfuseSpan;
1543
/**
 * Starts an observation, makes it active in the current context for the duration of
 * `fn`, and passes `fn`'s result through unchanged (a `Promise` for async callbacks,
 * since the return type is `ReturnType<F>`).
 *
 * The observation type is selected via `options.asType` and determines the typed
 * wrapper handed to `fn`. Per {@link StartActiveObservationContext}, the observation
 * is ended automatically when the callback exits unless `options.endOnExit` is false.
 *
 * @param name - Name of the observation
 * @param fn - Callback receiving the typed observation wrapper; its return value is passed through
 * @param options - Start options, including the observation type (`asType`, defaults to 'span')
 * @returns Whatever `fn` returns
 *
 * @public
 */
declare function startActiveObservation<F extends (generation: LangfuseGeneration) => unknown>(name: string, fn: F, options: StartActiveObservationOpts & {
    asType: "generation";
}): ReturnType<F>;
declare function startActiveObservation<F extends (embedding: LangfuseEmbedding) => unknown>(name: string, fn: F, options: StartActiveObservationOpts & {
    asType: "embedding";
}): ReturnType<F>;
declare function startActiveObservation<F extends (agent: LangfuseAgent) => unknown>(name: string, fn: F, options: StartActiveObservationOpts & {
    asType: "agent";
}): ReturnType<F>;
declare function startActiveObservation<F extends (tool: LangfuseTool) => unknown>(name: string, fn: F, options: StartActiveObservationOpts & {
    asType: "tool";
}): ReturnType<F>;
declare function startActiveObservation<F extends (chain: LangfuseChain) => unknown>(name: string, fn: F, options: StartActiveObservationOpts & {
    asType: "chain";
}): ReturnType<F>;
declare function startActiveObservation<F extends (retriever: LangfuseRetriever) => unknown>(name: string, fn: F, options: StartActiveObservationOpts & {
    asType: "retriever";
}): ReturnType<F>;
declare function startActiveObservation<F extends (evaluator: LangfuseEvaluator) => unknown>(name: string, fn: F, options: StartActiveObservationOpts & {
    asType: "evaluator";
}): ReturnType<F>;
declare function startActiveObservation<F extends (guardrail: LangfuseGuardrail) => unknown>(name: string, fn: F, options: StartActiveObservationOpts & {
    asType: "guardrail";
}): ReturnType<F>;
declare function startActiveObservation<F extends (span: LangfuseSpan) => unknown>(name: string, fn: F, options?: StartActiveObservationOpts & {
    asType?: "span";
}): ReturnType<F>;
1570
/**
 * Updates the currently active trace with new attributes.
 *
 * This function finds the currently active OpenTelemetry span and updates it with
 * trace-level attributes. If no active span is found, a warning is logged and the
 * update is skipped.
 *
 * @param attributes - Trace attributes to set
 *
 * @example
 * ```typescript
 * import { updateActiveTrace } from '@elasticdash/tracing';
 *
 * // Inside an active span context
 * updateActiveTrace({
 *   name: 'user-workflow',
 *   userId: '123',
 *   sessionId: 'session-456',
 *   tags: ['production', 'critical'],
 *   public: true
 * });
 * ```
 *
 * @public
 */
declare function updateActiveTrace(attributes: LangfuseTraceAttributes): void;
1595
+ /**
1596
+ * Updates the currently active observation with new attributes.
1597
+ *
1598
+ * This function finds the currently active OpenTelemetry span in the execution context
1599
+ * and updates it with Langfuse-specific attributes. It supports all observation types
1600
+ * through TypeScript overloads, providing type safety for attributes based on the
1601
+ * specified `asType` parameter. If no active span exists, the update is skipped with a warning.
1602
+ *
1603
+ * ## Type Safety
1604
+ * - Automatic type inference based on `asType` parameter
1605
+ * - Compile-time validation of attribute compatibility
1606
+ * - IntelliSense support for observation-specific attributes
1607
+ *
1608
+ * ## Context Requirements
1609
+ * - Must be called within an active OpenTelemetry span context
1610
+ * - Typically used inside `startActiveObservation` callbacks or manual span contexts
1611
+ * - Relies on OpenTelemetry's context propagation mechanism
1612
+ *
1613
+ * ## Supported Observation Types
1614
+ * - **span** (default): General-purpose operations and workflows
1615
+ * - **generation**: LLM calls and AI model interactions
1616
+ * - **agent**: AI agent workflows with tool usage
1617
+ * - **tool**: Individual tool calls and API requests
1618
+ * - **chain**: Multi-step processes and pipelines
1619
+ * - **retriever**: Document retrieval and search operations
1620
+ * - **evaluator**: Quality assessment and scoring
1621
+ * - **guardrail**: Safety checks and content filtering
1622
+ * - **embedding**: Text embedding and vector operations
1623
+ *
1624
+ * @param attributes - Observation-specific attributes to update (type varies by observation type)
1625
+ * @param options - Configuration specifying observation type (defaults to 'span')
1626
+ *
1627
+ * @example
1628
+ * ```typescript
1629
+ * import { updateActiveObservation, startActiveObservation } from '@elasticdash/tracing';
1630
+ *
1631
+ * // Update active span (default)
1632
+ * await startActiveObservation('data-processing', async (observation) => {
1633
+ * // Process data...
1634
+ * const result = await processData(inputData);
1635
+ *
1636
+ * // Update with results
1637
+ * updateActiveObservation({
1638
+ * output: { processedRecords: result.count },
1639
+ * metadata: { processingTime: result.duration }
1640
+ * });
1641
+ * });
1642
+ *
1643
+ * // Update active generation
1644
+ * await startActiveObservation('llm-call', async () => {
1645
+ * const response = await openai.chat.completions.create({
1646
+ * model: 'gpt-4',
1647
+ * messages: [{ role: 'user', content: 'Hello' }]
1648
+ * });
1649
+ *
1650
+ * // Update with LLM-specific attributes
1651
+ * updateActiveObservation({
1652
+ * output: response.choices[0].message,
1653
+ * usageDetails: {
1654
+ * promptTokens: response.usage.prompt_tokens,
1655
+ * completionTokens: response.usage.completion_tokens,
1656
+ * totalTokens: response.usage.total_tokens
1657
+ * },
1658
+ * costDetails: {
1659
+ * totalCost: 0.025,
1660
+ * currency: 'USD'
1661
+ * }
1662
+ * }, { asType: 'generation' });
1663
+ * }, { asType: 'generation' });
1664
+ *
1665
+ * // Update active tool execution
1666
+ * await startActiveObservation('web-search', async () => {
1667
+ * const results = await searchAPI('latest news');
1668
+ *
1669
+ * updateActiveObservation({
1670
+ * output: {
1671
+ * results: results,
1672
+ * count: results.length,
1673
+ * relevanceScore: 0.89
1674
+ * },
1675
+ * metadata: {
1676
+ * searchLatency: 150,
1677
+ * cacheHit: false
1678
+ * }
1679
+ * }, { asType: 'tool' });
1680
+ * }, { asType: 'tool' });
1681
+ *
1682
+ * // Update active agent workflow
1683
+ * await startActiveObservation('research-agent', async () => {
1684
+ * // Agent performs multiple operations...
1685
+ * const findings = await conductResearch();
1686
+ *
1687
+ * updateActiveObservation({
1688
+ * output: {
1689
+ * completed: true,
1690
+ * toolsUsed: ['web-search', 'summarizer'],
1691
+ * iterationsRequired: 3,
1692
+ * confidence: 0.92
1693
+ * },
1694
+ * metadata: {
1695
+ * efficiency: 0.85,
1696
+ * qualityScore: 0.88
1697
+ * }
1698
+ * }, { asType: 'agent' });
1699
+ * }, { asType: 'agent' });
1700
+ *
1701
+ * // Update active chain workflow
1702
+ * await startActiveObservation('rag-pipeline', async () => {
1703
+ * // Execute multi-step RAG process...
1704
+ * const finalResponse = await executeRAGPipeline();
1705
+ *
1706
+ * updateActiveObservation({
1707
+ * output: {
1708
+ * finalResponse: finalResponse,
1709
+ * stepsCompleted: 4,
1710
+ * documentsRetrieved: 8,
1711
+ * qualityScore: 0.91
1712
+ * },
1713
+ * metadata: {
1714
+ * pipelineEfficiency: 0.87,
1715
+ * totalLatency: 3200
1716
+ * }
1717
+ * }, { asType: 'chain' });
1718
+ * }, { asType: 'chain' });
1719
+ * ```
1720
+ *
1721
+ * @see {@link startActiveObservation} - For creating active observation contexts
1722
+ * @see {@link updateActiveTrace} - For updating trace-level attributes
1723
+ *
1724
+ * @public
1725
+ */
1726
// Overloads: the attribute shape is checked at compile time against the observation
// type named in `options.asType` ("span" when options are omitted entirely).
declare function updateActiveObservation(attributes: LangfuseSpanAttributes, options?: {
    asType: "span";
}): void;
declare function updateActiveObservation(attributes: LangfuseGenerationAttributes, options: {
    asType: "generation";
}): void;
declare function updateActiveObservation(attributes: LangfuseAgentAttributes, options: {
    asType: "agent";
}): void;
declare function updateActiveObservation(attributes: LangfuseToolAttributes, options: {
    asType: "tool";
}): void;
declare function updateActiveObservation(attributes: LangfuseChainAttributes, options: {
    asType: "chain";
}): void;
declare function updateActiveObservation(attributes: LangfuseEmbeddingAttributes, options: {
    asType: "embedding";
}): void;
declare function updateActiveObservation(attributes: LangfuseEvaluatorAttributes, options: {
    asType: "evaluator";
}): void;
declare function updateActiveObservation(attributes: LangfuseGuardrailAttributes, options: {
    asType: "guardrail";
}): void;
declare function updateActiveObservation(attributes: LangfuseRetrieverAttributes, options: {
    asType: "retriever";
}): void;
1753
/**
 * Options for the {@link observe} decorator function.
 *
 * @public
 */
interface ObserveOptions {
    /** Name for the observation (defaults to the wrapped function's name). */
    name?: string;
    /** Type of observation to create. */
    asType?: LangfuseObservationType;
    /** Whether to capture the function's arguments as observation input. */
    captureInput?: boolean;
    /** Whether to capture the function's return value as observation output. */
    captureOutput?: boolean;
    /** Parent span context to attach this observation to. */
    parentSpanContext?: SpanContext;
    /** Whether to automatically end the observation when exiting the context. Defaults to true. */
    endOnExit?: boolean;
}
1772
+ /**
1773
+ * Decorator function that automatically wraps any function with Langfuse observability.
1774
+ *
1775
+ * This higher-order function creates a traced version of your function that automatically
1776
+ * handles observation lifecycle, input/output capture, and error tracking. It's perfect
1777
+ * for instrumenting existing functions without modifying their internal logic.
1778
+ *
1779
+ * ## Key Features
1780
+ * - **Zero Code Changes**: Wrap existing functions without modifying their implementation
1781
+ * - **Automatic I/O Capture**: Optionally captures function arguments and return values
1782
+ * - **Error Tracking**: Automatically captures exceptions and sets error status
1783
+ * - **Type Preservation**: Maintains original function signature and return types
1784
+ * - **Async Support**: Works seamlessly with both sync and async functions
1785
+ * - **Flexible Configuration**: Control observation type, naming, and capture behavior
1786
+ *
1787
+ * ## Use Cases
1788
+ * - Instrumenting business logic functions
1789
+ * - Wrapping API calls and external service interactions
1790
+ * - Adding observability to utility functions
1791
+ * - Creating traced versions of third-party functions
1792
+ * - Decorating class methods for observability
1793
+ *
1794
+ * @param fn - The function to wrap with observability (preserves original signature)
1795
+ * @param options - Configuration for observation behavior and capture settings
1796
+ * @returns An instrumented version of the function with identical behavior plus tracing
1797
+ *
1798
+ * @example
1799
+ * ```typescript
1800
+ * import { observe } from '@elasticdash/tracing';
1801
+ *
1802
+ * // Basic function wrapping with automatic I/O capture
1803
+ * const processOrder = observe(
1804
+ * async (orderId: string, items: CartItem[]) => {
1805
+ * const validation = await validateOrder(orderId, items);
1806
+ * const payment = await processPayment(validation);
1807
+ * const shipping = await scheduleShipping(payment);
1808
+ * return { orderId, status: 'confirmed', trackingId: shipping.id };
1809
+ * },
1810
+ * {
1811
+ * name: 'process-order',
1812
+ * asType: 'span',
1813
+ * captureInput: true,
1814
+ * captureOutput: true
1815
+ * }
1816
+ * );
1817
+ *
1818
+ * // LLM function with generation tracking
1819
+ * const generateSummary = observe(
1820
+ * async (document: string, maxWords: number = 100) => {
1821
+ * const response = await openai.chat.completions.create({
1822
+ * model: 'gpt-4-turbo',
1823
+ * messages: [
1824
+ * { role: 'system', content: `Summarize in ${maxWords} words or less` },
1825
+ * { role: 'user', content: document }
1826
+ * ],
1827
+ * max_tokens: maxWords * 2
1828
+ * });
1829
+ * return response.choices[0].message.content;
1830
+ * },
1831
+ * {
1832
+ * name: 'document-summarizer',
1833
+ * asType: 'generation',
1834
+ * captureInput: true,
1835
+ * captureOutput: true
1836
+ * }
1837
+ * );
1838
+ *
1839
+ * // Database query with automatic error tracking
1840
+ * const fetchUserProfile = observe(
1841
+ * async (userId: string) => {
1842
+ * const user = await db.users.findUnique({ where: { id: userId } });
1843
+ * if (!user) throw new Error(`User ${userId} not found`);
1844
+ *
1845
+ * const preferences = await db.preferences.findMany({
1846
+ * where: { userId }
1847
+ * });
1848
+ *
1849
+ * return { ...user, preferences };
1850
+ * },
1851
+ * {
1852
+ * name: 'fetch-user-profile',
1853
+ * asType: 'span',
1854
+ * captureInput: false, // Don't capture sensitive user IDs
1855
+ * captureOutput: true
1856
+ * }
1857
+ * );
1858
+ *
1859
+ * // Vector search with retriever semantics
1860
+ * const searchDocuments = observe(
1861
+ * async (query: string, topK: number = 5) => {
1862
+ * const embedding = await embedText(query);
1863
+ * const results = await vectorDb.search(embedding, topK);
1864
+ * return results.map(r => ({
1865
+ * content: r.metadata.content,
1866
+ * score: r.score,
1867
+ * source: r.metadata.source
1868
+ * }));
1869
+ * },
1870
+ * {
1871
+ * name: 'document-search',
1872
+ * asType: 'retriever',
1873
+ * captureInput: true,
1874
+ * captureOutput: true
1875
+ * }
1876
+ * );
1877
+ *
1878
+ * // Quality evaluation function
1879
+ * const evaluateResponse = observe(
1880
+ * (response: string, reference: string, metric: string = 'similarity') => {
1881
+ * let score: number;
1882
+ *
1883
+ * switch (metric) {
1884
+ * case 'similarity':
1885
+ * score = calculateCosineSimilarity(response, reference);
1886
+ * break;
1887
+ * case 'bleu':
1888
+ * score = calculateBleuScore(response, reference);
1889
+ * break;
1890
+ * default:
1891
+ * throw new Error(`Unknown metric: ${metric}`);
1892
+ * }
1893
+ *
1894
+ * return {
1895
+ * score,
1896
+ * passed: score > 0.8,
1897
+ * metric,
1898
+ * grade: score > 0.9 ? 'excellent' : score > 0.7 ? 'good' : 'needs_improvement'
1899
+ * };
1900
+ * },
1901
+ * {
1902
+ * name: 'response-evaluator',
1903
+ * asType: 'evaluator',
1904
+ * captureInput: true,
1905
+ * captureOutput: true
1906
+ * }
1907
+ * );
1908
+ *
1909
+ * // Content moderation with guardrails
1910
+ * const moderateContent = observe(
1911
+ * async (text: string, policies: string[] = ['profanity', 'spam']) => {
1912
+ * const violations = [];
1913
+ *
1914
+ * for (const policy of policies) {
1915
+ * const result = await checkPolicy(text, policy);
1916
+ * if (result.violation) {
1917
+ * violations.push({ policy, severity: result.severity });
1918
+ * }
1919
+ * }
1920
+ *
1921
+ * return {
1922
+ * allowed: violations.length === 0,
1923
+ * violations,
1924
+ * confidence: 0.95
1925
+ * };
1926
+ * },
1927
+ * {
1928
+ * name: 'content-moderator',
1929
+ * asType: 'guardrail',
1930
+ * captureInput: true,
1931
+ * captureOutput: true
1932
+ * }
1933
+ * );
1934
+ *
1935
+ * // AI agent function with tool usage
1936
+ * const researchAgent = observe(
1937
+ * async (query: string, maxSources: number = 3) => {
1938
+ * // Search for relevant documents
1939
+ * const documents = await searchDocuments(query, maxSources * 2);
1940
+ *
1941
+ * // Filter and rank results
1942
+ * const topDocs = documents
1943
+ * .filter(d => d.score > 0.7)
1944
+ * .slice(0, maxSources);
1945
+ *
1946
+ * // Generate comprehensive answer
1947
+ * const context = topDocs.map(d => d.content).join('\n\n');
1948
+ * const answer = await generateSummary(
1949
+ * `Based on: ${context}\n\nQuestion: ${query}`,
1950
+ * 200
1951
+ * );
1952
+ *
1953
+ * return {
1954
+ * answer,
1955
+ * sources: topDocs.map(d => d.source),
1956
+ * confidence: Math.min(...topDocs.map(d => d.score))
1957
+ * };
1958
+ * },
1959
+ * {
1960
+ * name: 'research-agent',
1961
+ * asType: 'agent',
1962
+ * captureInput: true,
1963
+ * captureOutput: true
1964
+ * }
1965
+ * );
1966
+ *
1967
+ * // Class method decoration
1968
+ * class UserService {
1969
+ * private db: Database;
1970
+ *
1971
+ * // Wrap methods during class construction
1972
+ * constructor(database: Database) {
1973
+ * this.db = database;
1974
+ * this.createUser = observe(this.createUser.bind(this), {
1975
+ * name: 'create-user',
1976
+ * asType: 'span',
1977
+ * captureInput: false, // Sensitive data
1978
+ * captureOutput: true
1979
+ * });
1980
+ * }
1981
+ *
1982
+ * async createUser(userData: UserData) {
1983
+ * // Implementation automatically traced
1984
+ * return await this.db.users.create(userData);
1985
+ * }
1986
+ * }
1987
+ *
1988
+ * // Chain composition - functions remain composable
1989
+ * const processDocument = observe(
1990
+ * async (document: string) => {
1991
+ * const summary = await generateSummary(document, 150);
1992
+ * const moderation = await moderateContent(summary);
1993
+ * const evaluation = evaluateResponse(summary, document, 'similarity');
1994
+ *
1995
+ * return {
1996
+ * summary: moderation.allowed ? summary : '[Content Filtered]',
1997
+ * safe: moderation.allowed,
1998
+ * quality: evaluation.score
1999
+ * };
2000
+ * },
2001
+ * {
2002
+ * name: 'document-processor',
2003
+ * asType: 'chain',
2004
+ * captureInput: true,
2005
+ * captureOutput: true
2006
+ * }
2007
+ * );
2008
+ *
2009
+ * // Usage - functions work exactly as before, just with observability
2010
+ * const order = await processOrder('ord_123', cartItems);
2011
+ * const profile = await fetchUserProfile('user_456');
2012
+ * const research = await researchAgent('What is quantum computing?');
2013
+ * const processed = await processDocument(documentText);
2014
+ * ```
2015
+ *
2016
+ * @see {@link startObservation} for manual observation creation
2017
+ * @see {@link startActiveObservation} for function-scoped observations
2018
+ *
2019
+ * @public
2020
+ */
2021
// The wrapper preserves `fn`'s exact call signature and return type (`T` is passed through).
declare function observe<T extends (...args: any[]) => any>(fn: T, options?: ObserveOptions): T;
2022
+ /**
2023
+ * Creates a trace ID for OpenTelemetry spans.
2024
+ *
2025
+ * @param seed - A seed string for deterministic trace ID generation.
2026
+ * If provided (non-empty), the same seed will always generate the same trace ID.
2027
+ * If empty or falsy, generates a random trace ID.
2028
+ *
2029
+ * Using a seed is especially useful when trying to correlate external,
2030
+ * non-W3C compliant IDs with Langfuse trace IDs. This allows you to later
2031
+ * have a method available for scoring the Langfuse trace given only the
2032
+ * external ID by regenerating the same trace ID from the external ID.
2033
+ *
2034
+ * @returns A Promise that resolves to a 32-character lowercase hexadecimal string suitable for use as an OpenTelemetry trace ID.
2035
+ *
2036
+ * @example
2037
+ * ```typescript
2038
+ * // Deterministic trace ID from seed
2039
+ * const traceId1 = await createTraceId("my-session-123");
2040
+ * const traceId2 = await createTraceId("my-session-123");
2041
+ * console.log(traceId1 === traceId2); // true
2042
+ *
2043
+ * // Random trace ID
2044
+ * const randomId1 = await createTraceId("");
2045
+ * const randomId2 = await createTraceId("");
2046
+ * console.log(randomId1 === randomId2); // false
2047
+ *
2048
+ * // Use with spans
2049
+ * const span = startObservation("my-span", {}, {
2050
+ * parentSpanContext: {
2051
+ * traceId: await createTraceId("session-456"),
2052
+ * spanId: "0123456789abcdef",
2053
+ * traceFlags: 1
2054
+ * }
2055
+ * });
2056
+ *
2057
+ * // Correlating external IDs with Langfuse traces
2058
+ * const externalId = "ext-12345-67890";
2059
+ * const traceId = await createTraceId(externalId);
2060
+ *
2061
+ * // Later, when you need to score this trace, regenerate the same ID
2062
+ * const scoringTraceId = await createTraceId(externalId);
2063
+ * console.log(traceId === scoringTraceId); // true - can now find and score the trace
2064
+ * ```
2065
+ *
2066
+ * @public
2067
+ */
2068
// Resolves to a 32-character lowercase hex trace ID; deterministic for a given
// non-empty seed, random otherwise (see the TSDoc above for correlation use cases).
declare function createTraceId(seed?: string): Promise<string>;
2069
/**
 * Gets the trace ID of the currently active span.
 *
 * If there is no span in the current context, returns undefined.
 *
 * @returns The trace ID of the currently active span, or undefined if no span is active
 *
 * @public
 */
declare function getActiveTraceId(): string | undefined;
2079
/**
 * Gets the ID of the currently active observation (i.e. the active OTEL span).
 *
 * If there is no OTEL span in the current context, returns undefined.
 *
 * @returns The span ID of the currently active OTEL span, or undefined if no OTEL span is active
 *
 * @public
 */
declare function getActiveSpanId(): string | undefined;
2089
+
2090
// Public API surface of the tracing package (type-only symbols are exported with
// `type` so they remain erasable under isolatedModules).
export { LangfuseAgent, LangfuseChain, LangfuseEmbedding, LangfuseEvaluator, LangfuseEvent, type LangfuseEventAttributes, LangfuseGeneration, type LangfuseGenerationAttributes, LangfuseGuardrail, type LangfuseObservation, type LangfuseObservationAttributes, type LangfuseObservationType, LangfuseRetriever, LangfuseSpan, type LangfuseSpanAttributes, LangfuseTool, type LangfuseTraceAttributes, type ObservationLevel, type ObserveOptions, type StartActiveObservationContext, type StartActiveObservationOpts, type StartObservationOptions, type StartObservationOpts, createObservationAttributes, createTraceAttributes, createTraceId, getActiveSpanId, getActiveTraceId, getLangfuseTracer, getLangfuseTracerProvider, observe, setLangfuseTracerProvider, startActiveObservation, startObservation, updateActiveObservation, updateActiveTrace };