@mastra/mcp-docs-server 0.13.34 → 0.13.35-alpha.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +12 -12
  2. package/.docs/organized/changelogs/%40mastra%2Fcore.md +31 -31
  3. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +9 -9
  4. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +9 -9
  5. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +9 -9
  6. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +9 -9
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +19 -19
  8. package/.docs/organized/changelogs/%40mastra%2Flance.md +10 -10
  9. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +8 -8
  10. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +11 -11
  11. package/.docs/organized/changelogs/%40mastra%2Fpg.md +10 -10
  12. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +16 -16
  13. package/.docs/organized/changelogs/%40mastra%2Freact.md +10 -10
  14. package/.docs/organized/changelogs/%40mastra%2Fserver.md +14 -14
  15. package/.docs/organized/changelogs/create-mastra.md +3 -3
  16. package/.docs/organized/changelogs/mastra.md +9 -9
  17. package/.docs/organized/code-examples/memory-with-mongodb.md +208 -0
  18. package/.docs/raw/getting-started/project-structure.mdx +1 -1
  19. package/.docs/raw/getting-started/studio.mdx +4 -4
  20. package/.docs/raw/memory/overview.mdx +1 -1
  21. package/.docs/raw/memory/semantic-recall.mdx +4 -3
  22. package/.docs/raw/memory/storage/memory-with-libsql.mdx +141 -0
  23. package/.docs/raw/memory/storage/memory-with-pg.mdx +138 -0
  24. package/.docs/raw/memory/storage/memory-with-upstash.mdx +147 -0
  25. package/.docs/raw/observability/ai-tracing/exporters/arize.mdx +201 -0
  26. package/.docs/raw/observability/ai-tracing/overview.mdx +12 -8
  27. package/.docs/raw/reference/observability/ai-tracing/exporters/arize.mdx +160 -0
  28. package/.docs/raw/reference/observability/ai-tracing/exporters/braintrust.mdx +2 -2
  29. package/.docs/raw/reference/observability/ai-tracing/exporters/langfuse.mdx +1 -1
  30. package/.docs/raw/reference/observability/ai-tracing/exporters/langsmith.mdx +2 -2
  31. package/.docs/raw/reference/observability/ai-tracing/exporters/otel.mdx +1 -1
  32. package/.docs/raw/reference/observability/ai-tracing/interfaces.mdx +48 -21
  33. package/.docs/raw/reference/storage/mongodb.mdx +146 -0
  34. package/.docs/raw/server-db/storage.mdx +1 -0
  35. package/.docs/raw/workflows/agents-and-tools.mdx +15 -1
  36. package/.docs/raw/workflows/human-in-the-loop.mdx +268 -0
  37. package/CHANGELOG.md +7 -0
  38. package/package.json +11 -4
@@ -0,0 +1,201 @@
1
+ ---
2
+ title: "Arize Exporter | AI Tracing | Observability | Mastra Docs"
3
+ description: "Send AI traces to Arize Phoenix or Arize AX using OpenTelemetry and OpenInference"
4
+ ---
5
+
6
+ import { Callout } from "nextra/components";
7
+
8
+ # Arize Exporter
9
+
10
+ [Arize](https://arize.com/) provides observability platforms for AI applications through [Phoenix](https://phoenix.arize.com/) (open-source) and [Arize AX](https://arize.com/generative-ai/) (enterprise). The Arize exporter sends AI traces using OpenTelemetry and [OpenInference](https://github.com/Arize-ai/openinference/tree/main/spec) semantic conventions, compatible with any OpenTelemetry platform that supports OpenInference.
11
+
12
+ ## When to Use Arize
13
+
14
+ Arize is ideal when you need:
15
+ - **OpenInference standards** - Industry-standard semantic conventions for AI traces
16
+ - **Flexible deployment** - Self-hosted Phoenix or managed Arize AX
17
+ - **OpenTelemetry compatibility** - Works with any OTLP-compatible platform
18
+ - **Comprehensive AI observability** - LLM traces, embeddings, and retrieval analytics
19
+ - **Open-source option** - Full-featured local deployment with Phoenix
20
+
21
+ ## Installation
22
+
23
+ ```bash npm2yarn
24
+ npm install @mastra/arize
25
+ ```
26
+
27
+ ## Configuration
28
+
29
+ ### Phoenix Setup
30
+
31
+ Phoenix is an open-source observability platform that can be self-hosted or used via Phoenix Cloud.
32
+
33
+ #### Prerequisites
34
+
35
+ 1. **Phoenix Instance**: Deploy using Docker or sign up at [Phoenix Cloud](https://app.phoenix.arize.com/login)
36
+ 2. **Endpoint**: Your Phoenix endpoint URL (ends in `/v1/traces`)
37
+ 3. **API Key**: Optional for unauthenticated instances, required for Phoenix Cloud
38
+ 4. **Environment Variables**: Set your configuration
39
+
40
+ ```bash filename=".env"
41
+ PHOENIX_ENDPOINT=http://localhost:6006/v1/traces # Or your Phoenix Cloud URL
42
+ PHOENIX_API_KEY=your-api-key # Optional for local instances
43
+ PHOENIX_PROJECT_NAME=mastra-service # Optional, defaults to 'mastra-service'
44
+ ```
45
+
46
+ #### Basic Setup
47
+
48
+ ```typescript filename="src/mastra/index.ts"
49
+ import { Mastra } from "@mastra/core";
50
+ import { ArizeExporter } from "@mastra/arize";
51
+
52
+ export const mastra = new Mastra({
53
+ observability: {
54
+ configs: {
55
+ arize: {
56
+ serviceName: process.env.PHOENIX_PROJECT_NAME || 'mastra-service',
57
+ exporters: [
58
+ new ArizeExporter({
59
+ endpoint: process.env.PHOENIX_ENDPOINT!,
60
+ apiKey: process.env.PHOENIX_API_KEY,
61
+ projectName: process.env.PHOENIX_PROJECT_NAME,
62
+ }),
63
+ ],
64
+ },
65
+ },
66
+ },
67
+ });
68
+ ```
69
+
70
+ <Callout type="info">
71
+ **Quick Start with Docker**
72
+
73
+ Test locally with an in-memory Phoenix instance:
74
+
75
+ ```bash
76
+ docker run --pull=always -d --name arize-phoenix -p 6006:6006 \
77
+ -e PHOENIX_SQL_DATABASE_URL="sqlite:///:memory:" \
78
+ arizephoenix/phoenix:latest
79
+ ```
80
+
81
+ Set `PHOENIX_ENDPOINT=http://localhost:6006/v1/traces` and run your Mastra agent to see traces at [localhost:6006](http://localhost:6006).
82
+ </Callout>
83
+
84
+ ### Arize AX Setup
85
+
86
+ Arize AX is an enterprise observability platform with advanced features for production AI systems.
87
+
88
+ #### Prerequisites
89
+
90
+ 1. **Arize AX Account**: Sign up at [app.arize.com](https://app.arize.com/)
91
+ 2. **Space ID**: Your organization's space identifier
92
+ 3. **API Key**: Generate in Arize AX settings
93
+ 4. **Environment Variables**: Set your credentials
94
+
95
+ ```bash filename=".env"
96
+ ARIZE_SPACE_ID=your-space-id
97
+ ARIZE_API_KEY=your-api-key
98
+ ARIZE_PROJECT_NAME=mastra-service # Optional
99
+ ```
100
+
101
+ #### Basic Setup
102
+
103
+ ```typescript filename="src/mastra/index.ts"
104
+ import { Mastra } from "@mastra/core";
105
+ import { ArizeExporter } from "@mastra/arize";
106
+
107
+ export const mastra = new Mastra({
108
+ observability: {
109
+ configs: {
110
+ arize: {
111
+ serviceName: process.env.ARIZE_PROJECT_NAME || 'mastra-service',
112
+ exporters: [
113
+ new ArizeExporter({
114
+ apiKey: process.env.ARIZE_API_KEY!,
115
+ spaceId: process.env.ARIZE_SPACE_ID!,
116
+ projectName: process.env.ARIZE_PROJECT_NAME,
117
+ }),
118
+ ],
119
+ },
120
+ },
121
+ },
122
+ });
123
+ ```
124
+
125
+ ## Configuration Options
126
+
127
+ The Arize exporter supports advanced configuration for fine-tuning OpenTelemetry behavior:
128
+
129
+ ### Complete Configuration
130
+
131
+ ```typescript
132
+ new ArizeExporter({
133
+ // Phoenix Configuration
134
+ endpoint: 'https://your-collector.example.com/v1/traces', // Required for Phoenix
135
+
136
+ // Arize AX Configuration
137
+ spaceId: 'your-space-id', // Required for Arize AX
138
+
139
+ // Shared Configuration
140
+ apiKey: 'your-api-key', // Required for authenticated endpoints
141
+ projectName: 'mastra-service', // Optional project name
142
+
143
+ // Optional OTLP settings
144
+ headers: {
145
+ 'x-custom-header': 'value', // Additional headers for OTLP requests
146
+ },
147
+
148
+ // Debug and performance tuning
149
+ logLevel: 'debug', // Logging: debug | info | warn | error
150
+ batchSize: 512, // Batch size before exporting spans
151
+ timeout: 30000, // Timeout in ms before exporting spans
152
+
153
+ // Custom resource attributes
154
+ resourceAttributes: {
155
+ 'deployment.environment': process.env.NODE_ENV,
156
+ 'service.version': process.env.APP_VERSION,
157
+ },
158
+ })
159
+ ```
160
+
161
+ ### Batch Processing Options
162
+
163
+ Control how traces are batched and exported:
164
+
165
+ ```typescript
166
+ new ArizeExporter({
167
+ endpoint: process.env.PHOENIX_ENDPOINT!,
168
+ apiKey: process.env.PHOENIX_API_KEY,
169
+
170
+ // Batch processing configuration
171
+ batchSize: 512, // Number of spans to batch (default: 512)
172
+ timeout: 30000, // Max time in ms to wait before export (default: 30000)
173
+ })
174
+ ```
175
+
176
+ ### Resource Attributes
177
+
178
+ Add custom attributes to all exported spans:
179
+
180
+ ```typescript
181
+ new ArizeExporter({
182
+ endpoint: process.env.PHOENIX_ENDPOINT!,
183
+ resourceAttributes: {
184
+ 'deployment.environment': process.env.NODE_ENV,
185
+ 'service.namespace': 'production',
186
+ 'service.instance.id': process.env.HOSTNAME,
187
+ 'custom.attribute': 'value',
188
+ },
189
+ })
190
+ ```
191
+
192
+ ## OpenInference Semantic Conventions
193
+
194
+ This exporter implements the [OpenInference Semantic Conventions](https://github.com/Arize-ai/openinference/tree/main/spec) for generative AI applications, providing standardized trace structure across different observability platforms.
195
+
196
+ ## Related
197
+
198
+ - [AI Tracing Overview](/docs/observability/ai-tracing/overview)
199
+ - [Phoenix Documentation](https://docs.arize.com/phoenix)
200
+ - [Arize AX Documentation](https://docs.arize.com/)
201
+ - [OpenInference Specification](https://github.com/Arize-ai/openinference/tree/main/spec)
@@ -89,14 +89,13 @@ Mastra provides two built-in exporters that work out of the box:
89
89
 
90
90
  In addition to the internal exporters, Mastra supports integration with popular observability platforms. These exporters allow you to leverage your existing monitoring infrastructure and take advantage of platform-specific features like alerting, dashboards, and correlation with other application metrics.
91
91
 
92
+ - **[Arize](/docs/observability/ai-tracing/exporters/arize)** - Exports traces to Arize Phoenix or Arize AX using OpenInference semantic conventions
92
93
  - **[Braintrust](/docs/observability/ai-tracing/exporters/braintrust)** - Exports traces to Braintrust's eval and observability platform
93
94
  - **[Langfuse](/docs/observability/ai-tracing/exporters/langfuse)** - Sends traces to the Langfuse open-source LLM engineering platform
94
- - **[LangSmith](/docs/observability/ai-tracing/exporters/langsmith)** - Pushes traces into LangSmiths observability and evaluation toolkit
95
+ - **[LangSmith](/docs/observability/ai-tracing/exporters/langsmith)** - Pushes traces into LangSmith's observability and evaluation toolkit
95
96
  - **[OpenTelemetry](/docs/observability/ai-tracing/exporters/otel)** - Deliver traces to any OpenTelemetry-compatible observability system
96
97
  - Supports: Dash0, Laminar, New Relic, SigNoz, Traceloop, Zipkin, and others!
97
98
 
98
- - **Arize** - Coming soon!
99
-
100
99
  ## Sampling Strategies
101
100
 
102
101
  Sampling allows you to control which traces are collected, helping you balance between observability needs and resource costs. In production environments with high traffic, collecting every trace can be expensive and unnecessary. Sampling strategies let you capture a representative subset of traces while ensuring you don't miss critical information about errors or important operations.
@@ -311,7 +310,7 @@ When creating a custom config with external exporters, you might lose access to
311
310
 
312
311
  ```ts filename="src/mastra/index.ts" showLineNumbers copy
313
312
  import { DefaultExporter, CloudExporter } from '@mastra/core/ai-tracing';
314
- import { LangfuseExporter } from '@mastra/langfuse';
313
+ import { ArizeExporter } from '@mastra/arize';
315
314
 
316
315
  export const mastra = new Mastra({
317
316
  observability: {
@@ -320,7 +319,10 @@ export const mastra = new Mastra({
320
319
  production: {
321
320
  serviceName: 'my-service',
322
321
  exporters: [
323
- new LangfuseExporter(), // External exporter
322
+ new ArizeExporter({ // External exporter
323
+ endpoint: process.env.PHOENIX_ENDPOINT,
324
+ apiKey: process.env.PHOENIX_API_KEY,
325
+ }),
324
326
  new DefaultExporter(), // Keep Playground access
325
327
  new CloudExporter(), // Keep Cloud access
326
328
  ],
@@ -331,7 +333,7 @@ export const mastra = new Mastra({
331
333
  ```
332
334
 
333
335
  This configuration sends traces to all three destinations simultaneously:
334
- - **Langfuse** for external observability
336
+ - **Arize Phoenix/AX** for external observability
335
337
  - **DefaultExporter** for local Playground access
336
338
  - **CloudExporter** for Mastra Cloud dashboard
337
339
 
@@ -759,8 +761,9 @@ Traces are available in multiple locations:
759
761
 
760
762
  - **Mastra Playground** - Local development environment
761
763
  - **Mastra Cloud** - Production monitoring dashboard
762
- - **Langfuse Dashboard** - When using Langfuse exporter
764
+ - **Arize Phoenix / Arize AX** - When using Arize exporter
763
765
  - **Braintrust Console** - When using Braintrust exporter
766
+ - **Langfuse Dashboard** - When using Langfuse exporter
764
767
 
765
768
  ## See Also
766
769
 
@@ -777,8 +780,9 @@ Traces are available in multiple locations:
777
780
  - [DefaultExporter](/reference/observability/ai-tracing/exporters/default-exporter) - Storage persistence
778
781
  - [CloudExporter](/reference/observability/ai-tracing/exporters/cloud-exporter) - Mastra Cloud integration
779
782
  - [ConsoleExporter](/reference/observability/ai-tracing/exporters/console-exporter) - Debug output
780
- - [Langfuse](/reference/observability/ai-tracing/exporters/langfuse) - Langfuse integration
783
+ - [Arize](/reference/observability/ai-tracing/exporters/arize) - Arize Phoenix and Arize AX integration
781
784
  - [Braintrust](/reference/observability/ai-tracing/exporters/braintrust) - Braintrust integration
785
+ - [Langfuse](/reference/observability/ai-tracing/exporters/langfuse) - Langfuse integration
782
786
  - [OpenTelemetry](/reference/observability/ai-tracing/exporters/otel) - OTEL-compatible platforms
783
787
 
784
788
  ### Processors
@@ -0,0 +1,160 @@
1
+ ---
2
+ title: "ArizeExporter | Exporters | AI Tracing | Reference"
3
+ description: Arize exporter for AI tracing using OpenInference
4
+ ---
5
+
6
+ import { PropertiesTable } from "@/components/properties-table";
7
+
8
+ # ArizeExporter
9
+
10
+ Sends AI tracing data to Arize Phoenix, Arize AX, or any OpenTelemetry-compatible observability platform that supports OpenInference semantic conventions.
11
+
12
+ ## Constructor
13
+
14
+ ```typescript
15
+ new ArizeExporter(config: ArizeExporterConfig)
16
+ ```
17
+
18
+ ## ArizeExporterConfig
19
+
20
+ ```typescript
21
+ interface ArizeExporterConfig {
22
+ // Phoenix / OpenTelemetry configuration
23
+ endpoint?: string;
24
+ apiKey?: string;
25
+
26
+ // Arize AX configuration
27
+ spaceId?: string;
28
+
29
+ // Common configuration
30
+ projectName?: string;
31
+ headers?: Record<string, string>;
32
+
33
+ // Inherited from OtelExporterConfig
34
+ timeout?: number;
35
+ batchSize?: number;
36
+ logLevel?: 'debug' | 'info' | 'warn' | 'error';
37
+ resourceAttributes?: Record<string, any>;
38
+ }
39
+ ```
40
+
41
+ <PropertiesTable
42
+ props={[
43
+ {
44
+ name: "endpoint",
45
+ type: "string",
46
+ description: "Collector endpoint for trace exports (e.g., 'http://localhost:6006/v1/traces' for Phoenix). Required for Phoenix. Optional for Arize AX (defaults to https://otlp.arize.com/v1/traces).",
47
+ required: false,
48
+ },
49
+ {
50
+ name: "apiKey",
51
+ type: "string",
52
+ description: "API key for authentication. Required for Phoenix Cloud and Arize AX. Optional for self-hosted Phoenix.",
53
+ required: false,
54
+ },
55
+ {
56
+ name: "spaceId",
57
+ type: "string",
58
+ description: "Arize AX space identifier. Required when sending traces to Arize AX.",
59
+ required: false,
60
+ },
61
+ {
62
+ name: "projectName",
63
+ type: "string",
64
+ description: "Project name added as OpenInference resource attribute",
65
+ required: false,
66
+ },
67
+ {
68
+ name: "headers",
69
+ type: "Record<string, string>",
70
+ description: "Additional headers for OTLP requests",
71
+ required: false,
72
+ },
73
+ {
74
+ name: "timeout",
75
+ type: "number",
76
+ description: "Timeout in milliseconds before exporting spans (default: 30000)",
77
+ required: false,
78
+ },
79
+ {
80
+ name: "batchSize",
81
+ type: "number",
82
+ description: "Number of spans to batch before export (default: 512)",
83
+ required: false,
84
+ },
85
+ {
86
+ name: "logLevel",
87
+ type: "'debug' | 'info' | 'warn' | 'error'",
88
+ description: "Logger level (default: 'warn')",
89
+ required: false,
90
+ },
91
+ {
92
+ name: "resourceAttributes",
93
+ type: "Record<string, any>",
94
+ description: "Custom resource attributes added to each span",
95
+ required: false,
96
+ },
97
+ ]}
98
+ />
99
+
100
+ ## Methods
101
+
102
+ ### exportEvent
103
+
104
+ ```typescript
105
+ async exportEvent(event: AITracingEvent): Promise<void>
106
+ ```
107
+
108
+ Exports a tracing event to the configured endpoint.
109
+
110
+ ### export
111
+
112
+ ```typescript
113
+ async export(spans: ReadOnlyAISpan[]): Promise<void>
114
+ ```
115
+
116
+ Batch exports spans using OpenTelemetry with OpenInference semantic conventions.
117
+
118
+ ### shutdown
119
+
120
+ ```typescript
121
+ async shutdown(): Promise<void>
122
+ ```
123
+
124
+ Flushes pending data and shuts down the client.
125
+
126
+ ## Usage
127
+
128
+ ### Phoenix Configuration
129
+
130
+ ```typescript
131
+ import { ArizeExporter } from '@mastra/arize';
132
+
133
+ const exporter = new ArizeExporter({
134
+ endpoint: 'http://localhost:6006/v1/traces',
135
+ apiKey: process.env.PHOENIX_API_KEY, // Optional for local Phoenix
136
+ projectName: 'my-ai-project',
137
+ });
138
+ ```
139
+
140
+ ### Arize AX Configuration
141
+
142
+ ```typescript
143
+ import { ArizeExporter } from '@mastra/arize';
144
+
145
+ const exporter = new ArizeExporter({
146
+ spaceId: process.env.ARIZE_SPACE_ID!,
147
+ apiKey: process.env.ARIZE_API_KEY!,
148
+ projectName: 'my-ai-project',
149
+ });
150
+ ```
151
+
152
+ ## OpenInference Semantic Conventions
153
+
154
+ The ArizeExporter implements [OpenInference Semantic Conventions](https://github.com/Arize-ai/openinference/tree/main/spec) for generative AI applications, providing standardized trace structure across different observability platforms.
155
+
156
+ ## Related
157
+
158
+ - [ArizeExporter Documentation](/docs/observability/ai-tracing/exporters/arize)
159
+ - [Phoenix Documentation](https://docs.arize.com/phoenix)
160
+ - [Arize AX Documentation](https://docs.arize.com/)
@@ -103,8 +103,8 @@ const exporter = new BraintrustExporter({
103
103
 
104
104
  | AI Span Type | Braintrust Type |
105
105
  |--------------|-----------------|
106
- | `LLM_GENERATION` | `llm` |
107
- | `LLM_CHUNK` | `llm` |
106
+ | `MODEL_GENERATION` | `llm` |
107
+ | `MODEL_CHUNK` | `llm` |
108
108
  | `TOOL_CALL` | `tool` |
109
109
  | `MCP_TOOL_CALL` | `tool` |
110
110
  | `WORKFLOW_CONDITIONAL_EVAL` | `function` |
@@ -111,6 +111,6 @@ const exporter = new LangfuseExporter({
111
111
  ## Span Mapping
112
112
 
113
113
  - Root spans → Langfuse traces
114
- - `LLM_GENERATION` spans → Langfuse generations
114
+ - `MODEL_GENERATION` spans → Langfuse generations
115
115
  - All other spans → Langfuse spans
116
116
  - Event spans → Langfuse events
@@ -105,8 +105,8 @@ const exporter = new LangSmithExporter({
105
105
 
106
106
  | AI Span Type | LangSmith Type |
107
107
  |--------------|----------------|
108
- | `LLM_GENERATION` | `llm` |
109
- | `LLM_CHUNK` | `llm` |
108
+ | `MODEL_GENERATION` | `llm` |
109
+ | `MODEL_CHUNK` | `llm` |
110
110
  | `TOOL_CALL` | `tool` |
111
111
  | `MCP_TOOL_CALL` | `tool` |
112
112
  | All others | `chain` |
@@ -317,7 +317,7 @@ const exporter = new OtelExporter({
317
317
  The exporter maps Mastra AI spans to OpenTelemetry spans following GenAI semantic conventions:
318
318
 
319
319
  ### Span Names
320
- - `LLM_GENERATION` → `chat {model}` or `tool_selection {model}`
320
+ - `MODEL_GENERATION` → `chat {model}` or `tool_selection {model}`
321
321
  - `TOOL_CALL` → `tool.execute {tool_name}`
322
322
  - `AGENT_RUN` → `agent.{agent_id}`
323
323
  - `WORKFLOW_RUN` → `workflow.{workflow_id}`
@@ -43,8 +43,9 @@ Mapping of span types to their corresponding attribute interfaces.
43
43
  interface AISpanTypeMap {
44
44
  AGENT_RUN: AgentRunAttributes;
45
45
  WORKFLOW_RUN: WorkflowRunAttributes;
46
- LLM_GENERATION: LLMGenerationAttributes;
47
- LLM_CHUNK: LLMChunkAttributes;
46
+ MODEL_GENERATION: ModelGenerationAttributes;
47
+ MODEL_STEP: ModelStepAttributes;
48
+ MODEL_CHUNK: ModelChunkAttributes;
48
49
  TOOL_CALL: ToolCallAttributes;
49
50
  MCP_TOOL_CALL: MCPToolCallAttributes;
50
51
  WORKFLOW_STEP: WorkflowStepAttributes;
@@ -160,16 +161,19 @@ AI-specific span types with their associated metadata.
160
161
  enum AISpanType {
161
162
  /** Agent run - root span for agent processes */
162
163
  AGENT_RUN = 'agent_run',
163
-
164
+
164
165
  /** Generic span for custom operations */
165
166
  GENERIC = 'generic',
166
-
167
- /** LLM generation with model calls, token usage, prompts, completions */
168
- LLM_GENERATION = 'llm_generation',
169
-
170
- /** Individual LLM streaming chunk/event */
171
- LLM_CHUNK = 'llm_chunk',
172
-
167
+
168
+ /** Model generation with model calls, token usage, prompts, completions */
169
+ MODEL_GENERATION = 'model_generation',
170
+
171
+ /** Single model execution step within a generation (one API call) */
172
+ MODEL_STEP = 'model_step',
173
+
174
+ /** Individual model streaming chunk/event */
175
+ MODEL_CHUNK = 'model_chunk',
176
+
173
177
  /** MCP (Model Context Protocol) tool execution */
174
178
  MCP_TOOL_CALL = 'mcp_tool_call',
175
179
 
@@ -235,19 +239,19 @@ interface AgentRunAttributes {
235
239
  }
236
240
  ```
237
241
 
238
- ### LLMGenerationAttributes
242
+ ### ModelGenerationAttributes
239
243
 
240
- LLM Generation attributes.
244
+ Model Generation attributes.
241
245
 
242
246
  ```typescript
243
- interface LLMGenerationAttributes {
247
+ interface ModelGenerationAttributes {
244
248
  /** Model name (e.g., 'gpt-4', 'claude-3') */
245
249
  model?: string;
246
250
 
247
251
  /** Model provider (e.g., 'openai', 'anthropic') */
248
252
  provider?: string;
249
253
 
250
- /** Type of result/output this LLM call produced */
254
+ /** Type of result/output this model call produced */
251
255
  resultType?: 'tool_selection' | 'response_generation' | 'reasoning' | 'planning';
252
256
 
253
257
  /** Token usage statistics */
@@ -280,12 +284,35 @@ interface LLMGenerationAttributes {
280
284
  }
281
285
  ```
282
286
 
283
- ### LLMChunkAttributes
287
+ ### ModelStepAttributes
284
288
 
285
- LLM Chunk attributes - for individual streaming chunks/events.
289
+ Model Step attributes - for a single model execution within a generation.
286
290
 
287
291
  ```typescript
288
- interface LLMChunkAttributes {
292
+ interface ModelStepAttributes {
293
+ /** Index of this step in the generation (0, 1, 2, ...) */
294
+ stepIndex?: number;
295
+
296
+ /** Token usage statistics */
297
+ usage?: UsageStats;
298
+
299
+ /** Reason this step finished (stop, tool-calls, length, etc.) */
300
+ finishReason?: string;
301
+
302
+ /** Should execution continue */
303
+ isContinued?: boolean;
304
+
305
+ /** Result warnings */
306
+ warnings?: Record<string, any>;
307
+ }
308
+ ```
309
+
310
+ ### ModelChunkAttributes
311
+
312
+ Model Chunk attributes - for individual streaming chunks/events.
313
+
314
+ ```typescript
315
+ interface ModelChunkAttributes {
289
316
  /** Type of chunk (text-delta, reasoning-delta, tool-call, etc.) */
290
317
  chunkType?: string;
291
318
 
@@ -626,10 +653,10 @@ enum InternalSpans {
626
653
 
627
654
  /** Tool spans are marked internal */
628
655
  TOOL = 1 << 2,
629
-
630
- /** LLM spans are marked internal */
631
- LLM = 1 << 3,
632
-
656
+
657
+ /** Model spans are marked internal */
658
+ MODEL = 1 << 3,
659
+
633
660
  /** All spans are marked internal */
634
661
  ALL = (1 << 4) - 1,
635
662
  }