@mastra/mcp-docs-server 0.13.34 → 0.13.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/.docs/organized/changelogs/%40internal%2Fchangeset-cli.md +2 -0
  2. package/.docs/organized/changelogs/%40internal%2Fexternal-types.md +2 -0
  3. package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +9 -9
  4. package/.docs/organized/changelogs/%40internal%2Ftypes-builder.md +2 -0
  5. package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +23 -23
  6. package/.docs/organized/changelogs/%40mastra%2Fcore.md +61 -61
  7. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +17 -17
  8. package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +17 -17
  9. package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +17 -17
  10. package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +17 -17
  11. package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +37 -37
  12. package/.docs/organized/changelogs/%40mastra%2Flance.md +19 -19
  13. package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +15 -15
  14. package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +19 -19
  15. package/.docs/organized/changelogs/%40mastra%2Fpg.md +19 -19
  16. package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +31 -31
  17. package/.docs/organized/changelogs/%40mastra%2Freact.md +19 -19
  18. package/.docs/organized/changelogs/%40mastra%2Fserver.md +27 -27
  19. package/.docs/organized/changelogs/create-mastra.md +5 -5
  20. package/.docs/organized/changelogs/mastra.md +17 -17
  21. package/.docs/organized/code-examples/memory-with-mongodb.md +208 -0
  22. package/.docs/raw/getting-started/project-structure.mdx +1 -1
  23. package/.docs/raw/getting-started/studio.mdx +4 -4
  24. package/.docs/raw/memory/overview.mdx +1 -1
  25. package/.docs/raw/memory/semantic-recall.mdx +4 -3
  26. package/.docs/raw/memory/storage/memory-with-libsql.mdx +141 -0
  27. package/.docs/raw/memory/storage/memory-with-pg.mdx +138 -0
  28. package/.docs/raw/memory/storage/memory-with-upstash.mdx +147 -0
  29. package/.docs/raw/observability/ai-tracing/exporters/arize.mdx +201 -0
  30. package/.docs/raw/observability/ai-tracing/overview.mdx +12 -8
  31. package/.docs/raw/reference/observability/ai-tracing/exporters/arize.mdx +160 -0
  32. package/.docs/raw/reference/observability/ai-tracing/exporters/braintrust.mdx +2 -2
  33. package/.docs/raw/reference/observability/ai-tracing/exporters/langfuse.mdx +1 -1
  34. package/.docs/raw/reference/observability/ai-tracing/exporters/langsmith.mdx +2 -2
  35. package/.docs/raw/reference/observability/ai-tracing/exporters/otel.mdx +1 -1
  36. package/.docs/raw/reference/observability/ai-tracing/interfaces.mdx +48 -21
  37. package/.docs/raw/reference/storage/mongodb.mdx +146 -0
  38. package/.docs/raw/server-db/storage.mdx +1 -0
  39. package/.docs/raw/workflows/agents-and-tools.mdx +15 -1
  40. package/.docs/raw/workflows/human-in-the-loop.mdx +268 -0
  41. package/CHANGELOG.md +14 -0
  42. package/package.json +11 -4
package/.docs/raw/memory/storage/memory-with-libsql.mdx
@@ -0,0 +1,141 @@
+ ---
+ title: "Example: Memory with LibSQL | Memory | Mastra Docs"
+ description: Example for how to use Mastra's memory system with LibSQL storage and vector database backend.
+ ---
+
+ # Memory with LibSQL
+
+ This example demonstrates how to use Mastra's memory system with LibSQL as the storage backend.
+
+ ## Prerequisites
+
+ This example uses the `openai` model. Make sure to add `OPENAI_API_KEY` to your `.env` file.
+
+ ```bash filename=".env" copy
+ OPENAI_API_KEY=<your-api-key>
+ ```
+
+ And install the following package:
+
+ ```bash copy
+ npm install @mastra/libsql
+ ```
+
+ ## Adding memory to an agent
+
+ To add LibSQL memory to an agent, use the `Memory` class and create a new `storage` key using `LibSQLStore`. The `url` can be either a remote location or a local file system resource.
+
+ ```typescript filename="src/mastra/agents/example-libsql-agent.ts" showLineNumbers copy
+ import { Memory } from "@mastra/memory";
+ import { Agent } from "@mastra/core/agent";
+ import { openai } from "@ai-sdk/openai";
+ import { LibSQLStore } from "@mastra/libsql";
+
+ export const libsqlAgent = new Agent({
+   name: "libsql-agent",
+   instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
+   model: openai("gpt-4o"),
+   memory: new Memory({
+     storage: new LibSQLStore({
+       url: "file:libsql-agent.db"
+     }),
+     options: {
+       threads: {
+         generateTitle: true
+       }
+     }
+   })
+ });
+ ```
+
+ ## Local embeddings with fastembed
+
+ Embeddings are numeric vectors used by memory’s `semanticRecall` to retrieve related messages by meaning (not keywords). This setup uses `@mastra/fastembed` to generate vector embeddings.
+
+ Install `fastembed` to get started:
+
+ ```bash copy
+ npm install @mastra/fastembed
+ ```
+
+ Add the following to your agent:
+
+ ```typescript filename="src/mastra/agents/example-libsql-agent.ts" showLineNumbers copy
+ import { Memory } from "@mastra/memory";
+ import { Agent } from "@mastra/core/agent";
+ import { openai } from "@ai-sdk/openai";
+ import { LibSQLStore, LibSQLVector } from "@mastra/libsql";
+ import { fastembed } from "@mastra/fastembed";
+
+ export const libsqlAgent = new Agent({
+   name: "libsql-agent",
+   instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
+   model: openai("gpt-4o"),
+   memory: new Memory({
+     storage: new LibSQLStore({
+       url: "file:libsql-agent.db"
+     }),
+     vector: new LibSQLVector({
+       connectionUrl: "file:libsql-agent.db"
+     }),
+     embedder: fastembed,
+     options: {
+       lastMessages: 10,
+       semanticRecall: {
+         topK: 3,
+         messageRange: 2
+       },
+       threads: {
+         generateTitle: true
+       }
+     }
+   })
+ });
+ ```
+
+ ## Usage example
+
+ Use `memoryOptions` to scope recall for this request. Set `lastMessages: 5` to limit recency-based recall, and use `semanticRecall` to fetch the `topK: 3` most relevant messages, including `messageRange: 2` neighboring messages for context around each match.
+
+ ```typescript filename="src/test-libsql-agent.ts" showLineNumbers copy
+ import "dotenv/config";
+
+ import { mastra } from "./mastra";
+
+ const threadId = "123";
+ const resourceId = "user-456";
+
+ const agent = mastra.getAgent("libsqlAgent");
+
+ const message = await agent.stream("My name is Mastra", {
+   memory: {
+     thread: threadId,
+     resource: resourceId
+   }
+ });
+
+ await message.textStream.pipeTo(new WritableStream());
+
+ const stream = await agent.stream("What's my name?", {
+   memory: {
+     thread: threadId,
+     resource: resourceId
+   },
+   memoryOptions: {
+     lastMessages: 5,
+     semanticRecall: {
+       topK: 3,
+       messageRange: 2
+     }
+   }
+ });
+
+ for await (const chunk of stream.textStream) {
+   process.stdout.write(chunk);
+ }
+ ```
+
+ ## Related
+
+ - [Calling Agents](../agents/calling-agents.mdx)
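
The usage example above imports a shared `mastra` instance and resolves the agent with `mastra.getAgent("libsqlAgent")`, which only works once the agent is registered. A minimal registration sketch, assuming the conventional `src/mastra/index.ts` entry point (that file is not shown in this diff):

```typescript
import { Mastra } from "@mastra/core";

// Assumed import path, matching the agent file used in the example above.
import { libsqlAgent } from "./agents/example-libsql-agent";

// Registering the agent makes it resolvable via mastra.getAgent("libsqlAgent").
export const mastra = new Mastra({
  agents: { libsqlAgent },
});
```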
package/.docs/raw/memory/storage/memory-with-pg.mdx
@@ -0,0 +1,138 @@
+ ---
+ title: "Example: Memory with PostgreSQL | Memory | Mastra Docs"
+ description: Example for how to use Mastra's memory system with PostgreSQL storage and vector capabilities.
+ ---
+
+ # Memory with Postgres
+
+ This example demonstrates how to use Mastra's memory system with PostgreSQL as the storage backend.
+
+ ## Prerequisites
+
+ This example uses the `openai` model and requires a PostgreSQL database with the `pgvector` extension. Make sure to add the following to your `.env` file:
+
+ ```bash filename=".env" copy
+ OPENAI_API_KEY=<your-api-key>
+ DATABASE_URL=<your-connection-string>
+ ```
+
+ And install the following package:
+
+ ```bash copy
+ npm install @mastra/pg
+ ```
+
+ ## Adding memory to an agent
+
+ To add PostgreSQL memory to an agent, use the `Memory` class and create a new `storage` key using `PostgresStore`. The `connectionString` can be either a remote location or a local database connection.
+
+ ```typescript filename="src/mastra/agents/example-pg-agent.ts" showLineNumbers copy
+ import { Memory } from "@mastra/memory";
+ import { Agent } from "@mastra/core/agent";
+ import { openai } from "@ai-sdk/openai";
+ import { PostgresStore } from "@mastra/pg";
+
+ export const pgAgent = new Agent({
+   name: "pg-agent",
+   instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
+   model: openai("gpt-4o"),
+   memory: new Memory({
+     storage: new PostgresStore({
+       connectionString: process.env.DATABASE_URL!
+     }),
+     options: {
+       threads: {
+         generateTitle: true
+       }
+     }
+   })
+ });
+ ```
+
+ ## Local embeddings with fastembed
+
+ Embeddings are numeric vectors used by memory’s `semanticRecall` to retrieve related messages by meaning (not keywords). This setup uses `@mastra/fastembed` to generate vector embeddings.
+
+ Install `fastembed` to get started:
+
+ ```bash copy
+ npm install @mastra/fastembed
+ ```
+
+ Add the following to your agent:
+
+ ```typescript filename="src/mastra/agents/example-pg-agent.ts" showLineNumbers copy
+ import { Memory } from "@mastra/memory";
+ import { Agent } from "@mastra/core/agent";
+ import { openai } from "@ai-sdk/openai";
+ import { PostgresStore, PgVector } from "@mastra/pg";
+ import { fastembed } from "@mastra/fastembed";
+
+ export const pgAgent = new Agent({
+   name: "pg-agent",
+   instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
+   model: openai("gpt-4o"),
+   memory: new Memory({
+     storage: new PostgresStore({
+       connectionString: process.env.DATABASE_URL!
+     }),
+     vector: new PgVector({
+       connectionString: process.env.DATABASE_URL!
+     }),
+     embedder: fastembed,
+     options: {
+       lastMessages: 10,
+       semanticRecall: {
+         topK: 3,
+         messageRange: 2
+       }
+     }
+   })
+ });
+ ```
+
+ ## Usage example
+
+ Use `memoryOptions` to scope recall for this request. Set `lastMessages: 5` to limit recency-based recall, and use `semanticRecall` to fetch the `topK: 3` most relevant messages, including `messageRange: 2` neighboring messages for context around each match.
+
+ ```typescript filename="src/test-pg-agent.ts" showLineNumbers copy
+ import "dotenv/config";
+
+ import { mastra } from "./mastra";
+
+ const threadId = "123";
+ const resourceId = "user-456";
+
+ const agent = mastra.getAgent("pgAgent");
+
+ const message = await agent.stream("My name is Mastra", {
+   memory: {
+     thread: threadId,
+     resource: resourceId
+   }
+ });
+
+ await message.textStream.pipeTo(new WritableStream());
+
+ const stream = await agent.stream("What's my name?", {
+   memory: {
+     thread: threadId,
+     resource: resourceId
+   },
+   memoryOptions: {
+     lastMessages: 5,
+     semanticRecall: {
+       topK: 3,
+       messageRange: 2
+     }
+   }
+ });
+
+ for await (const chunk of stream.textStream) {
+   process.stdout.write(chunk);
+ }
+ ```
+
+ ## Related
+
+ - [Calling Agents](../agents/calling-agents.mdx)
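
The Postgres example requires the `pgvector` extension but never creates it. A hypothetical pre-flight script using the `pg` driver (not part of the package docs; it assumes the connected role may create extensions):

```typescript
import "dotenv/config";
import pg from "pg";

const client = new pg.Client({ connectionString: process.env.DATABASE_URL });

await client.connect();
// Semantic recall stores embeddings in pgvector; enable it if missing.
await client.query("CREATE EXTENSION IF NOT EXISTS vector;");
await client.end();
```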
package/.docs/raw/memory/storage/memory-with-upstash.mdx
@@ -0,0 +1,147 @@
+ ---
+ title: "Example: Memory with Upstash | Memory | Mastra Docs"
+ description: Example for how to use Mastra's memory system with Upstash Redis storage and vector capabilities.
+ ---
+
+ # Memory with Upstash
+
+ This example demonstrates how to use Mastra's memory system with Upstash as the storage backend.
+
+ ## Prerequisites
+
+ This example uses the `openai` model and requires both Upstash Redis and Upstash Vector services. Make sure to add the following to your `.env` file:
+
+ ```bash filename=".env" copy
+ OPENAI_API_KEY=<your-api-key>
+ UPSTASH_REDIS_REST_URL=<your-redis-url>
+ UPSTASH_REDIS_REST_TOKEN=<your-redis-token>
+ UPSTASH_VECTOR_REST_URL=<your-vector-index-url>
+ UPSTASH_VECTOR_REST_TOKEN=<your-vector-index-token>
+ ```
+
+ You can get your Upstash credentials by signing up at [upstash.com](https://upstash.com) and creating both Redis and Vector databases.
+
+ And install the following package:
+
+ ```bash copy
+ npm install @mastra/upstash
+ ```
+
+ ## Adding memory to an agent
+
+ To add Upstash memory to an agent, use the `Memory` class and create a new `storage` key using `UpstashStore` and a new `vector` key using `UpstashVector`. The configuration can point to either a remote service or a local setup.
+
+ ```typescript filename="src/mastra/agents/example-upstash-agent.ts" showLineNumbers copy
+ import { Memory } from "@mastra/memory";
+ import { Agent } from "@mastra/core/agent";
+ import { openai } from "@ai-sdk/openai";
+ import { UpstashStore } from "@mastra/upstash";
+
+ export const upstashAgent = new Agent({
+   name: "upstash-agent",
+   instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
+   model: openai("gpt-4o"),
+   memory: new Memory({
+     storage: new UpstashStore({
+       url: process.env.UPSTASH_REDIS_REST_URL!,
+       token: process.env.UPSTASH_REDIS_REST_TOKEN!
+     }),
+     options: {
+       threads: {
+         generateTitle: true
+       }
+     }
+   })
+ });
+ ```
+
+ ## Local embeddings with fastembed
+
+ Embeddings are numeric vectors used by memory’s `semanticRecall` to retrieve related messages by meaning (not keywords). This setup uses `@mastra/fastembed` to generate vector embeddings.
+
+ Install `fastembed` to get started:
+
+ ```bash copy
+ npm install @mastra/fastembed
+ ```
+
+ Add the following to your agent:
+
+ ```typescript filename="src/mastra/agents/example-upstash-agent.ts" showLineNumbers copy
+ import { Memory } from "@mastra/memory";
+ import { Agent } from "@mastra/core/agent";
+ import { openai } from "@ai-sdk/openai";
+ import { UpstashStore, UpstashVector } from "@mastra/upstash";
+ import { fastembed } from "@mastra/fastembed";
+
+ export const upstashAgent = new Agent({
+   name: "upstash-agent",
+   instructions: "You are an AI agent with the ability to automatically recall memories from previous interactions.",
+   model: openai("gpt-4o"),
+   memory: new Memory({
+     storage: new UpstashStore({
+       url: process.env.UPSTASH_REDIS_REST_URL!,
+       token: process.env.UPSTASH_REDIS_REST_TOKEN!
+     }),
+     vector: new UpstashVector({
+       url: process.env.UPSTASH_VECTOR_REST_URL!,
+       token: process.env.UPSTASH_VECTOR_REST_TOKEN!
+     }),
+     embedder: fastembed,
+     options: {
+       lastMessages: 10,
+       semanticRecall: {
+         topK: 3,
+         messageRange: 2
+       }
+     }
+   })
+ });
+ ```
+
+ ## Usage example
+
+ Use `memoryOptions` to scope recall for this request. Set `lastMessages: 5` to limit recency-based recall, and use `semanticRecall` to fetch the `topK: 3` most relevant messages, including `messageRange: 2` neighboring messages for context around each match.
+
+ ```typescript filename="src/test-upstash-agent.ts" showLineNumbers copy
+ import "dotenv/config";
+
+ import { mastra } from "./mastra";
+
+ const threadId = "123";
+ const resourceId = "user-456";
+
+ const agent = mastra.getAgent("upstashAgent");
+
+ const message = await agent.stream("My name is Mastra", {
+   memory: {
+     thread: threadId,
+     resource: resourceId
+   }
+ });
+
+ await message.textStream.pipeTo(new WritableStream());
+
+ const stream = await agent.stream("What's my name?", {
+   memory: {
+     thread: threadId,
+     resource: resourceId
+   },
+   memoryOptions: {
+     lastMessages: 5,
+     semanticRecall: {
+       topK: 3,
+       messageRange: 2
+     }
+   }
+ });
+
+ for await (const chunk of stream.textStream) {
+   process.stdout.write(chunk);
+ }
+ ```
+
+ ## Related
+
+ - [Calling Agents](../agents/calling-agents.mdx)
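
The Upstash example reads four credentials with non-null assertions (`!`), so a missing variable only surfaces as a confusing runtime failure. A small startup guard, purely a sketch and not from the docs, fails fast instead:

```typescript
const required = [
  "UPSTASH_REDIS_REST_URL",
  "UPSTASH_REDIS_REST_TOKEN",
  "UPSTASH_VECTOR_REST_URL",
  "UPSTASH_VECTOR_REST_TOKEN",
];

// Collect any variables that are unset or empty before constructing the agent.
const missing = required.filter((name) => !process.env[name]);
if (missing.length > 0) {
  throw new Error(`Missing Upstash environment variables: ${missing.join(", ")}`);
}
```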
package/.docs/raw/observability/ai-tracing/exporters/arize.mdx
@@ -0,0 +1,201 @@
+ ---
+ title: "Arize Exporter | AI Tracing | Observability | Mastra Docs"
+ description: "Send AI traces to Arize Phoenix or Arize AX using OpenTelemetry and OpenInference"
+ ---
+
+ import { Callout } from "nextra/components";
+
+ # Arize Exporter
+
+ [Arize](https://arize.com/) provides observability platforms for AI applications through [Phoenix](https://phoenix.arize.com/) (open-source) and [Arize AX](https://arize.com/generative-ai/) (enterprise). The Arize exporter sends AI traces using OpenTelemetry and [OpenInference](https://github.com/Arize-ai/openinference/tree/main/spec) semantic conventions, and is compatible with any OpenTelemetry platform that supports OpenInference.
+
+ ## When to Use Arize
+
+ Arize is ideal when you need:
+ - **OpenInference standards** - Industry-standard semantic conventions for AI traces
+ - **Flexible deployment** - Self-hosted Phoenix or managed Arize AX
+ - **OpenTelemetry compatibility** - Works with any OTLP-compatible platform
+ - **Comprehensive AI observability** - LLM traces, embeddings, and retrieval analytics
+ - **Open-source option** - Full-featured local deployment with Phoenix
+
+ ## Installation
+
+ ```bash npm2yarn
+ npm install @mastra/arize
+ ```
+
+ ## Configuration
+
+ ### Phoenix Setup
+
+ Phoenix is an open-source observability platform that can be self-hosted or used via Phoenix Cloud.
+
+ #### Prerequisites
+
+ 1. **Phoenix Instance**: Deploy using Docker or sign up at [Phoenix Cloud](https://app.phoenix.arize.com/login)
+ 2. **Endpoint**: Your Phoenix endpoint URL (ends in `/v1/traces`)
+ 3. **API Key**: Optional for unauthenticated instances, required for Phoenix Cloud
+ 4. **Environment Variables**: Set your configuration
+
+ ```bash filename=".env"
+ PHOENIX_ENDPOINT=http://localhost:6006/v1/traces # Or your Phoenix Cloud URL
+ PHOENIX_API_KEY=your-api-key # Optional for local instances
+ PHOENIX_PROJECT_NAME=mastra-service # Optional, defaults to 'mastra-service'
+ ```
+
+ #### Basic Setup
+
+ ```typescript filename="src/mastra/index.ts"
+ import { Mastra } from "@mastra/core";
+ import { ArizeExporter } from "@mastra/arize";
+
+ export const mastra = new Mastra({
+   observability: {
+     configs: {
+       arize: {
+         serviceName: process.env.PHOENIX_PROJECT_NAME || 'mastra-service',
+         exporters: [
+           new ArizeExporter({
+             endpoint: process.env.PHOENIX_ENDPOINT!,
+             apiKey: process.env.PHOENIX_API_KEY,
+             projectName: process.env.PHOENIX_PROJECT_NAME,
+           }),
+         ],
+       },
+     },
+   },
+ });
+ ```
+
+ <Callout type="info">
+ **Quick Start with Docker**
+
+ Test locally with an in-memory Phoenix instance:
+
+ ```bash
+ docker run --pull=always -d --name arize-phoenix -p 6006:6006 \
+   -e PHOENIX_SQL_DATABASE_URL="sqlite:///:memory:" \
+   arizephoenix/phoenix:latest
+ ```
+
+ Set `PHOENIX_ENDPOINT=http://localhost:6006/v1/traces` and run your Mastra agent to see traces at [localhost:6006](http://localhost:6006).
+ </Callout>
+
+ ### Arize AX Setup
+
+ Arize AX is an enterprise observability platform with advanced features for production AI systems.
+
+ #### Prerequisites
+
+ 1. **Arize AX Account**: Sign up at [app.arize.com](https://app.arize.com/)
+ 2. **Space ID**: Your organization's space identifier
+ 3. **API Key**: Generate in Arize AX settings
+ 4. **Environment Variables**: Set your credentials
+
+ ```bash filename=".env"
+ ARIZE_SPACE_ID=your-space-id
+ ARIZE_API_KEY=your-api-key
+ ARIZE_PROJECT_NAME=mastra-service # Optional
+ ```
+
+ #### Basic Setup
+
+ ```typescript filename="src/mastra/index.ts"
+ import { Mastra } from "@mastra/core";
+ import { ArizeExporter } from "@mastra/arize";
+
+ export const mastra = new Mastra({
+   observability: {
+     configs: {
+       arize: {
+         serviceName: process.env.ARIZE_PROJECT_NAME || 'mastra-service',
+         exporters: [
+           new ArizeExporter({
+             apiKey: process.env.ARIZE_API_KEY!,
+             spaceId: process.env.ARIZE_SPACE_ID!,
+             projectName: process.env.ARIZE_PROJECT_NAME,
+           }),
+         ],
+       },
+     },
+   },
+ });
+ ```
+
+ ## Configuration Options
+
+ The Arize exporter supports advanced configuration for fine-tuning OpenTelemetry behavior:
+
+ ### Complete Configuration
+
+ ```typescript
+ new ArizeExporter({
+   // Phoenix Configuration
+   endpoint: 'https://your-collector.example.com/v1/traces', // Required for Phoenix
+
+   // Arize AX Configuration
+   spaceId: 'your-space-id', // Required for Arize AX
+
+   // Shared Configuration
+   apiKey: 'your-api-key', // Required for authenticated endpoints
+   projectName: 'mastra-service', // Optional project name
+
+   // Optional OTLP settings
+   headers: {
+     'x-custom-header': 'value', // Additional headers for OTLP requests
+   },
+
+   // Debug and performance tuning
+   logLevel: 'debug', // Logging: debug | info | warn | error
+   batchSize: 512, // Batch size before exporting spans
+   timeout: 30000, // Timeout in ms before exporting spans
+
+   // Custom resource attributes
+   resourceAttributes: {
+     'deployment.environment': process.env.NODE_ENV,
+     'service.version': process.env.APP_VERSION,
+   },
+ })
+ ```
+
+ ### Batch Processing Options
+
+ Control how traces are batched and exported:
+
+ ```typescript
+ new ArizeExporter({
+   endpoint: process.env.PHOENIX_ENDPOINT!,
+   apiKey: process.env.PHOENIX_API_KEY,
+
+   // Batch processing configuration
+   batchSize: 512, // Number of spans to batch (default: 512)
+   timeout: 30000, // Max time in ms to wait before export (default: 30000)
+ })
+ ```
+
+ ### Resource Attributes
+
+ Add custom attributes to all exported spans:
+
+ ```typescript
+ new ArizeExporter({
+   endpoint: process.env.PHOENIX_ENDPOINT!,
+   resourceAttributes: {
+     'deployment.environment': process.env.NODE_ENV,
+     'service.namespace': 'production',
+     'service.instance.id': process.env.HOSTNAME,
+     'custom.attribute': 'value',
+   },
+ })
+ ```
+
+ ## OpenInference Semantic Conventions
+
+ This exporter implements the [OpenInference Semantic Conventions](https://github.com/Arize-ai/openinference/tree/main/spec) for generative AI applications, providing standardized trace structure across different observability platforms.
+
+ ## Related
+
+ - [AI Tracing Overview](/docs/observability/ai-tracing/overview)
+ - [Phoenix Documentation](https://docs.arize.com/phoenix)
+ - [Arize AX Documentation](https://docs.arize.com/)
+ - [OpenInference Specification](https://github.com/Arize-ai/openinference/tree/main/spec)
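
The configuration options above distinguish Phoenix (`endpoint`) from Arize AX (`spaceId`), so a single exporter instance can be chosen from whichever credentials are set. A sketch built only from the documented options; the environment-based switching itself is an assumption, not part of the docs:

```typescript
import { ArizeExporter } from "@mastra/arize";

// Prefer Arize AX when a space ID is configured; otherwise fall back to Phoenix.
export const exporter = process.env.ARIZE_SPACE_ID
  ? new ArizeExporter({
      spaceId: process.env.ARIZE_SPACE_ID, // Required for Arize AX
      apiKey: process.env.ARIZE_API_KEY!, // Required for authenticated endpoints
      projectName: process.env.ARIZE_PROJECT_NAME,
    })
  : new ArizeExporter({
      endpoint: process.env.PHOENIX_ENDPOINT!, // Required for Phoenix
      apiKey: process.env.PHOENIX_API_KEY, // Optional for unauthenticated instances
      projectName: process.env.PHOENIX_PROJECT_NAME,
    });
```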
package/.docs/raw/observability/ai-tracing/overview.mdx
@@ -89,14 +89,13 @@ Mastra provides two built-in exporters that work out of the box:
  
  In addition to the internal exporters, Mastra supports integration with popular observability platforms. These exporters allow you to leverage your existing monitoring infrastructure and take advantage of platform-specific features like alerting, dashboards, and correlation with other application metrics.
  
+ - **[Arize](/docs/observability/ai-tracing/exporters/arize)** - Exports traces to Arize Phoenix or Arize AX using OpenInference semantic conventions
  - **[Braintrust](/docs/observability/ai-tracing/exporters/braintrust)** - Exports traces to Braintrust's eval and observability platform
  - **[Langfuse](/docs/observability/ai-tracing/exporters/langfuse)** - Sends traces to the Langfuse open-source LLM engineering platform
- - **[LangSmith](/docs/observability/ai-tracing/exporters/langsmith)** - Pushes traces into LangSmiths observability and evaluation toolkit
+ - **[LangSmith](/docs/observability/ai-tracing/exporters/langsmith)** - Pushes traces into LangSmith's observability and evaluation toolkit
  - **[OpenTelemetry](/docs/observability/ai-tracing/exporters/otel)** - Deliver traces to any OpenTelemetry-compatible observability system
    - Supports: Dash0, Laminar, New Relic, SigNoz, Traceloop, Zipkin, and others!
  
- - **Arize** - Coming soon!
- 
  ## Sampling Strategies
  
  Sampling allows you to control which traces are collected, helping you balance between observability needs and resource costs. In production environments with high traffic, collecting every trace can be expensive and unnecessary. Sampling strategies let you capture a representative subset of traces while ensuring you don't miss critical information about errors or important operations.
@@ -311,7 +310,7 @@ When creating a custom config with external exporters, you might lose access to
  
  ```ts filename="src/mastra/index.ts" showLineNumbers copy
  import { DefaultExporter, CloudExporter } from '@mastra/core/ai-tracing';
- import { LangfuseExporter } from '@mastra/langfuse';
+ import { ArizeExporter } from '@mastra/arize';
  
  export const mastra = new Mastra({
    observability: {
@@ -320,7 +319,10 @@ export const mastra = new Mastra({
        production: {
          serviceName: 'my-service',
          exporters: [
-           new LangfuseExporter(), // External exporter
+           new ArizeExporter({ // External exporter
+             endpoint: process.env.PHOENIX_ENDPOINT,
+             apiKey: process.env.PHOENIX_API_KEY,
+           }),
            new DefaultExporter(), // Keep Playground access
            new CloudExporter(), // Keep Cloud access
          ],
@@ -331,7 +333,7 @@ export const mastra = new Mastra({
  ```
  
  This configuration sends traces to all three destinations simultaneously:
- - **Langfuse** for external observability
+ - **Arize Phoenix/AX** for external observability
  - **DefaultExporter** for local Playground access
  - **CloudExporter** for Mastra Cloud dashboard
  
@@ -759,8 +761,9 @@ Traces are available in multiple locations:
  
  - **Mastra Playground** - Local development environment
  - **Mastra Cloud** - Production monitoring dashboard
- - **Langfuse Dashboard** - When using Langfuse exporter
+ - **Arize Phoenix / Arize AX** - When using Arize exporter
  - **Braintrust Console** - When using Braintrust exporter
+ - **Langfuse Dashboard** - When using Langfuse exporter
  
  ## See Also
  
@@ -777,8 +780,9 @@ Traces are available in multiple locations:
  - [DefaultExporter](/reference/observability/ai-tracing/exporters/default-exporter) - Storage persistence
  - [CloudExporter](/reference/observability/ai-tracing/exporters/cloud-exporter) - Mastra Cloud integration
  - [ConsoleExporter](/reference/observability/ai-tracing/exporters/console-exporter) - Debug output
- - [Langfuse](/reference/observability/ai-tracing/exporters/langfuse) - Langfuse integration
+ - [Arize](/reference/observability/ai-tracing/exporters/arize) - Arize Phoenix and Arize AX integration
  - [Braintrust](/reference/observability/ai-tracing/exporters/braintrust) - Braintrust integration
+ - [Langfuse](/reference/observability/ai-tracing/exporters/langfuse) - Langfuse integration
  - [OpenTelemetry](/reference/observability/ai-tracing/exporters/otel) - OTEL-compatible platforms
  
  ### Processors