@mastra/mcp-docs-server 1.0.0-beta.4 → 1.0.0-beta.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40internal%2Fstorage-test-utils.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +326 -126
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-vercel.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flance.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp-registry-registry.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Frag.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Freact.md +80 -1
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +36 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-azure.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-cloudflare.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-deepgram.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-elevenlabs.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-gladia.md +92 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google-gemini-live.md +67 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-murf.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai-realtime.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-openai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-playai.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-sarvam.md +201 -1
- package/.docs/organized/changelogs/%40mastra%2Fvoice-speechify.md +201 -1
- package/.docs/organized/changelogs/create-mastra.md +201 -1
- package/.docs/organized/changelogs/mastra.md +201 -1
- package/.docs/organized/code-examples/memory-with-processors.md +1 -1
- package/.docs/organized/code-examples/quick-start.md +1 -1
- package/.docs/raw/agents/adding-voice.mdx +7 -10
- package/.docs/raw/agents/guardrails.mdx +19 -20
- package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +6 -5
- package/.docs/raw/agents/networks.mdx +1 -2
- package/.docs/raw/agents/overview.mdx +5 -5
- package/.docs/raw/agents/using-tools.mdx +4 -5
- package/.docs/raw/course/01-first-agent/05-running-playground.md +5 -5
- package/.docs/raw/course/01-first-agent/09-testing-your-agent.md +3 -3
- package/.docs/raw/course/01-first-agent/13-testing-your-tool.md +3 -3
- package/.docs/raw/course/01-first-agent/17-testing-memory.md +2 -2
- package/.docs/raw/course/04-workflows/07-using-playground.md +1 -1
- package/.docs/raw/deployment/building-mastra.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/amazon-ec2.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/aws-lambda.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/azure-app-services.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/digital-ocean.mdx +1 -1
- package/.docs/raw/deployment/cloud-providers/index.mdx +1 -1
- package/.docs/raw/deployment/mastra-cloud/observability.mdx +19 -17
- package/.docs/raw/deployment/mastra-cloud/setting-up.mdx +1 -1
- package/.docs/raw/deployment/overview.mdx +2 -2
- package/.docs/raw/deployment/web-framework.mdx +5 -5
- package/.docs/raw/evals/custom-scorers.mdx +3 -5
- package/.docs/raw/evals/overview.mdx +2 -3
- package/.docs/raw/getting-started/project-structure.mdx +1 -1
- package/.docs/raw/getting-started/start.mdx +72 -0
- package/.docs/raw/getting-started/studio.mdx +1 -1
- package/.docs/raw/{frameworks/agentic-uis/ai-sdk.mdx → guides/build-your-ui/ai-sdk-ui.mdx} +105 -11
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/astro.mdx +23 -25
- package/.docs/raw/{frameworks/servers → guides/getting-started}/express.mdx +3 -4
- package/.docs/raw/guides/{guide → getting-started}/manual-install.mdx +1 -1
- package/.docs/raw/guides/{quickstarts/nextjs.mdx → getting-started/next-js.mdx} +11 -11
- package/.docs/raw/guides/{quickstarts/standalone-server.mdx → getting-started/quickstart.mdx} +7 -7
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/sveltekit.mdx +23 -25
- package/.docs/raw/{frameworks/web-frameworks → guides/getting-started}/vite-react.mdx +7 -7
- package/.docs/raw/guides/guide/ai-recruiter.mdx +2 -3
- package/.docs/raw/guides/guide/chef-michel.mdx +2 -3
- package/.docs/raw/guides/guide/notes-mcp-server.mdx +2 -2
- package/.docs/raw/guides/guide/research-assistant.mdx +7 -8
- package/.docs/raw/guides/guide/stock-agent.mdx +4 -6
- package/.docs/raw/guides/guide/web-search.mdx +12 -10
- package/.docs/raw/guides/migrations/agentnetwork.mdx +4 -4
- package/.docs/raw/guides/migrations/ai-sdk-v4-to-v5.mdx +1 -1
- package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +29 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/tools.mdx +5 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +22 -0
- package/.docs/raw/guides/migrations/vnext-to-standard-apis.mdx +2 -2
- package/.docs/raw/index.mdx +2 -2
- package/.docs/raw/mcp/overview.mdx +3 -5
- package/.docs/raw/memory/memory-processors.mdx +1 -2
- package/.docs/raw/memory/semantic-recall.mdx +7 -7
- package/.docs/raw/memory/storage/memory-with-libsql.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-mongodb.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-pg.mdx +2 -4
- package/.docs/raw/memory/storage/memory-with-upstash.mdx +2 -4
- package/.docs/raw/memory/threads-and-resources.mdx +3 -3
- package/.docs/raw/memory/working-memory.mdx +4 -5
- package/.docs/raw/{logging.mdx → observability/logging.mdx} +1 -1
- package/.docs/raw/observability/overview.mdx +2 -2
- package/.docs/raw/observability/tracing/exporters/otel.mdx +21 -2
- package/.docs/raw/observability/tracing/exporters/posthog.mdx +107 -0
- package/.docs/raw/observability/tracing/overview.mdx +3 -2
- package/.docs/raw/rag/chunking-and-embedding.mdx +16 -17
- package/.docs/raw/rag/overview.mdx +3 -2
- package/.docs/raw/rag/retrieval.mdx +20 -32
- package/.docs/raw/reference/agents/agent.mdx +7 -10
- package/.docs/raw/reference/agents/generateLegacy.mdx +2 -2
- package/.docs/raw/reference/agents/getLLM.mdx +1 -1
- package/.docs/raw/reference/agents/network.mdx +2 -3
- package/.docs/raw/reference/cli/mastra.mdx +2 -1
- package/.docs/raw/reference/client-js/agents.mdx +3 -3
- package/.docs/raw/reference/core/getLogger.mdx +1 -1
- package/.docs/raw/reference/core/listLogs.mdx +1 -1
- package/.docs/raw/reference/core/listLogsByRunId.mdx +1 -1
- package/.docs/raw/reference/core/mastra-model-gateway.mdx +5 -19
- package/.docs/raw/reference/core/setLogger.mdx +1 -1
- package/.docs/raw/reference/core/setTelemetry.mdx +1 -1
- package/.docs/raw/reference/evals/answer-relevancy.mdx +28 -98
- package/.docs/raw/reference/evals/answer-similarity.mdx +12 -258
- package/.docs/raw/reference/evals/bias.mdx +29 -87
- package/.docs/raw/reference/evals/completeness.mdx +31 -90
- package/.docs/raw/reference/evals/content-similarity.mdx +28 -88
- package/.docs/raw/reference/evals/context-precision.mdx +28 -130
- package/.docs/raw/reference/evals/context-relevance.mdx +11 -11
- package/.docs/raw/reference/evals/faithfulness.mdx +28 -101
- package/.docs/raw/reference/evals/hallucination.mdx +28 -103
- package/.docs/raw/reference/evals/keyword-coverage.mdx +28 -107
- package/.docs/raw/reference/evals/noise-sensitivity.mdx +11 -11
- package/.docs/raw/reference/evals/prompt-alignment.mdx +15 -15
- package/.docs/raw/reference/evals/textual-difference.mdx +27 -100
- package/.docs/raw/reference/evals/tone-consistency.mdx +25 -98
- package/.docs/raw/reference/evals/tool-call-accuracy.mdx +7 -7
- package/.docs/raw/reference/evals/toxicity.mdx +29 -92
- package/.docs/raw/reference/memory/memory-class.mdx +5 -7
- package/.docs/raw/reference/observability/tracing/exporters/posthog.mdx +132 -0
- package/.docs/raw/reference/processors/batch-parts-processor.mdx +1 -1
- package/.docs/raw/reference/processors/language-detector.mdx +1 -1
- package/.docs/raw/reference/processors/moderation-processor.mdx +2 -2
- package/.docs/raw/reference/processors/pii-detector.mdx +2 -2
- package/.docs/raw/reference/processors/prompt-injection-detector.mdx +1 -1
- package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -3
- package/.docs/raw/reference/processors/token-limiter-processor.mdx +2 -2
- package/.docs/raw/reference/processors/unicode-normalizer.mdx +1 -1
- package/.docs/raw/reference/rag/embeddings.mdx +5 -5
- package/.docs/raw/reference/rag/rerank.mdx +1 -2
- package/.docs/raw/reference/rag/rerankWithScorer.mdx +0 -1
- package/.docs/raw/reference/streaming/agents/stream.mdx +8 -1
- package/.docs/raw/reference/templates/overview.mdx +1 -4
- package/.docs/raw/reference/tools/client.mdx +1 -2
- package/.docs/raw/reference/tools/create-tool.mdx +132 -0
- package/.docs/raw/reference/tools/graph-rag-tool.mdx +5 -5
- package/.docs/raw/reference/tools/mcp-client.mdx +2 -4
- package/.docs/raw/reference/tools/mcp-server.mdx +1 -2
- package/.docs/raw/reference/tools/vector-query-tool.mdx +14 -15
- package/.docs/raw/reference/vectors/chroma.mdx +81 -1
- package/.docs/raw/reference/vectors/couchbase.mdx +24 -17
- package/.docs/raw/reference/vectors/lance.mdx +38 -22
- package/.docs/raw/reference/vectors/libsql.mdx +35 -2
- package/.docs/raw/reference/vectors/mongodb.mdx +35 -2
- package/.docs/raw/reference/vectors/opensearch.mdx +37 -16
- package/.docs/raw/reference/vectors/pg.mdx +43 -36
- package/.docs/raw/reference/vectors/pinecone.mdx +48 -1
- package/.docs/raw/reference/vectors/qdrant.mdx +36 -1
- package/.docs/raw/reference/vectors/turbopuffer.mdx +74 -0
- package/.docs/raw/reference/voice/openai-realtime.mdx +2 -2
- package/.docs/raw/reference/voice/voice.addInstructions.mdx +2 -3
- package/.docs/raw/reference/voice/voice.addTools.mdx +1 -1
- package/.docs/raw/reference/voice/voice.answer.mdx +1 -1
- package/.docs/raw/reference/voice/voice.close.mdx +1 -1
- package/.docs/raw/reference/voice/voice.connect.mdx +1 -1
- package/.docs/raw/reference/voice/voice.off.mdx +1 -1
- package/.docs/raw/reference/voice/voice.on.mdx +1 -1
- package/.docs/raw/reference/voice/voice.send.mdx +1 -1
- package/.docs/raw/reference/voice/voice.updateConfig.mdx +1 -1
- package/.docs/raw/server-db/mastra-client.mdx +1 -2
- package/.docs/raw/streaming/overview.mdx +20 -9
- package/.docs/raw/streaming/tool-streaming.mdx +47 -4
- package/.docs/raw/tools-mcp/advanced-usage.mdx +1 -2
- package/.docs/raw/tools-mcp/mcp-overview.mdx +3 -5
- package/.docs/raw/voice/overview.mdx +21 -41
- package/.docs/raw/voice/speech-to-speech.mdx +4 -4
- package/.docs/raw/voice/speech-to-text.mdx +1 -2
- package/.docs/raw/voice/text-to-speech.mdx +1 -2
- package/.docs/raw/workflows/control-flow.mdx +180 -0
- package/CHANGELOG.md +10 -0
- package/dist/{chunk-5NJC7NRO.js → chunk-4CM2BQNP.js} +24 -4
- package/dist/prepare-docs/package-changes.d.ts.map +1 -1
- package/dist/prepare-docs/prepare.js +1 -1
- package/dist/stdio.js +1 -1
- package/package.json +7 -7
- package/.docs/raw/frameworks/agentic-uis/cedar-os.mdx +0 -102
- package/.docs/raw/frameworks/agentic-uis/openrouter.mdx +0 -179
- package/.docs/raw/frameworks/web-frameworks/next-js.mdx +0 -379
- package/.docs/raw/getting-started/quickstart.mdx +0 -27
- package/.docs/raw/getting-started/templates.mdx +0 -73
- /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/assistant-ui.mdx +0 -0
- /package/.docs/raw/{frameworks/agentic-uis → guides/build-your-ui}/copilotkit.mdx +0 -0
````diff
@@ -30,16 +30,15 @@ After getting a response from the LLM, all new messages (user, assistant, and to
 
 Semantic recall is enabled by default, so if you give your agent memory it will be included:
 
-```typescript {
+```typescript {8}
 import { Agent } from "@mastra/core/agent";
 import { Memory } from "@mastra/memory";
-import { openai } from "@ai-sdk/openai";
 
 const agent = new Agent({
   id: "support-agent",
   name: "SupportAgent",
   instructions: "You are a helpful support agent.",
-  model: openai
+  model: "openai/gpt-5.1",
   memory: new Memory(),
 });
 ```
@@ -109,11 +108,12 @@ The simplest way is to use a `provider/model` string with autocomplete support:
 ```ts {7}
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
+import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
 
 const agent = new Agent({
   memory: new Memory({
     // ... other memory options
-    embedder: "openai/text-embedding-3-small",
+    embedder: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   }),
 });
 ```
@@ -129,15 +129,15 @@ The model router automatically handles API key detection from environment variab
 
 You can also use AI SDK embedding models directly:
 
-```ts {
+```ts {2,7}
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import {
+import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
 
 const agent = new Agent({
   memory: new Memory({
     // ... other memory options
-    embedder:
+    embedder: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   }),
 });
 ```
````
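Taken together, these hunks switch the semantic recall examples from AI SDK provider imports to model-router strings and `ModelRouterEmbeddingModel`. A minimal sketch of the documented pattern after the change, assembled here for illustration (the model IDs are the ones used in the updated docs):

```typescript
// Consolidated sketch of the updated semantic-recall setup shown in the hunks
// above: a provider/model string for the agent model plus ModelRouterEmbeddingModel
// for the memory embedder. Assembled for illustration; not a verbatim doc excerpt.
import { Agent } from "@mastra/core/agent";
import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
import { Memory } from "@mastra/memory";

const agent = new Agent({
  id: "support-agent",
  name: "SupportAgent",
  instructions: "You are a helpful support agent.",
  model: "openai/gpt-5.1", // replaces the removed @ai-sdk/openai import
  memory: new Memory({
    embedder: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
  }),
});
```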
````diff
@@ -28,7 +28,6 @@ To add LibSQL memory to an agent use the `Memory` class and create a new `storag
 ```typescript title="src/mastra/agents/example-libsql-agent.ts" showLineNumbers copy
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { LibSQLStore } from "@mastra/libsql";
 
 export const libsqlAgent = new Agent({
@@ -36,7 +35,7 @@ export const libsqlAgent = new Agent({
   name: "LibSQL Agent",
   instructions:
     "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-  model: openai
+  model: "openai/gpt-5.1",
   memory: new Memory({
     storage: new LibSQLStore({
       id: 'libsql-agent-storage',
@@ -64,7 +63,6 @@ Add the following to your agent:
 ```typescript title="src/mastra/agents/example-libsql-agent.ts" showLineNumbers copy
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { LibSQLStore, LibSQLVector } from "@mastra/libsql";
 import { fastembed } from "@mastra/fastembed";
 
@@ -73,7 +71,7 @@ export const libsqlAgent = new Agent({
   name: "LibSQL Agent",
   instructions:
     "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-  model: openai
+  model: "openai/gpt-5.1",
   memory: new Memory({
     storage: new LibSQLStore({
       id: 'libsql-agent-storage',
````
````diff
@@ -30,7 +30,6 @@ To add MongoDB memory to an agent use the `Memory` class and create a new `stora
 ```typescript title="src/mastra/agents/example-mongodb-agent.ts" showLineNumbers copy
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { MongoDBStore } from "@mastra/mongodb";
 
 export const mongodbAgent = new Agent({
@@ -38,7 +37,7 @@ export const mongodbAgent = new Agent({
   name: "mongodb-agent",
   instructions:
     "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-  model: openai
+  model: "openai/gpt-5.1",
   memory: new Memory({
     storage: new MongoDBStore({
       url: process.env.MONGODB_URI!,
@@ -71,7 +70,6 @@ Add the following to your agent:
 ```typescript title="src/mastra/agents/example-mongodb-agent.ts" showLineNumbers copy
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { MongoDBStore, MongoDBVector } from "@mastra/mongodb";
 import { fastembed } from "@mastra/fastembed";
 
@@ -80,7 +78,7 @@ export const mongodbAgent = new Agent({
   name: "mongodb-agent",
   instructions:
     "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-  model: openai
+  model: "openai/gpt-5.1",
   memory: new Memory({
     storage: new MongoDBStore({
       url: process.env.MONGODB_URI!,
````
````diff
@@ -29,7 +29,6 @@ To add PostgreSQL memory to an agent use the `Memory` class and create a new `st
 ```typescript title="src/mastra/agents/example-pg-agent.ts" showLineNumbers copy
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { PostgresStore } from "@mastra/pg";
 
 export const pgAgent = new Agent({
@@ -37,7 +36,7 @@ export const pgAgent = new Agent({
   name: "PG Agent",
   instructions:
     "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-  model: openai
+  model: "openai/gpt-5.1",
   memory: new Memory({
     storage: new PostgresStore({
       id: 'pg-agent-storage',
@@ -65,7 +64,6 @@ Add the following to your agent:
 ```typescript title="src/mastra/agents/example-pg-agent.ts" showLineNumbers copy
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { PostgresStore, PgVector } from "@mastra/pg";
 import { fastembed } from "@mastra/fastembed";
 
@@ -74,7 +72,7 @@ export const pgAgent = new Agent({
   name: "PG Agent",
   instructions:
     "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-  model: openai
+  model: "openai/gpt-5.1",
   memory: new Memory({
     storage: new PostgresStore({
       id: 'pg-agent-storage',
````
````diff
@@ -34,7 +34,6 @@ To add Upstash memory to an agent use the `Memory` class and create a new `stora
 ```typescript title="src/mastra/agents/example-upstash-agent.ts" showLineNumbers copy
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { UpstashStore } from "@mastra/upstash";
 
 export const upstashAgent = new Agent({
@@ -42,7 +41,7 @@ export const upstashAgent = new Agent({
   name: "Upstash Agent",
   instructions:
     "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-  model: openai
+  model: "openai/gpt-5.1",
   memory: new Memory({
     storage: new UpstashStore({
       id: 'upstash-agent-storage',
@@ -71,7 +70,6 @@ Add the following to your agent:
 ```typescript title="src/mastra/agents/example-upstash-agent.ts" showLineNumbers copy
 import { Memory } from "@mastra/memory";
 import { Agent } from "@mastra/core/agent";
-import { openai } from "@ai-sdk/openai";
 import { UpstashStore, UpstashVector } from "@mastra/upstash";
 import { fastembed } from "@mastra/fastembed";
 
@@ -80,7 +78,7 @@ export const upstashAgent = new Agent({
   name: "Upstash Agent",
   instructions:
     "You are an AI agent with the ability to automatically recall memories from previous interactions.",
-  model: openai
+  model: "openai/gpt-5.1",
   memory: new Memory({
     storage: new UpstashStore({
       id: 'upstash-agent-storage',
````
````diff
@@ -56,7 +56,7 @@ export const testAgent = new Agent({
     options: {
       threads: {
         generateTitle: {
-          model: openai
+          model: "openai/gpt-4.1-nano",
           instructions:
             "Generate a concise title based on the user's first message",
         },
@@ -80,8 +80,8 @@ export const testAgent = new Agent({
   model: ({ requestContext }) => {
     const userTier = requestContext.get("userTier");
     return userTier === "premium"
-      ? openai
-      : openai
+      ? "openai/gpt-5.1"
+      : "openai/gpt-4.1-nano";
   },
   instructions: ({ requestContext }) => {
     const language = requestContext.get("userLanguage") || "English";
````
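The second hunk only shows the changed branches of the dynamic `model` callback; a self-contained sketch of that pattern after the change, with the surrounding agent fields assumed from the other examples in this diff:

```typescript
// Sketch of the dynamic model selection from the hunk above: the agent returns a
// provider/model string per request based on requestContext. The id/name/
// instructions fields are assumptions, not part of this hunk.
import { Agent } from "@mastra/core/agent";

export const testAgent = new Agent({
  id: "test-agent",
  name: "Test Agent",
  instructions: "You are a helpful assistant.",
  model: ({ requestContext }) => {
    const userTier = requestContext.get("userTier");
    return userTier === "premium"
      ? "openai/gpt-5.1" // full model for premium users
      : "openai/gpt-4.1-nano"; // cheaper model for everyone else
  },
});
```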
````diff
@@ -24,17 +24,16 @@ Working memory can persist at two different scopes:
 
 Here's a minimal example of setting up an agent with working memory:
 
-```typescript {
+```typescript {11-14}
 import { Agent } from "@mastra/core/agent";
 import { Memory } from "@mastra/memory";
-import { openai } from "@ai-sdk/openai";
 
 // Create agent with working memory enabled
 const agent = new Agent({
   id: "personal-assistant",
   name: "PersonalAssistant",
   instructions: "You are a helpful personal assistant.",
-  model: openai
+  model: "openai/gpt-5.1",
   memory: new Memory({
     options: {
       workingMemory: {
@@ -378,6 +377,6 @@ await memory.updateWorkingMemory({
 
 ## Examples
 
-- [Working memory with template](/
-- [Working memory with schema](/
+- [Working memory with template](https://github.com/mastra-ai/mastra/tree/main/examples/memory-with-template)
+- [Working memory with schema](https://github.com/mastra-ai/mastra/tree/main/examples/memory-with-schema)
 - [Per-resource working memory](https://github.com/mastra-ai/mastra/tree/main/examples/memory-per-resource-example) - Complete example showing resource-scoped memory persistence
````
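The first hunk stops at `workingMemory: {`, so the full option block is not visible here. A hedged completion, assuming the `enabled`/`template` options referenced by the "Working memory with template" example link above:

```typescript
// Hedged completion of the truncated working-memory example: everything from
// `enabled` downward is assumed from the rest of working-memory.mdx and is not
// shown in this hunk.
import { Agent } from "@mastra/core/agent";
import { Memory } from "@mastra/memory";

const agent = new Agent({
  id: "personal-assistant",
  name: "PersonalAssistant",
  instructions: "You are a helpful personal assistant.",
  model: "openai/gpt-5.1",
  memory: new Memory({
    options: {
      workingMemory: {
        enabled: true,
        template: "- Name:\n- Preferences:", // persisted across conversation turns
      },
    },
  }),
});
```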
````diff
@@ -11,7 +11,7 @@ When deploying to Mastra Cloud, logs are shown on the [Logs](/docs/v1/deployment
 
 ## Configuring logs with PinoLogger
 
-When [initializing a new Mastra project](/guides/v1/
+When [initializing a new Mastra project](/guides/v1/getting-started/quickstart) using the CLI, `PinoLogger` is included by default.
 
 ```typescript title="src/mastra/index.ts" showLineNumbers copy
 import { Mastra } from "@mastra/core/mastra";
@@ -43,11 +43,11 @@ export const mastra = new Mastra({
 
 With this basic setup, you will see Traces and Logs in both Studio and in Mastra Cloud.
 
-We also support various external tracing providers like Langfuse, Braintrust, and any OpenTelemetry-compatible platform (Datadog, New Relic, SigNoz, etc.). See more about this in the [Tracing](/docs/v1/observability/tracing/overview) documentation.
+We also support various external tracing providers like MLflow, Langfuse, Braintrust, and any OpenTelemetry-compatible platform (Datadog, New Relic, SigNoz, etc.). See more about this in the [Tracing](/docs/v1/observability/tracing/overview) documentation.
 
 ## What's Next?
 
 - **[Set up Tracing](/docs/v1/observability/tracing/overview)**: Configure tracing for your application
-- **[Configure Logging](/docs/v1/logging)**: Add structured logging
+- **[Configure Logging](/docs/v1/observability/logging)**: Add structured logging
 - **[View Examples](/examples/v1/observability/basic-ai-tracing)**: See observability in action
 - **[API Reference](/reference/v1/observability/tracing/instances)**: Detailed configuration options
````
````diff
@@ -11,13 +11,13 @@ The OpenTelemetry exporter is currently **experimental**. APIs and configuration
 
 :::
 
-The OpenTelemetry (OTEL) exporter sends your traces to any OTEL-compatible observability platform using standardized [OpenTelemetry Semantic Conventions for GenAI](https://opentelemetry.io/docs/specs/semconv/gen-ai/). This ensures broad compatibility with platforms like Datadog, New Relic, SigNoz, Dash0, Traceloop, Laminar, and more.
+The OpenTelemetry (OTEL) exporter sends your traces to any OTEL-compatible observability platform using standardized [OpenTelemetry Semantic Conventions for GenAI](https://opentelemetry.io/docs/specs/semconv/gen-ai/). This ensures broad compatibility with platforms like Datadog, New Relic, SigNoz, MLflow, Dash0, Traceloop, Laminar, and more.
 
 ## Installation
 
 Each provider requires specific protocol packages. Install the base exporter plus the protocol package for your provider:
 
-### For HTTP/Protobuf Providers (SigNoz, New Relic, Laminar)
+### For HTTP/Protobuf Providers (SigNoz, New Relic, Laminar, MLflow)
 
 ```bash npm2yarn
 npm install @mastra/otel-exporter@beta @opentelemetry/exporter-trace-otlp-proto
@@ -37,6 +37,25 @@ npm install @mastra/otel-exporter@beta @opentelemetry/exporter-trace-otlp-http
 
 ## Provider Configurations
 
+
+### MLflow
+
+[MLflow](https://mlflow.org/docs/latest/genai/tracing/integrations/listing/mastra) supports native Mastra tracing through its OTLP endpoint at `/v1/traces`. Use the `custom` provider with HTTP/Protobuf and include the experiment header so traces land in the correct MLflow experiment:
+
+```typescript title="src/mastra/index.ts"
+new OtelExporter({
+  provider: {
+    custom: {
+      endpoint: `${process.env.MLFLOW_TRACKING_URI}/v1/traces`,
+      protocol: "http/protobuf",
+      headers: {
+        "x-mlflow-experiment-id": process.env.MLFLOW_EXPERIMENT_ID,
+      },
+    },
+  },
+})
+```
+
 ### Dash0
 
 [Dash0](https://www.dash0.com/) provides real-time observability with automatic insights.
````
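The added MLflow snippet shows the exporter on its own; a sketch of how it would plug into a Mastra instance, assuming the `Observability`/`configs` wiring used by the PostHog page added later in this same release (the `mlflow` config key, service name, and the `@mastra/otel-exporter` import path are assumptions):

```typescript
// Sketch: registering the MLflow-targeting OtelExporter with Mastra. Only the
// OtelExporter options come from the hunk above; the Observability wiring is
// assumed from the PostHog setup added in this release.
import { Mastra } from "@mastra/core";
import { Observability } from "@mastra/observability";
import { OtelExporter } from "@mastra/otel-exporter";

export const mastra = new Mastra({
  observability: new Observability({
    configs: {
      mlflow: {
        serviceName: "my-service",
        exporters: [
          new OtelExporter({
            provider: {
              custom: {
                endpoint: `${process.env.MLFLOW_TRACKING_URI}/v1/traces`,
                protocol: "http/protobuf",
                headers: {
                  "x-mlflow-experiment-id": process.env.MLFLOW_EXPERIMENT_ID,
                },
              },
            },
          }),
        ],
      },
    },
  }),
});
```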
````diff
@@ -0,0 +1,107 @@
+---
+title: "PostHog Exporter | Tracing | Observability"
+description: "Send traces to PostHog for AI observability and analytics"
+---
+
+# PostHog Exporter
+
+[PostHog](https://posthog.com/) is an analytics platform with AI observability features for monitoring LLM applications. The PostHog exporter sends your traces to PostHog as structured events, providing insights into token usage, costs, latency, and conversation flows.
+
+## Installation
+
+```bash npm2yarn
+npm install @mastra/posthog@beta
+```
+
+## Configuration
+
+### Prerequisites
+
+1. **PostHog Account**: Sign up at [posthog.com](https://posthog.com/)
+2. **Project API Key**: Get your project API key from PostHog Settings → Project API Key
+3. **Environment Variables**: Set your credentials
+
+```bash title=".env"
+POSTHOG_API_KEY=phc_xxxxxxxxxxxxxxxx
+POSTHOG_HOST=https://us.i.posthog.com # Optional: EU region or self-hosted URL
+```
+
+### Basic Setup
+
+```typescript title="src/mastra/index.ts"
+import { Mastra } from "@mastra/core";
+import { Observability } from "@mastra/observability";
+import { PosthogExporter } from "@mastra/posthog";
+
+export const mastra = new Mastra({
+  observability: new Observability({
+    configs: {
+      posthog: {
+        serviceName: "my-service",
+        exporters: [
+          new PosthogExporter({
+            apiKey: process.env.POSTHOG_API_KEY,
+          }),
+        ],
+      },
+    },
+  }),
+});
+```
+
+## Configuration Options
+
+### Complete Configuration
+
+```typescript
+new PosthogExporter({
+  // Required credentials
+  apiKey: process.env.POSTHOG_API_KEY!,
+
+  // Optional settings
+  host: "https://us.i.posthog.com", // Default: US region
+  // or "https://eu.i.posthog.com" for EU region
+  // or your self-hosted URL
+
+  // Batching configuration
+  flushAt: 20, // Batch size (default: 20)
+  flushInterval: 10000, // Flush interval in ms (default: 10000)
+  serverless: false, // Serverless mode: flushAt=10, flushInterval=2000
+
+  // User identification
+  defaultDistinctId: "anonymous", // Fallback if no userId in metadata
+
+  // Privacy settings
+  enablePrivacyMode: false, // Excludes input/output from generation events
+
+  // Diagnostic logging
+  logLevel: "info", // debug | info | warn | error
+});
+```
+
+### Serverless Mode
+
+Optimized batching for serverless environments:
+
+```typescript
+new PosthogExporter({
+  apiKey: process.env.POSTHOG_API_KEY!,
+  serverless: true, // Configures smaller batches for faster flushing
+});
+```
+
+### Privacy Mode
+
+Exclude input/output data from generation events while preserving token metrics:
+
+```typescript
+new PosthogExporter({
+  apiKey: process.env.POSTHOG_API_KEY!,
+  enablePrivacyMode: true, // Removes $ai_input and $ai_output_choices
+});
+```
+
+## Related
+
+- [Tracing Overview](/docs/v1/observability/tracing/overview)
+- [PostHog Documentation](https://posthog.com/docs)
````
````diff
@@ -97,7 +97,7 @@ In addition to the internal exporters, Mastra supports integration with popular
 - **[Langfuse](/docs/v1/observability/tracing/exporters/langfuse)** - Sends traces to the Langfuse open-source LLM engineering platform
 - **[LangSmith](/docs/v1/observability/tracing/exporters/langsmith)** - Pushes traces into LangSmith's observability and evaluation toolkit
 - **[OpenTelemetry](/docs/v1/observability/tracing/exporters/otel)** - Deliver traces to any OpenTelemetry-compatible observability system
-  - Supports: Dash0, Laminar, New Relic, SigNoz, Traceloop, Zipkin, and others!
+  - Supports: Dash0, MLflow, Laminar, New Relic, SigNoz, Traceloop, Zipkin, and others!
 
 ## Sampling Strategies
 
@@ -619,7 +619,7 @@ console.log("Trace ID:", finalState.traceId);
 Once you have a trace ID, you can:
 
 1. **Look up traces in Studio**: Navigate to the traces view and search by ID
-2. **Query traces in external platforms**: Use the ID in Langfuse, Braintrust, or your observability platform
+2. **Query traces in external platforms**: Use the ID in Langfuse, Braintrust, MLflow, or your observability platform
 3. **Correlate with logs**: Include the trace ID in your application logs for cross-referencing
 4. **Share for debugging**: Provide trace IDs to support teams or developers for investigation
 
@@ -770,6 +770,7 @@ Mastra automatically creates spans for:
 - [Arize](/reference/v1/observability/tracing/exporters/arize) - Arize Phoenix and Arize AX integration
 - [Braintrust](/reference/v1/observability/tracing/exporters/braintrust) - Braintrust integration
 - [Langfuse](/reference/v1/observability/tracing/exporters/langfuse) - Langfuse integration
+- [MLflow](/reference/v1/observability/tracing/exporters/otel#mlflow) - MLflow OTLP endpoint setup
 - [OpenTelemetry](/reference/v1/observability/tracing/exporters/otel) - OTEL-compatible platforms
 
 ### Processors
````
````diff
@@ -83,12 +83,8 @@ The simplest way is to use Mastra's model router with `provider/model` strings:
 import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
 import { embedMany } from "ai";
 
-const embeddingModel = new ModelRouterEmbeddingModel(
-  "openai/text-embedding-3-small",
-);
-
 const { embeddings } = await embedMany({
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   values: chunks.map((chunk) => chunk.text),
 });
 ```
@@ -105,11 +101,11 @@ The model router automatically handles API key detection from environment variab
 You can also use AI SDK embedding models directly:
 
 ```ts showLineNumbers copy
-import { openai } from "@ai-sdk/openai";
 import { embedMany } from "ai";
+import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
 
 const { embeddings } = await embedMany({
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   values: chunks.map((chunk) => chunk.text),
 });
 ```
@@ -129,19 +125,22 @@ Here are some supported models:
 OpenAI (text-embedding-3 models):
 
 ```ts
+import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
+
 const { embeddings } = await embedMany({
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
+  options: {
     dimensions: 256, // Only supported in text-embedding-3 and later
-  }
+  },
   values: chunks.map((chunk) => chunk.text),
 });
 ```
 
-Google (text-embedding-
+Google (text-embedding-001):
 
 ```ts
 const { embeddings } = await embedMany({
-  model: google
+  model: "google/gemini-embedding-001", {
     outputDimensionality: 256, // Truncates excessive values from the end
   }),
   values: chunks.map((chunk) => chunk.text),
@@ -158,8 +157,6 @@ Here's an example showing document processing and embedding generation with both
 
 ```ts showLineNumbers copy
 import { embedMany } from "ai";
-import { openai } from "@ai-sdk/openai";
-import { cohere } from "@ai-sdk/cohere";
 
 import { MDocument } from "@mastra/rag";
 
@@ -177,16 +174,18 @@ const chunks = await doc.chunk({
 });
 
 // Generate embeddings with OpenAI
-
-
+import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
+
+const { embeddings } = await embedMany({
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small"),
   values: chunks.map((chunk) => chunk.text),
 });
 
 // OR
 
 // Generate embeddings with Cohere
-const { embeddings
-  model: cohere
+const { embeddings } = await embedMany({
+  model: "cohere/embed-english-v3.0",
   values: chunks.map((chunk) => chunk.text),
 });
 
````
````diff
@@ -20,7 +20,6 @@ To implement RAG, you process your documents into chunks, create embeddings, sto
 
 ```ts showLineNumbers copy
 import { embedMany } from "ai";
-import { openai } from "@ai-sdk/openai";
 import { PgVector } from "@mastra/pg";
 import { MDocument } from "@mastra/rag";
 import { z } from "zod";
@@ -36,9 +35,11 @@ const chunks = await doc.chunk({
 });
 
 // 3. Generate embeddings; we need to pass the text of each chunk
+import { ModelRouterEmbeddingModel } from "@mastra/core/llm";
+
 const { embeddings } = await embedMany({
   values: chunks.map((chunk) => chunk.text),
-  model:
+  model: new ModelRouterEmbeddingModel("openai/text-embedding-3-small")
 });
 
 // 4. Store in vector database
````