@mastra/mcp-docs-server 1.0.0-beta.5 → 1.0.0-beta.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.docs/organized/changelogs/%40mastra%2Fagent-builder.md +9 -9
- package/.docs/organized/changelogs/%40mastra%2Fai-sdk.md +67 -67
- package/.docs/organized/changelogs/%40mastra%2Fastra.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fchroma.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fclickhouse.md +57 -57
- package/.docs/organized/changelogs/%40mastra%2Fclient-js.md +110 -110
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare-d1.md +57 -57
- package/.docs/organized/changelogs/%40mastra%2Fcloudflare.md +57 -57
- package/.docs/organized/changelogs/%40mastra%2Fcodemod.md +6 -0
- package/.docs/organized/changelogs/%40mastra%2Fconvex.md +60 -0
- package/.docs/organized/changelogs/%40mastra%2Fcore.md +358 -358
- package/.docs/organized/changelogs/%40mastra%2Fcouchbase.md +11 -11
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-cloud.md +24 -24
- package/.docs/organized/changelogs/%40mastra%2Fdeployer-netlify.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fdeployer.md +79 -79
- package/.docs/organized/changelogs/%40mastra%2Fduckdb.md +42 -0
- package/.docs/organized/changelogs/%40mastra%2Fdynamodb.md +57 -57
- package/.docs/organized/changelogs/%40mastra%2Felasticsearch.md +61 -0
- package/.docs/organized/changelogs/%40mastra%2Fevals.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Flance.md +57 -57
- package/.docs/organized/changelogs/%40mastra%2Flibsql.md +55 -55
- package/.docs/organized/changelogs/%40mastra%2Floggers.md +12 -12
- package/.docs/organized/changelogs/%40mastra%2Fmcp-docs-server.md +17 -17
- package/.docs/organized/changelogs/%40mastra%2Fmcp.md +125 -125
- package/.docs/organized/changelogs/%40mastra%2Fmemory.md +36 -36
- package/.docs/organized/changelogs/%40mastra%2Fmongodb.md +57 -57
- package/.docs/organized/changelogs/%40mastra%2Fmssql.md +57 -57
- package/.docs/organized/changelogs/%40mastra%2Fopensearch.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fpg.md +59 -59
- package/.docs/organized/changelogs/%40mastra%2Fpinecone.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fplayground-ui.md +77 -77
- package/.docs/organized/changelogs/%40mastra%2Fqdrant.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Frag.md +43 -43
- package/.docs/organized/changelogs/%40mastra%2Freact.md +16 -0
- package/.docs/organized/changelogs/%40mastra%2Fs3vectors.md +9 -0
- package/.docs/organized/changelogs/%40mastra%2Fschema-compat.md +6 -0
- package/.docs/organized/changelogs/%40mastra%2Fserver.md +113 -113
- package/.docs/organized/changelogs/%40mastra%2Fturbopuffer.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fupstash.md +57 -57
- package/.docs/organized/changelogs/%40mastra%2Fvectorize.md +10 -10
- package/.docs/organized/changelogs/%40mastra%2Fvoice-google.md +19 -19
- package/.docs/organized/changelogs/create-mastra.md +15 -15
- package/.docs/organized/changelogs/mastra.md +30 -30
- package/.docs/organized/code-examples/agui.md +1 -0
- package/.docs/organized/code-examples/ai-elements.md +1 -1
- package/.docs/organized/code-examples/ai-sdk-useChat.md +1 -1
- package/.docs/organized/code-examples/ai-sdk-v5.md +2 -1
- package/.docs/organized/code-examples/assistant-ui.md +1 -1
- package/.docs/organized/code-examples/bird-checker-with-nextjs-and-eval.md +1 -1
- package/.docs/organized/code-examples/bird-checker-with-nextjs.md +1 -1
- package/.docs/organized/code-examples/crypto-chatbot.md +1 -1
- package/.docs/organized/code-examples/mcp-server-adapters.md +721 -0
- package/.docs/organized/code-examples/server-app-access.md +342 -0
- package/.docs/organized/code-examples/server-express-adapter.md +87 -0
- package/.docs/organized/code-examples/server-hono-adapter.md +85 -0
- package/.docs/raw/agents/agent-approval.mdx +189 -0
- package/.docs/raw/agents/guardrails.mdx +13 -9
- package/.docs/raw/agents/networks.mdx +1 -0
- package/.docs/raw/agents/overview.mdx +8 -152
- package/.docs/raw/agents/processors.mdx +279 -0
- package/.docs/raw/agents/structured-output.mdx +224 -0
- package/.docs/raw/deployment/cloud-providers/index.mdx +19 -26
- package/.docs/raw/deployment/cloud-providers/netlify-deployer.mdx +44 -13
- package/.docs/raw/evals/running-in-ci.mdx +0 -2
- package/.docs/raw/{guides/getting-started → getting-started}/manual-install.mdx +2 -2
- package/.docs/raw/getting-started/start.mdx +1 -1
- package/.docs/raw/guides/build-your-ui/ai-sdk-ui.mdx +8 -0
- package/.docs/raw/guides/getting-started/quickstart.mdx +1 -1
- package/.docs/raw/guides/guide/whatsapp-chat-bot.mdx +421 -0
- package/.docs/raw/guides/index.mdx +3 -35
- package/.docs/raw/guides/migrations/upgrade-to-v1/agent.mdx +11 -0
- package/.docs/raw/guides/migrations/upgrade-to-v1/workflows.mdx +37 -0
- package/.docs/raw/index.mdx +1 -1
- package/.docs/raw/memory/memory-processors.mdx +265 -79
- package/.docs/raw/memory/working-memory.mdx +11 -2
- package/.docs/raw/observability/overview.mdx +0 -1
- package/.docs/raw/observability/tracing/bridges/otel.mdx +200 -0
- package/.docs/raw/observability/tracing/exporters/arize.mdx +36 -0
- package/.docs/raw/observability/tracing/exporters/braintrust.mdx +19 -0
- package/.docs/raw/observability/tracing/exporters/langfuse.mdx +83 -0
- package/.docs/raw/observability/tracing/exporters/langsmith.mdx +12 -0
- package/.docs/raw/observability/tracing/exporters/otel.mdx +34 -22
- package/.docs/raw/observability/tracing/exporters/posthog.mdx +20 -0
- package/.docs/raw/observability/tracing/overview.mdx +76 -6
- package/.docs/raw/observability/tracing/processors/sensitive-data-filter.mdx +0 -1
- package/.docs/raw/rag/retrieval.mdx +23 -6
- package/.docs/raw/rag/vector-databases.mdx +93 -2
- package/.docs/raw/reference/agents/generate.mdx +55 -6
- package/.docs/raw/reference/agents/network.mdx +44 -0
- package/.docs/raw/reference/client-js/memory.mdx +43 -0
- package/.docs/raw/reference/client-js/workflows.mdx +92 -63
- package/.docs/raw/reference/deployer/netlify.mdx +1 -2
- package/.docs/raw/reference/evals/scorer-utils.mdx +362 -0
- package/.docs/raw/reference/index.mdx +1 -0
- package/.docs/raw/reference/observability/tracing/bridges/otel.mdx +177 -0
- package/.docs/raw/reference/observability/tracing/configuration.mdx +0 -4
- package/.docs/raw/reference/observability/tracing/exporters/arize.mdx +29 -0
- package/.docs/raw/reference/observability/tracing/exporters/langfuse.mdx +43 -0
- package/.docs/raw/reference/observability/tracing/exporters/langsmith.mdx +17 -1
- package/.docs/raw/reference/observability/tracing/exporters/otel.mdx +33 -43
- package/.docs/raw/reference/observability/tracing/instances.mdx +0 -4
- package/.docs/raw/reference/observability/tracing/interfaces.mdx +29 -4
- package/.docs/raw/reference/observability/tracing/spans.mdx +0 -4
- package/.docs/raw/reference/processors/language-detector.mdx +9 -2
- package/.docs/raw/reference/processors/message-history-processor.mdx +131 -0
- package/.docs/raw/reference/processors/moderation-processor.mdx +10 -3
- package/.docs/raw/reference/processors/pii-detector.mdx +10 -3
- package/.docs/raw/reference/processors/processor-interface.mdx +502 -0
- package/.docs/raw/reference/processors/prompt-injection-detector.mdx +9 -2
- package/.docs/raw/reference/processors/semantic-recall-processor.mdx +197 -0
- package/.docs/raw/reference/processors/system-prompt-scrubber.mdx +2 -2
- package/.docs/raw/reference/processors/tool-call-filter.mdx +125 -0
- package/.docs/raw/reference/processors/working-memory-processor.mdx +221 -0
- package/.docs/raw/reference/server/create-route.mdx +314 -0
- package/.docs/raw/reference/server/express-adapter.mdx +193 -0
- package/.docs/raw/reference/server/hono-adapter.mdx +174 -0
- package/.docs/raw/reference/server/mastra-server.mdx +316 -0
- package/.docs/raw/reference/server/routes.mdx +250 -0
- package/.docs/raw/reference/storage/cloudflare-d1.mdx +37 -0
- package/.docs/raw/reference/storage/convex.mdx +164 -0
- package/.docs/raw/reference/storage/lance.mdx +33 -0
- package/.docs/raw/reference/storage/libsql.mdx +37 -0
- package/.docs/raw/reference/storage/mongodb.mdx +39 -0
- package/.docs/raw/reference/storage/mssql.mdx +37 -0
- package/.docs/raw/reference/storage/postgresql.mdx +37 -0
- package/.docs/raw/reference/streaming/ChunkType.mdx +1 -1
- package/.docs/raw/reference/streaming/agents/stream.mdx +56 -1
- package/.docs/raw/reference/streaming/workflows/observeStream.mdx +7 -9
- package/.docs/raw/reference/streaming/workflows/{resumeStreamVNext.mdx → resumeStream.mdx} +51 -11
- package/.docs/raw/reference/streaming/workflows/stream.mdx +83 -24
- package/.docs/raw/reference/streaming/workflows/timeTravelStream.mdx +170 -0
- package/.docs/raw/reference/tools/mcp-client.mdx +128 -18
- package/.docs/raw/reference/vectors/convex.mdx +429 -0
- package/.docs/raw/reference/vectors/duckdb.mdx +462 -0
- package/.docs/raw/reference/vectors/elasticsearch.mdx +310 -0
- package/.docs/raw/reference/voice/google.mdx +159 -20
- package/.docs/raw/reference/workflows/run-methods/restart.mdx +142 -0
- package/.docs/raw/reference/workflows/run-methods/resume.mdx +44 -0
- package/.docs/raw/reference/workflows/run-methods/start.mdx +44 -0
- package/.docs/raw/reference/workflows/run-methods/timeTravel.mdx +310 -0
- package/.docs/raw/reference/workflows/run.mdx +27 -5
- package/.docs/raw/reference/workflows/step.mdx +13 -0
- package/.docs/raw/reference/workflows/workflow.mdx +19 -0
- package/.docs/raw/server-db/custom-adapters.mdx +380 -0
- package/.docs/raw/server-db/mastra-server.mdx +16 -8
- package/.docs/raw/server-db/request-context.mdx +0 -1
- package/.docs/raw/server-db/server-adapters.mdx +286 -0
- package/.docs/raw/server-db/storage.mdx +11 -0
- package/.docs/raw/streaming/overview.mdx +6 -6
- package/.docs/raw/streaming/tool-streaming.mdx +2 -2
- package/.docs/raw/streaming/workflow-streaming.mdx +5 -11
- package/.docs/raw/workflows/error-handling.mdx +1 -0
- package/.docs/raw/workflows/human-in-the-loop.mdx +4 -4
- package/.docs/raw/workflows/overview.mdx +56 -44
- package/.docs/raw/workflows/snapshots.mdx +1 -0
- package/.docs/raw/workflows/suspend-and-resume.mdx +85 -16
- package/.docs/raw/workflows/time-travel.mdx +313 -0
- package/.docs/raw/workflows/workflow-state.mdx +191 -0
- package/CHANGELOG.md +16 -0
- package/package.json +4 -4
- package/.docs/raw/agents/human-in-the-loop-with-tools.mdx +0 -91
- package/.docs/raw/reference/streaming/workflows/observeStreamVNext.mdx +0 -47
- package/.docs/raw/reference/streaming/workflows/streamVNext.mdx +0 -153

@@ -1,502 +1,502 @@

# @mastra/core

## 1.0.0-beta.7

### Minor Changes

- Add `disableInit` option to all storage adapters ([#10851](https://github.com/mastra-ai/mastra/pull/10851))

  Adds a new `disableInit` config option to all storage providers that allows users to disable automatic table creation/migrations at runtime. This is useful for CI/CD pipelines where you want to run migrations during deployment with elevated credentials, then run the application with `disableInit: true` so it doesn't attempt schema changes at runtime.

  ```typescript
  // CI/CD script - run migrations
  const storage = new PostgresStore({
    connectionString: DATABASE_URL,
    id: 'pg-storage',
  });
  await storage.init();

  // Runtime - skip auto-init
  const storage = new PostgresStore({
    connectionString: DATABASE_URL,
    id: 'pg-storage',
    disableInit: true,
  });
  ```

### Patch Changes

- Add time-to-first-token (TTFT) support for Langfuse integration ([#10781](https://github.com/mastra-ai/mastra/pull/10781))

  Adds `completionStartTime` to model generation spans, which Langfuse uses to calculate TTFT metrics. The timestamp is automatically captured when the first content chunk arrives during streaming.

  ```typescript
  // completionStartTime is now automatically captured and sent to Langfuse
  // enabling TTFT metrics in your Langfuse dashboard
  const result = await agent.stream('Hello');
  ```

- Updated OtelExporters, Bridge, and Arize packages to better implement GenAI v1.38.0 Otel Semantic Conventions. See: ([#10591](https://github.com/mastra-ai/mastra/pull/10591))
  https://github.com/open-telemetry/semantic-conventions/blob/v1.38.0/docs/gen-ai/README.md

- Standardize error IDs across all storage and vector stores using centralized helper functions (`createStorageErrorId` and `createVectorErrorId`). This ensures consistent error ID patterns (`MASTRA_STORAGE_{STORE}_{OPERATION}_{STATUS}` and `MASTRA_VECTOR_{STORE}_{OPERATION}_{STATUS}`) across the codebase for better error tracking and debugging. ([#10913](https://github.com/mastra-ai/mastra/pull/10913))
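
  A minimal sketch of how the standardized IDs compose. The helper names and ID patterns come from the entry above; the argument order and exact return values shown here are assumptions:

  ```typescript
  // Hypothetical usage - only the helper names and ID patterns are taken from
  // the changelog entry; argument order and output strings are assumptions.
  const storageErrorId = createStorageErrorId('PG', 'LIST_MESSAGES', 'FAILED');
  // expected shape: MASTRA_STORAGE_PG_LIST_MESSAGES_FAILED

  const vectorErrorId = createVectorErrorId('PINECONE', 'UPSERT', 'FAILED');
  // expected shape: MASTRA_VECTOR_PINECONE_UPSERT_FAILED
  ```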

- fix: generate unique text IDs for Anthropic/Google providers ([#10740](https://github.com/mastra-ai/mastra/pull/10740))

  Workaround for duplicate text-start/text-end IDs in multi-step agentic flows.

  The `@ai-sdk/anthropic` and `@ai-sdk/google` providers use numeric indices ("0", "1", etc.) for text block IDs that reset for each LLM call. This caused duplicate IDs when an agent does TEXT → TOOL → TEXT, breaking message ordering and storage.

  The fix replaces numeric IDs with UUIDs, maintaining a map per step so text-start, text-delta, and text-end chunks for the same block share the same UUID. OpenAI's UUIDs pass through unchanged.

  Related: #9909

- Fix sub-agent requestContext propagation in listAgentTools ([#10844](https://github.com/mastra-ai/mastra/pull/10844))

  Sub-agents with dynamic model configurations were broken because `requestContext` was not being passed to `getModel()` when creating agent tools. This caused sub-agents using function-based model configurations to receive an empty context instead of the parent's context.

  No code changes required for consumers - this fix restores expected behavior for dynamic model configurations in sub-agents.

- Fix ToolStream type error when piping streams with different types ([#10845](https://github.com/mastra-ai/mastra/pull/10845))

  Changes `ToolStream` to extend `WritableStream<unknown>` instead of `WritableStream<T>`. This fixes the TypeScript error when piping `objectStream` or `fullStream` to `writer` in workflow steps.

  Before:

  ```typescript
  // TypeError: ToolStream<ChunkType> is not assignable to WritableStream<Partial<StoryPlan>>
  await response.objectStream.pipeTo(writer);
  ```

  After:

  ```typescript
  // Works without type errors
  await response.objectStream.pipeTo(writer);
  ```

- feat: add native Perplexity provider support ([#10885](https://github.com/mastra-ai/mastra/pull/10885))

- When sending the first message to a new thread with PostgresStore, users would get a "Thread not found" error. This happened because the thread was created in memory but not persisted to the database before the MessageHistory output processor tried to save messages. ([#10881](https://github.com/mastra-ai/mastra/pull/10881))

  **Before:**

  ```ts
  threadObject = await memory.createThread({
    // ...
    saveThread: false, // thread not in DB yet
  });
  // Later: MessageHistory calls saveMessages() -> PostgresStore throws "Thread not found"
  ```

  **After:**

  ```ts
  threadObject = await memory.createThread({
    // ...
    saveThread: true, // thread persisted immediately
  });
  // MessageHistory can now save messages without error
  ```

- Emit error chunk and call onError when agent workflow step fails ([#10907](https://github.com/mastra-ai/mastra/pull/10907))

  When a workflow step fails (e.g., tool not found), the error is now properly emitted as an error chunk to the stream and the onError callback is called. This fixes the issue where agent.generate() would throw "promise 'text' was not resolved or rejected" instead of the actual error message.

- fix(core): use agent description when converting agent to tool ([#10879](https://github.com/mastra-ai/mastra/pull/10879))

- Adds native @ai-sdk/deepseek provider support instead of using the OpenAI-compatible fallback. ([#10822](https://github.com/mastra-ai/mastra/pull/10822))

  ```typescript
  const agent = new Agent({
    model: 'deepseek/deepseek-reasoner',
  });

  // With provider options for reasoning
  const response = await agent.generate('Solve this problem', {
    providerOptions: {
      deepseek: {
        thinking: { type: 'enabled' },
      },
    },
  });
  ```

  Also updates the doc generation scripts so DeepSeek provider options show up in the generated docs.

- Return state too if `includeState: true` is in `outputOptions` and workflow run is not successful ([#10806](https://github.com/mastra-ai/mastra/pull/10806))
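
  A hedged sketch of where this applies; `outputOptions.includeState` comes from the entry above, while the surrounding run API and field names are assumptions:

  ```typescript
  // Hedged sketch - outputOptions.includeState is from the entry;
  // the createRun/start call shape shown here is an assumption.
  const run = await workflow.createRun();
  const result = await run.start({
    inputData: { value: 1 },
    outputOptions: { includeState: true },
  });

  // With this change, state is returned even when the run is not successful
  // (e.g. failed or suspended), not only on success.
  console.log(result.status, result.state);
  ```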

- feat: Add partial response support for agent and workflow list endpoints ([#10886](https://github.com/mastra-ai/mastra/pull/10886))

  Add optional `partial` query parameter to `/api/agents` and `/api/workflows` endpoints to return minimal data without schemas, reducing payload size for list views:
  - When `partial=true`: tool schemas (inputSchema, outputSchema) are omitted
  - When `partial=true`: workflow steps are replaced with stepCount integer
  - When `partial=true`: workflow root schemas (inputSchema, outputSchema) are omitted
  - Maintains backward compatibility when partial parameter is not provided

  ## Server Endpoint Usage

  ```http
  # Get partial agent data (no tool schemas)
  GET /api/agents?partial=true

  # Get full agent data (default behavior)
  GET /api/agents

  # Get partial workflow data (stepCount instead of steps, no schemas)
  GET /api/workflows?partial=true

  # Get full workflow data (default behavior)
  GET /api/workflows
  ```

  ## Client SDK Usage

  ```typescript
  import { MastraClient } from '@mastra/client-js';

  const client = new MastraClient({ baseUrl: 'http://localhost:4111' });

  // Get partial agent list (smaller payload)
  const partialAgents = await client.listAgents({ partial: true });

  // Get full agent list with tool schemas
  const fullAgents = await client.listAgents();

  // Get partial workflow list (smaller payload)
  const partialWorkflows = await client.listWorkflows({ partial: true });

  // Get full workflow list with steps and schemas
  const fullWorkflows = await client.listWorkflows();
  ```

- Fix processInputStep so it runs correctly. ([#10909](https://github.com/mastra-ai/mastra/pull/10909))

- Updated dependencies [[`6c59a40`](https://github.com/mastra-ai/mastra/commit/6c59a40e0ad160467bd13d63a8a287028d75b02d), [`3076c67`](https://github.com/mastra-ai/mastra/commit/3076c6778b18988ae7d5c4c5c466366974b2d63f), [`0bada2f`](https://github.com/mastra-ai/mastra/commit/0bada2f2c1234932cf30c1c47a719ffb64b801c5), [`cc60ff6`](https://github.com/mastra-ai/mastra/commit/cc60ff616541a3b0fb531a7e469bf9ae7bb90528)]:
  - @mastra/observability@1.0.0-beta.3

## 1.0.0-beta.6

### Major Changes

- Changed `.branch()` result schema to make all branch output fields optional. ([#10693](https://github.com/mastra-ai/mastra/pull/10693))

  **Breaking change**: Branch outputs are now optional since only one branch executes at runtime. Update your workflow schemas to handle optional branch results.

  **Before:**

  ```typescript
  const workflow = createWorkflow({...})
    .branch([
      [condition1, stepA], // outputSchema: { result: z.string() }
      [condition2, stepB], // outputSchema: { data: z.number() }
    ])
    .map({
      finalResult: { step: stepA, path: 'result' } // Expected non-optional
    });
  ```

  **After:**

  ```typescript
  const workflow = createWorkflow({...})
    .branch([
      [condition1, stepA],
      [condition2, stepB],
    ])
    .map({
      finalResult: {
        step: stepA,
        path: 'result' // Now optional - provide fallback
      }
    });
  ```

  **Why**: Branch conditionals execute only one path, so non-executed branches don't produce outputs. The type system now correctly reflects this runtime behavior.

  Related issue: https://github.com/mastra-ai/mastra/issues/10642

### Minor Changes

- Memory system now uses processors. Memory processors (`MessageHistory`, `SemanticRecall`, `WorkingMemory`) are now exported from `@mastra/memory/processors` and automatically added to the agent pipeline based on your memory config. Core processors (`ToolCallFilter`, `TokenLimiter`) remain in `@mastra/core/processors`. ([#9254](https://github.com/mastra-ai/mastra/pull/9254))
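
  The new import locations named in the entry (the processors are wired up automatically from your memory config; importing them directly is only needed for custom setups):

  ```typescript
  // Import paths are taken from the entry above.
  import { MessageHistory, SemanticRecall, WorkingMemory } from '@mastra/memory/processors';
  import { ToolCallFilter, TokenLimiter } from '@mastra/core/processors';
  ```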

- Add reserved keys in RequestContext for secure resourceId/threadId setting from middleware ([#10657](https://github.com/mastra-ai/mastra/pull/10657))

  This allows middleware to securely set `resourceId` and `threadId` via reserved keys in RequestContext (`MASTRA_RESOURCE_ID_KEY` and `MASTRA_THREAD_ID_KEY`), which take precedence over client-provided values for security.
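
  A hypothetical middleware sketch: the reserved key names come from the entry, but the import path, middleware shape, and request-context API shown here are assumptions:

  ```typescript
  // Hypothetical sketch - only MASTRA_RESOURCE_ID_KEY / MASTRA_THREAD_ID_KEY are from the entry;
  // the import path and the requestContext.set() call are assumptions.
  import { MASTRA_RESOURCE_ID_KEY, MASTRA_THREAD_ID_KEY } from '@mastra/core';

  const secureIdsMiddleware = async (
    c: { requestContext: Map<string, unknown> },
    next: () => Promise<void>,
  ) => {
    const userId = 'user-123'; // e.g. derived from your auth layer
    // Values set under the reserved keys take precedence over client-provided ids.
    c.requestContext.set(MASTRA_RESOURCE_ID_KEY, userId);
    c.requestContext.set(MASTRA_THREAD_ID_KEY, `thread-${userId}`);
    await next();
  };
  ```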

- feat(workflows): add suspendData parameter to step execute function ([#10734](https://github.com/mastra-ai/mastra/pull/10734))

  Adds a new `suspendData` parameter to workflow step execute functions that provides access to the data originally passed to `suspend()` when the step was suspended. This enables steps to access context about why they were suspended when they are later resumed.

  **New Features:**
  - `suspendData` parameter automatically populated in step execute function when resuming
  - Type-safe access to suspend data matching the step's `suspendSchema`
  - Backward compatible - existing workflows continue to work unchanged

  **Example:**

  ```typescript
  const step = createStep({
    suspendSchema: z.object({ reason: z.string() }),
    resumeSchema: z.object({ approved: z.boolean() }),
    execute: async ({ suspend, suspendData, resumeData }) => {
      if (!resumeData?.approved) {
        return await suspend({ reason: 'Approval required' });
      }

      // Access original suspend data when resuming
      console.log(`Resuming after: ${suspendData?.reason}`);
      return { result: 'Approved' };
    },
  });
  ```

- feat(storage): support querying messages from multiple threads ([#10663](https://github.com/mastra-ai/mastra/pull/10663))
  - Fixed TypeScript errors where `threadId: string | string[]` was being passed to places expecting `Scalar` type
  - Added proper multi-thread support for `listMessages` across all adapters when `threadId` is an array
  - Updated `_getIncludedMessages` to look up message threadId by ID (since message IDs are globally unique)
  - **upstash**: Added `msg-idx:{messageId}` index for O(1) message lookups (backwards compatible with fallback to scan for old messages, with automatic backfill)
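
  A minimal sketch of the multi-thread query; `threadId: string | string[]` comes from the entry above, while the exact `listMessages` options shape is an assumption:

  ```typescript
  // Hedged sketch - storage is any Mastra storage adapter instance (e.g. PostgresStore);
  // passing an array of thread IDs now queries across threads.
  const messages = await storage.listMessages({
    threadId: ['thread-alpha', 'thread-beta'],
  });
  ```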

- Adds trace tagging support to the Braintrust and Langfuse tracing exporters. ([#10765](https://github.com/mastra-ai/mastra/pull/10765))

- Add `messageList` parameter to `processOutputStream` for accessing remembered messages during streaming ([#10608](https://github.com/mastra-ai/mastra/pull/10608))

- Unify transformScoreRow functions across storage adapters ([#10648](https://github.com/mastra-ai/mastra/pull/10648))

  Added a unified `transformScoreRow` function in `@mastra/core/storage` that provides schema-driven row transformation for score data. This eliminates code duplication across 10 storage adapters while maintaining store-specific behavior through configurable options:
  - `preferredTimestampFields`: Preferred source fields for timestamps (PostgreSQL, Cloudflare D1)
  - `convertTimestamps`: Convert timestamp strings to Date objects (MSSQL, MongoDB, ClickHouse)
  - `nullValuePattern`: Skip values matching pattern (ClickHouse's `'_null_'`)
  - `fieldMappings`: Map source column names to schema fields (LibSQL's `additionalLLMContext`)

  Each store adapter now uses the unified function with appropriate options, reducing ~200 lines of duplicate transformation logic while ensuring consistent behavior across all storage backends.
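
  A hedged sketch of adapter-side usage, assuming a `(row, options)` signature; only the option names are taken from the entry, while the signature and field names are assumptions:

  ```typescript
  // Hypothetical usage - option names are from the entry above.
  const rawRow = { id: 'score-1', createdAt: '2024-01-01T00:00:00Z' } as Record<string, unknown>;

  const score = transformScoreRow(rawRow, {
    convertTimestamps: true,            // e.g. string timestamps -> Date (MSSQL, MongoDB, ClickHouse)
    nullValuePattern: '_null_',         // e.g. ClickHouse null sentinel
    preferredTimestampFields: ['createdAtZ', 'createdAt'], // hypothetical field names
  });
  ```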

### Patch Changes

- dependencies updates: ([#10110](https://github.com/mastra-ai/mastra/pull/10110))
  - Updated dependency [`hono-openapi@^1.1.1` ↗︎](https://www.npmjs.com/package/hono-openapi/v/1.1.1) (from `^0.4.8`, in `dependencies`)

- Unexpected JSON parse issue: log the error but don't fail ([#10241](https://github.com/mastra-ai/mastra/pull/10241))

- Fixed a bug in agent networks where sometimes the task name was empty ([#10629](https://github.com/mastra-ai/mastra/pull/10629))

- Adds `tool-result` and `tool-error` chunks to the processor.processOutputStream path. Processors now have access to these two chunks. ([#10645](https://github.com/mastra-ai/mastra/pull/10645))

- Include `.input` in workflow results for both engines and remove the option to omit them from Inngest workflows. ([#10688](https://github.com/mastra-ai/mastra/pull/10688))

- The `getSpeakers` endpoint returns an empty array if voice is not configured on the agent, and the `getListeners` endpoint returns `{ enabled: false }` if voice is not configured on the agent. ([#10560](https://github.com/mastra-ai/mastra/pull/10560))

  When no voice is set on an agent, don't throw an error; by default set voice to undefined rather than DefaultVoice, which throws errors when it is accessed.

- SimpleAuth and improved CloudAuth ([#10490](https://github.com/mastra-ai/mastra/pull/10490))

- When LLMs like Claude Sonnet 4.5 and Gemini 2.5 call tools with all-optional parameters, they send `args: undefined` instead of `args: {}`. This caused validation to fail with "root: Required". ([#10728](https://github.com/mastra-ai/mastra/pull/10728))

  The fix normalizes `undefined`/`null` to `{}` for object schemas and `[]` for array schemas before validation.
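
  An illustrative sketch of that normalization rule (not the actual implementation):

  ```typescript
  // Illustrative only - mirrors the rule described in the entry above.
  function normalizeToolArgs(args: unknown, schemaType: 'object' | 'array' | 'other'): unknown {
    if (args === undefined || args === null) {
      if (schemaType === 'object') return {}; // all-optional object parameters
      if (schemaType === 'array') return [];  // array parameters
    }
    return args;
  }

  // `args: undefined` from the model now validates against an all-optional object schema.
  const normalized = normalizeToolArgs(undefined, 'object'); // -> {}
  ```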

- Fixed tool validation error messages so logs show Zod validation errors directly instead of hiding them inside structured JSON. ([#10579](https://github.com/mastra-ai/mastra/pull/10579))

- Fix error when spreading config objects in Mastra constructor ([#10718](https://github.com/mastra-ai/mastra/pull/10718))

  Adds validation guards to handle undefined/null values that can occur when config objects are spread (`{ ...config }`). Previously, if getters or non-enumerable properties resulted in undefined values during spread, the constructor would throw cryptic errors when accessing `.id` or `.name` on undefined objects.

- Fix GPT-5/o3 reasoning models failing with "required reasoning item" errors when using memory with tools. Empty reasoning is now stored with providerMetadata to preserve OpenAI's item_reference. ([#10585](https://github.com/mastra-ai/mastra/pull/10585))

- Fix generateTitle model type to accept AI SDK LanguageModelV2 ([#10541](https://github.com/mastra-ai/mastra/pull/10541))

  Updated the `generateTitle.model` config option to accept `MastraModelConfig` instead of `MastraLanguageModel`. This allows users to pass raw AI SDK `LanguageModelV2` models (e.g., `anthropic.languageModel('claude-3-5-haiku-20241022')`) directly without type errors.

  Previously, passing a standard `LanguageModelV2` would fail because `MastraLanguageModelV2` has different `doGenerate`/`doStream` return types. Now `MastraModelConfig` is used consistently across:
  - `memory/types.ts` - `generateTitle.model` config
  - `agent.ts` - `genTitle`, `generateTitleFromUserMessage`, `resolveTitleGenerationConfig`
  - `agent-legacy.ts` - `AgentLegacyCapabilities` interface
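
  A hedged sketch of the config this unblocks; the raw AI SDK model call is from the entry, while the surrounding `Memory` options shape is an assumption:

  ```typescript
  // Hedged sketch - generateTitle.model accepting a raw LanguageModelV2 is from the entry;
  // the surrounding Memory config shape is an assumption.
  import { anthropic } from '@ai-sdk/anthropic';
  import { Memory } from '@mastra/memory';

  const memory = new Memory({
    options: {
      threads: {
        generateTitle: {
          model: anthropic.languageModel('claude-3-5-haiku-20241022'), // no type error now
        },
      },
    },
  });
  ```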

- Fix message ordering when using toAISdkV5Messages or prepareStep ([#10686](https://github.com/mastra-ai/mastra/pull/10686))

  Messages without `createdAt` timestamps were getting shuffled because they all received identical timestamps during conversion. Now messages are assigned monotonically increasing timestamps via `generateCreatedAt()`, preserving input order.

  Before:

  ```
  Input: [user: "hello", assistant: "Hi!", user: "bye"]
  Output: [user: "bye", assistant: "Hi!", user: "hello"] // shuffled!
  ```

  After:

  ```
  Input: [user: "hello", assistant: "Hi!", user: "bye"]
  Output: [user: "hello", assistant: "Hi!", user: "bye"] // correct order
  ```

- Fix Scorer not using custom gateways registered with Mastra ([#10778](https://github.com/mastra-ai/mastra/pull/10778))

  Scorers now have access to custom gateways when resolving models. Previously, calling `resolveModelConfig` in the scorer didn't pass the Mastra instance, so custom gateways were never available.

- Fix workflow run status not being updated from storage snapshot in createRun ([#10664](https://github.com/mastra-ai/mastra/pull/10664))

  When createRun is called with an existing runId, it now correctly updates the run's status from the storage snapshot. This fixes the issue where different workflow instances (e.g., different API requests) would get a run with 'pending' status instead of the correct status from storage (e.g., 'suspended').

- Pass resourceId and threadId to network agent's subAgent when it has its own memory ([#10592](https://github.com/mastra-ai/mastra/pull/10592))

- use `agent.getMemory` to fetch the memory instance on the Agent class to make sure that storage gets set if memory doesn't set it itself. ([#10556](https://github.com/mastra-ai/mastra/pull/10556))

- Built-in processors that use internal agents (PromptInjectionDetector, ModerationProcessor, PIIDetector, LanguageDetector, StructuredOutputProcessor) now accept `providerOptions` to control model behavior. ([#10651](https://github.com/mastra-ai/mastra/pull/10651))

  This lets you pass provider-specific settings like `reasoningEffort` for OpenAI thinking models:

  ```typescript
  const processor = new PromptInjectionDetector({
    model: 'openai/o1-mini',
    threshold: 0.7,
    strategy: 'block',
    providerOptions: {
      openai: {
        reasoningEffort: 'low',
      },
    },
  });
  ```

- Improved typing for `workflow.then` to allow the provided step's `inputSchema` to be a subset of the previous step's `outputSchema`. Also errors if the provided step's `inputSchema` is a superset of the previous step's `outputSchema`. ([#10763](https://github.com/mastra-ai/mastra/pull/10763))
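
  A small sketch of the subset rule using the standard workflow builder (the schemas here are illustrative):

  ```typescript
  import { createStep, createWorkflow } from '@mastra/core/workflows';
  import { z } from 'zod';

  const stepA = createStep({
    id: 'step-a',
    inputSchema: z.object({ city: z.string() }),
    outputSchema: z.object({ temperature: z.number(), conditions: z.string() }),
    execute: async () => ({ temperature: 72, conditions: 'sunny' }),
  });

  // OK: step-b's inputSchema is a subset of step-a's outputSchema.
  const stepB = createStep({
    id: 'step-b',
    inputSchema: z.object({ temperature: z.number() }),
    outputSchema: z.object({ summary: z.string() }),
    execute: async ({ inputData }) => ({ summary: `It is ${inputData.temperature}F` }),
  });

  // A step whose inputSchema requires fields step-a never outputs (a superset)
  // would now be a type error when passed to .then().
  export const workflow = createWorkflow({
    id: 'weather',
    inputSchema: z.object({ city: z.string() }),
    outputSchema: z.object({ summary: z.string() }),
  })
    .then(stepA)
    .then(stepB)
    .commit();
  ```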

- Fix type issue with workflow `.parallel()` when passing multiple steps, one or more of which has a `resumeSchema` provided. ([#10708](https://github.com/mastra-ai/mastra/pull/10708))

- Adds bidirectional integration with otel tracing via a new @mastra/otel-bridge package. ([#10482](https://github.com/mastra-ai/mastra/pull/10482))

- Adds `processInputStep` method to the Processor interface. Unlike `processInput` which runs once at the start, this runs at each step of the agentic loop (including tool call continuations). ([#10650](https://github.com/mastra-ai/mastra/pull/10650))

  ```ts
  const processor: Processor = {
    id: 'my-processor',
    processInputStep: async ({ messages, messageList, stepNumber, systemMessages }) => {
      // Transform messages at each step before LLM call
      return messageList;
    },
  };
  ```

- When using output processors with `agent.generate()`, `result.text` was returning the unprocessed LLM response instead of the processed text. ([#10735](https://github.com/mastra-ai/mastra/pull/10735))

  **Before:**

  ```ts
  const result = await agent.generate('hello');
  result.text; // "hello world" (unprocessed)
  result.response.messages[0].content[0].text; // "HELLO WORLD" (processed)
  ```

  **After:**

  ```ts
  const result = await agent.generate('hello');
  result.text; // "HELLO WORLD" (processed)
  ```

  The bug was caused by the `text` delayed promise being resolved twice - first correctly with the processed text, then overwritten with the unprocessed buffered text.

- Refactored default engine to fit durable execution better, and the inngest engine to match. ([#10627](https://github.com/mastra-ai/mastra/pull/10627))
  Also fixes requestContext persistence by relying on inngest step memoization.

  Unifies some of the stepResults and error formats in both engines.

- Allow direct access to the server app handle from the Mastra instance. ([#10598](https://github.com/mastra-ai/mastra/pull/10598))

  ```ts
  // Before: HTTP request to localhost
  const response = await fetch(`http://localhost:5000/api/tools`);

  // After: Direct call via app.fetch()
  const app = mastra.getServerApp<Hono>();
  const response = await app.fetch(new Request('http://internal/api/tools'));
  ```

  - Added `mastra.getServerApp<T>()` to access the underlying Hono/Express app
  - Added `mastra.getMastraServer()` and `mastra.setMastraServer()` for adapter access
  - Added `MastraServerBase` class in `@mastra/core/server` for adapter implementations
  - Server adapters now auto-register with Mastra in their constructor

- Fix network agent not getting `text-delta` from subAgent when `.stream` is used ([#10533](https://github.com/mastra-ai/mastra/pull/10533))

- Fix discriminatedUnion schema information lost when a JSON schema is converted to Zod ([#10500](https://github.com/mastra-ai/mastra/pull/10500))

- Fix writer.custom not working during workflow resume operations ([#10720](https://github.com/mastra-ai/mastra/pull/10720))

  When a workflow step is resumed, the writer parameter was not being properly passed through, causing writer.custom() calls to fail. This fix ensures the writableStream parameter is correctly passed to both run.resume() and run.start() calls in the workflow execution engine, allowing custom events to be emitted properly during resume operations.

- Fix corrupted provider-registry.json file in global cache and regenerate corrupted files ([#10606](https://github.com/mastra-ai/mastra/pull/10606))

- Fix TypeScript error when using Zod schemas in `defaultOptions.structuredOutput` ([#10710](https://github.com/mastra-ai/mastra/pull/10710))

  Previously, defining `structuredOutput.schema` in `defaultOptions` would cause a TypeScript error because the type only accepted `undefined`. Now any valid `OutputSchema` is correctly accepted.
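
  A hedged sketch of the configuration that now type-checks; `defaultOptions.structuredOutput.schema` is from the entry, while the rest of the agent config is illustrative (mirroring other examples in this changelog):

  ```typescript
  // Hedged sketch - only defaultOptions.structuredOutput is taken from the entry.
  import { Agent } from '@mastra/core/agent';
  import { z } from 'zod';

  const agent = new Agent({
    instructions: 'Extract contact details.',
    model: 'openai/gpt-4o-mini',
    defaultOptions: {
      structuredOutput: {
        schema: z.object({ name: z.string(), email: z.string() }), // no longer a TS error
      },
    },
  });
  ```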

- Add support for `providerOptions` when defining tools. This allows developers to specify provider-specific configurations (like Anthropic's `cacheControl`) per tool. ([#10649](https://github.com/mastra-ai/mastra/pull/10649))

  ```typescript
  createTool({
    id: 'my-tool',
    providerOptions: {
      anthropic: { cacheControl: { type: 'ephemeral' } },
    },
    // ...
  });
  ```

- Fixed OpenAI reasoning message merging so distinct reasoning items are no longer dropped when they share a message ID. Prevents downstream errors where a function call is missing its required "reasoning" item. See #9005. ([#10614](https://github.com/mastra-ai/mastra/pull/10614))

- Updated dependencies [[`103586c`](https://github.com/mastra-ai/mastra/commit/103586cb23ebcd2466c7f68a71674d37cc10e263), [`61a5705`](https://github.com/mastra-ai/mastra/commit/61a570551278b6743e64243b3ce7d73de915ca8a), [`db70a48`](https://github.com/mastra-ai/mastra/commit/db70a48aeeeeb8e5f92007e8ede52c364ce15287), [`f03ae60`](https://github.com/mastra-ai/mastra/commit/f03ae60500fe350c9d828621006cdafe1975fdd8)]:
  - @mastra/observability@1.0.0-beta.2
  - @mastra/schema-compat@1.0.0-beta.2

## 1.0.0-beta.5

### Patch Changes

- Add Azure OpenAI gateway ([#9990](https://github.com/mastra-ai/mastra/pull/9990))

  The Azure OpenAI gateway supports three configuration modes:
  1. **Static deployments**: Provide deployment names from Azure Portal
  2. **Dynamic discovery**: Query Azure Management API for available deployments
  3. **Manual**: Specify deployment names when creating agents

  ## Usage

  ```typescript
  import { Mastra } from '@mastra/core';
  import { AzureOpenAIGateway } from '@mastra/core/llm';

  // Static mode (recommended)
  export const mastra = new Mastra({
    gateways: [
      new AzureOpenAIGateway({
        resourceName: process.env.AZURE_RESOURCE_NAME!,
        apiKey: process.env.AZURE_API_KEY!,
        deployments: ['gpt-4-prod', 'gpt-35-turbo-dev'],
      }),
    ],
  });

  // Dynamic discovery mode
  export const mastra = new Mastra({
    gateways: [
      new AzureOpenAIGateway({
        resourceName: process.env.AZURE_RESOURCE_NAME!,
        apiKey: process.env.AZURE_API_KEY!,
        management: {
          tenantId: process.env.AZURE_TENANT_ID!,
          clientId: process.env.AZURE_CLIENT_ID!,
          clientSecret: process.env.AZURE_CLIENT_SECRET!,
          subscriptionId: process.env.AZURE_SUBSCRIPTION_ID!,
          resourceGroup: 'my-resource-group',
        },
      }),
    ],
  });

  // Use Azure OpenAI models
  const agent = new Agent({
    model: 'azure-openai/gpt-4-deployment',
    instructions: 'You are a helpful assistant',
  });
  ```

... 5916 more lines hidden. See full changelog in package directory.