@mastra/core 0.1.27-alpha.78 → 0.1.27-alpha.80
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -37
- package/package.json +20 -2
package/README.md
CHANGED
|
@@ -20,21 +20,9 @@ npm install @mastra/core
|
|
|
20
20
|
- Telemetry and logging infrastructure
|
|
21
21
|
- Text-to-speech capabilities
|
|
22
22
|
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
### Actions (`/action`)
|
|
26
|
-
|
|
27
|
-
Actions are the fundamental building blocks of Mastra workflows. They represent discrete, executable tasks with well-defined inputs and outputs. Each action can validate its inputs using Zod schemas and can access Mastra's core services through the execution context.
|
|
23
|
+
For comprehensive documentation, visit our [official documentation](https://mastra.ai/docs).
|
|
28
24
|
|
|
29
|
-
|
|
30
|
-
interface IAction<TId, TSchemaIn, TSchemaOut, TContext> {
|
|
31
|
-
id: TId;
|
|
32
|
-
description?: string;
|
|
33
|
-
inputSchema?: TSchemaIn;
|
|
34
|
-
outputSchema?: TSchemaOut;
|
|
35
|
-
execute: (context: TContext) => Promise<TSchemaOut>;
|
|
36
|
-
}
|
|
37
|
-
```
|
|
25
|
+
## Core Components
|
|
38
26
|
|
|
39
27
|
### Agents (`/agent`)
|
|
40
28
|
|
|
@@ -54,6 +42,8 @@ const agent = new Agent({
|
|
|
54
42
|
});
|
|
55
43
|
```
|
|
56
44
|
|
|
45
|
+
[More agent documentation →](https://mastra.ai/docs/reference/agents/overview)
|
|
46
|
+
|
|
57
47
|
### Embeddings (`/embeddings`)
|
|
58
48
|
|
|
59
49
|
The embeddings module provides a unified interface for converting text into vector representations across multiple AI providers. These vectors are essential for semantic search, similarity comparisons, and other NLP tasks.
|
|
@@ -67,14 +57,9 @@ const embeddings = await embed('text to embed', {
|
|
|
67
57
|
});
|
|
68
58
|
```
|
|
69
59
|
|
|
70
|
-
Supported providers
|
|
60
|
+
Supported providers right now are OpenAI, Cohere, Amazon Bedrock, Google AI, Mistral, and Voyage.
|
|
71
61
|
|
|
72
|
-
|
|
73
|
-
- Cohere: Multilingual support
|
|
74
|
-
- Amazon Bedrock: Enterprise-grade embeddings
|
|
75
|
-
- Google AI: PaLM-based embeddings
|
|
76
|
-
- Mistral: Open-source alternative
|
|
77
|
-
- Voyage: Specialized embeddings
|
|
62
|
+
[More embeddings documentation →](https://mastra.ai/docs/reference/embeddings/overview)
|
|
78
63
|
|
|
79
64
|
### Evaluations (`/eval`)
|
|
80
65
|
|
|
@@ -91,22 +76,7 @@ class CustomMetric extends Metric {
|
|
|
91
76
|
}
|
|
92
77
|
```
|
|
93
78
|
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
Hooks provide an event system for monitoring and extending Mastra's functionality. They allow you to react to key events in the AI pipeline, enabling logging, monitoring, and custom behavior injection.
|
|
97
|
-
|
|
98
|
-
```typescript
|
|
99
|
-
import { AvailableHooks, registerHook } from '@mastra/core';
|
|
100
|
-
|
|
101
|
-
registerHook(AvailableHooks.ON_GENERATION, ({ input, output, metric, runId, agentName }) => {
|
|
102
|
-
// Handle generation event
|
|
103
|
-
});
|
|
104
|
-
```
|
|
105
|
-
|
|
106
|
-
Available hooks:
|
|
107
|
-
|
|
108
|
-
- `ON_EVALUATION`: Triggered after evaluation, useful for logging and analysis
|
|
109
|
-
- `ON_GENERATION`: Triggered after text generation, perfect for monitoring outputs
|
|
79
|
+
[More evaluations documentation →](https://mastra.ai/docs/reference/eval/overview)
|
|
110
80
|
|
|
111
81
|
### Memory (`/memory`)
|
|
112
82
|
|
|
@@ -120,6 +90,8 @@ const memory = new MastraMemory({
|
|
|
120
90
|
});
|
|
121
91
|
```
|
|
122
92
|
|
|
93
|
+
[More memory documentation →](https://mastra.ai/docs/reference/memory/overview)
|
|
94
|
+
|
|
123
95
|
### Vector Stores (`/vector`)
|
|
124
96
|
|
|
125
97
|
Vector stores provide the infrastructure for storing and querying vector embeddings. They support semantic search, similarity matching, and efficient vector operations across different backend implementations.
|
|
@@ -132,6 +104,8 @@ class CustomVectorStore extends MastraVector {
|
|
|
132
104
|
}
|
|
133
105
|
```
|
|
134
106
|
|
|
107
|
+
[More vector stores documentation →](https://mastra.ai/docs/reference/vector/overview)
|
|
108
|
+
|
|
135
109
|
### Workflows (`/workflows`)
|
|
136
110
|
|
|
137
111
|
Workflows orchestrate complex AI tasks by combining multiple actions into a coherent sequence. They handle state management, error recovery, and can include conditional logic and parallel execution.
|
|
@@ -147,6 +121,8 @@ const workflow = new Workflow({
|
|
|
147
121
|
});
|
|
148
122
|
```
|
|
149
123
|
|
|
124
|
+
[More workflows documentation →](https://mastra.ai/docs/reference/workflows/overview)
|
|
125
|
+
|
|
150
126
|
### Tools (`/tools`)
|
|
151
127
|
|
|
152
128
|
Tools are functions that agents can use to interact with external systems or perform specific tasks. Each tool has a clear description and schema, making it easy for AI to understand and use them effectively.
|
|
@@ -163,6 +139,8 @@ const tool = new ToolAction({
|
|
|
163
139
|
});
|
|
164
140
|
```
|
|
165
141
|
|
|
142
|
+
[More tools documentation →](https://mastra.ai/docs/reference/tools/overview)
|
|
143
|
+
|
|
166
144
|
### Logger (`/logger`)
|
|
167
145
|
|
|
168
146
|
The logging system provides structured, leveled logging with multiple transport options. It supports debug information, performance monitoring, and error tracking across your AI applications.
|
|
@@ -176,6 +154,8 @@ const logger = createLogger({
|
|
|
176
154
|
});
|
|
177
155
|
```
|
|
178
156
|
|
|
157
|
+
[More logging documentation →](https://mastra.ai/docs/reference/observability/logging)
|
|
158
|
+
|
|
179
159
|
### Telemetry (`/telemetry`)
|
|
180
160
|
|
|
181
161
|
Telemetry provides OpenTelemetry integration for comprehensive monitoring of your AI systems. Track latency, success rates, and system health with distributed tracing and metrics collection.
|
|
@@ -187,3 +167,12 @@ const telemetry = Telemetry.init({
|
|
|
187
167
|
serviceName: 'my-service',
|
|
188
168
|
});
|
|
189
169
|
```
|
|
170
|
+
|
|
171
|
+
[More Telemetry documentation →](https://mastra.ai/docs/reference/observability/telemetry)
|
|
172
|
+
|
|
173
|
+
## Additional Resources
|
|
174
|
+
|
|
175
|
+
- [Getting Started Guide](https://mastra.ai/docs/getting-started/installation)
|
|
176
|
+
- [API Reference](https://mastra.ai/docs/reference)
|
|
177
|
+
- [Examples](https://mastra.ai/docs/examples)
|
|
178
|
+
- [Deployment Guide](https://mastra.ai/docs/deployment/overview)
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@mastra/core",
|
|
3
|
-
"version": "0.1.27-alpha.78",
|
|
3
|
+
"version": "0.1.27-alpha.80",
|
|
4
4
|
"license": "MIT",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "dist/core.esm.js",
|
|
@@ -95,9 +95,27 @@
|
|
|
95
95
|
"size-limit": "^11.1.4",
|
|
96
96
|
"ts-node": "^10.9.2",
|
|
97
97
|
"tslib": "^2.6.3",
|
|
98
|
-
"typescript": "5.
|
|
98
|
+
"typescript": "^5.7.3",
|
|
99
99
|
"vitest": "^3.0.4"
|
|
100
100
|
},
|
|
101
|
+
"keywords": [
|
|
102
|
+
"ai",
|
|
103
|
+
"llm",
|
|
104
|
+
"llms",
|
|
105
|
+
"agent",
|
|
106
|
+
"agents",
|
|
107
|
+
"vectorstore",
|
|
108
|
+
"embeddings",
|
|
109
|
+
"rag",
|
|
110
|
+
"evals",
|
|
111
|
+
"memory",
|
|
112
|
+
"tools",
|
|
113
|
+
"telemetry",
|
|
114
|
+
"typescript",
|
|
115
|
+
"opentelemetry",
|
|
116
|
+
"otel",
|
|
117
|
+
"framework"
|
|
118
|
+
],
|
|
101
119
|
"scripts": {
|
|
102
120
|
"check": "tsc --noEmit",
|
|
103
121
|
"analyze": "size-limit --why",
|