@mastra/sentry 1.0.0-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/README.md +225 -0
- package/dist/index.cjs +479 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.ts +8 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +457 -0
- package/dist/index.js.map +1 -0
- package/dist/tracing.d.ts +86 -0
- package/dist/tracing.d.ts.map +1 -0
- package/package.json +65 -0
package/CHANGELOG.md
ADDED
@@ -0,0 +1,14 @@

# @mastra/sentry

## 1.0.0-beta.2

### Major Changes

- Adding @mastra/sentry observability exporter for AI tracing with OpenTelemetry GenAI semantic conventions ([#11890](https://github.com/mastra-ai/mastra/pull/11890))

### Patch Changes

- Updated dependencies [[`ebae12a`](https://github.com/mastra-ai/mastra/commit/ebae12a2dd0212e75478981053b148a2c246962d), [`c61a0a5`](https://github.com/mastra-ai/mastra/commit/c61a0a5de4904c88fd8b3718bc26d1be1c2ec6e7), [`69136e7`](https://github.com/mastra-ai/mastra/commit/69136e748e32f57297728a4e0f9a75988462f1a7), [`449aed2`](https://github.com/mastra-ai/mastra/commit/449aed2ba9d507b75bf93d427646ea94f734dfd1), [`eb648a2`](https://github.com/mastra-ai/mastra/commit/eb648a2cc1728f7678768dd70cd77619b448dab9), [`0131105`](https://github.com/mastra-ai/mastra/commit/0131105532e83bdcbb73352fc7d0879eebf140dc), [`9d5059e`](https://github.com/mastra-ai/mastra/commit/9d5059eae810829935fb08e81a9bb7ecd5b144a7), [`ef756c6`](https://github.com/mastra-ai/mastra/commit/ef756c65f82d16531c43f49a27290a416611e526), [`b00ccd3`](https://github.com/mastra-ai/mastra/commit/b00ccd325ebd5d9e37e34dd0a105caae67eb568f), [`3bdfa75`](https://github.com/mastra-ai/mastra/commit/3bdfa7507a91db66f176ba8221aa28dd546e464a), [`e770de9`](https://github.com/mastra-ai/mastra/commit/e770de941a287a49b1964d44db5a5763d19890a6), [`52e2716`](https://github.com/mastra-ai/mastra/commit/52e2716b42df6eff443de72360ae83e86ec23993), [`27b4040`](https://github.com/mastra-ai/mastra/commit/27b4040bfa1a95d92546f420a02a626b1419a1d6), [`610a70b`](https://github.com/mastra-ai/mastra/commit/610a70bdad282079f0c630e0d7bb284578f20151), [`8dc7f55`](https://github.com/mastra-ai/mastra/commit/8dc7f55900395771da851dc7d78d53ae84fe34ec), [`8379099`](https://github.com/mastra-ai/mastra/commit/8379099fc467af6bef54dd7f80c9bd75bf8bbddf), [`b06be72`](https://github.com/mastra-ai/mastra/commit/b06be7223d5ef23edc98c01a67ef713c6cc039f9), [`8c0ec25`](https://github.com/mastra-ai/mastra/commit/8c0ec25646c8a7df253ed1e5ff4863a0d3f1316c), [`db28d12`](https://github.com/mastra-ai/mastra/commit/db28d12d37e170c99db3e32c06febf6225d13480), [`ff4d9a6`](https://github.com/mastra-ai/mastra/commit/ff4d9a6704fc87b31a380a76ed22736fdedbba5a), [`69821ef`](https://github.com/mastra-ai/mastra/commit/69821ef806482e2c44e2197ac0b050c3fe3a5285), [`1ed5716`](https://github.com/mastra-ai/mastra/commit/1ed5716830867b3774c4a1b43cc0d82935f32b96), [`4186bdd`](https://github.com/mastra-ai/mastra/commit/4186bdd00731305726fa06adba0b076a1d50b49f), [`7aaf973`](https://github.com/mastra-ai/mastra/commit/7aaf973f83fbbe9521f1f9e7a4fd99b8de464617)]:
  - @mastra/core@1.0.0-beta.22
  - @mastra/observability@1.0.0-beta.11
  - @mastra/otel-exporter@1.0.0-beta.13

package/README.md
ADDED
@@ -0,0 +1,225 @@

# @mastra/sentry

Sentry AI Observability exporter for Mastra applications.

## Installation

```bash
npm install @mastra/sentry
```

## Usage

### Zero-Config Setup

The exporter automatically reads credentials from environment variables:

```bash
# Required
SENTRY_DSN=https://...@...sentry.io/...

# Optional
SENTRY_ENVIRONMENT=production
SENTRY_RELEASE=1.0.0
```

```typescript
import { Mastra } from '@mastra/core';
import { SentryExporter } from '@mastra/sentry';

const mastra = new Mastra({
  ...,
  observability: {
    configs: {
      sentry: {
        serviceName: 'my-service',
        exporters: [new SentryExporter()],
      },
    },
  },
});
```

### Explicit Configuration

You can also pass credentials directly:

```typescript
import { Mastra } from '@mastra/core';
import { SentryExporter } from '@mastra/sentry';

const mastra = new Mastra({
  ...,
  observability: {
    configs: {
      sentry: {
        serviceName: 'my-service',
        exporters: [
          new SentryExporter({
            dsn: 'https://...@...sentry.io/...',
            environment: 'production', // Optional - deployment environment
            tracesSampleRate: 1.0, // Optional - send 100% of transactions to Sentry
            release: '1.0.0', // Optional - version of your code deployed
          }),
        ],
      },
    },
  },
});
```

### Configuration Options

| Option             | Type     | Description                                                                                                                              |
| ------------------ | -------- | ---------------------------------------------------------------------------------------------------------------------------------------- |
| `dsn`              | `string` | Data Source Name - tells the SDK where to send events. Defaults to `SENTRY_DSN` env var                                                 |
| `environment`      | `string` | Deployment environment (enables filtering issues and alerts by environment). Defaults to `SENTRY_ENVIRONMENT` env var or `'production'` |
| `tracesSampleRate` | `number` | Percentage of transactions sent to Sentry (0.0 = 0%, 1.0 = 100%). Defaults to `1.0`                                                     |
| `release`          | `string` | Version of your code deployed (helps identify regressions and track deployments). Defaults to `SENTRY_RELEASE` env var                  |
| `options`          | `object` | Additional Sentry SDK options (integrations, beforeSend, etc.)                                                                           |

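The `options` field carries additional Sentry SDK settings such as `beforeSend`. A minimal sketch, assuming these options are forwarded straight to the SDK's initialization (the exact passthrough behavior is an assumption based on the table above):

```typescript
import { SentryExporter } from '@mastra/sentry';

// Sketch: pass extra Sentry SDK settings through `options`
// (assumed to be forwarded to the SDK init, per the table above).
const exporter = new SentryExporter({
  dsn: process.env.SENTRY_DSN,
  options: {
    // Standard Sentry SDK hook: inspect or scrub events before they are sent.
    beforeSend(event) {
      delete event.user; // e.g. drop user data for privacy
      return event;
    },
  },
});
```
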
## Features

### Tracing

- **Automatic span mapping**: Root spans create Sentry traces, child spans nest properly
- **OpenTelemetry semantic conventions**: Uses standard GenAI semantic conventions for AI spans
- **Model generation support**: `MODEL_GENERATION` spans include token usage, model parameters, and streaming info
- **Tool call tracking**: `TOOL_CALL` and `MCP_TOOL_CALL` spans track tool executions
- **Workflow support**: `WORKFLOW_RUN` and `WORKFLOW_STEP` spans track workflow execution
- **Error tracking**: Automatic error status and exception capture
- **Hierarchical traces**: Maintains parent-child relationships

### Span Types Mapping

| Mastra SpanType             | Sentry Operation       | Span Name Pattern       | Notes                                                   |
| --------------------------- | ---------------------- | ----------------------- | ------------------------------------------------------- |
| `AGENT_RUN`                 | `gen_ai.invoke_agent`  | `invoke_agent {agent}`  | Accumulates tokens from the child MODEL_GENERATION span |
| `MODEL_GENERATION`          | `gen_ai.chat`          | `chat {model} [stream]` | Contains aggregated streaming data                      |
| `MODEL_STEP`                | _(skipped)_            | -                       | Skipped to simplify trace hierarchy                     |
| `MODEL_CHUNK`               | _(skipped)_            | -                       | Too granular; data aggregated in MODEL_GENERATION       |
| `TOOL_CALL`                 | `gen_ai.execute_tool`  | `execute_tool {tool}`   |                                                         |
| `MCP_TOOL_CALL`             | `gen_ai.execute_tool`  | `execute_tool {tool}`   |                                                         |
| `WORKFLOW_RUN`              | `workflow.run`         | `workflow`              |                                                         |
| `WORKFLOW_STEP`             | `workflow.step`        | `step`                  |                                                         |
| `WORKFLOW_CONDITIONAL`      | `workflow.conditional` | `step`                  |                                                         |
| `WORKFLOW_CONDITIONAL_EVAL` | `workflow.conditional` | `step`                  |                                                         |
| `WORKFLOW_PARALLEL`         | `workflow.parallel`    | `step`                  |                                                         |
| `WORKFLOW_LOOP`             | `workflow.loop`        | `step`                  |                                                         |
| `WORKFLOW_SLEEP`            | `workflow.sleep`       | `step`                  |                                                         |
| `WORKFLOW_WAIT_EVENT`       | `workflow.wait`        | `step`                  |                                                         |
| `PROCESSOR_RUN`             | `ai.processor`         | `step`                  |                                                         |
| `GENERIC`                   | `ai.span`              | `span`                  |                                                         |

### Semantic Attributes

**Common attributes (all spans):**

- `sentry.origin`: `auto.ai.mastra` (identifies spans from Mastra)
- `ai.span.type`: Mastra span type (e.g., `model_generation`, `tool_call`)

**For `MODEL_GENERATION` and `MODEL_STEP` spans:**

- `gen_ai.operation.name`: `chat`
- `gen_ai.system`: Model provider (e.g., `openai`, `anthropic`)
- `gen_ai.request.model`: Model identifier (e.g., `gpt-4`)
- `gen_ai.request.messages`: Input messages/prompts (JSON)
- `gen_ai.response.text`: Output text response
- `gen_ai.usage.input_tokens`: Input token count
- `gen_ai.usage.output_tokens`: Output token count
- `gen_ai.usage.cache_read_input_tokens`: Cached input tokens
- `gen_ai.usage.cache_write_input_tokens`: Cache write tokens
- `gen_ai.usage.reasoning_tokens`: Reasoning tokens (for models like o1)
- `gen_ai.request.temperature`: Temperature parameter
- `gen_ai.request.max_tokens`: Max tokens parameter
- `gen_ai.request.top_p`, `top_k`, `frequency_penalty`, `presence_penalty`: Other parameters
- `gen_ai.request.stream`: Whether streaming was requested
- `gen_ai.response.streaming`: Whether response was streamed
- `gen_ai.response.tool_calls`: Tool calls made during generation (JSON array)
- `gen_ai.completion_start_time`: Time first token arrived (for TTFT calculation)

**For `TOOL_CALL` spans:**

- `gen_ai.operation.name`: `ai.toolCall`
- `gen_ai.tool.name`: Tool identifier
- `gen_ai.tool.type`: `function`
- `gen_ai.tool.call.id`: Tool call ID
- `gen_ai.tool.input`: Tool input (JSON)
- `gen_ai.tool.output`: Tool output (JSON)
- `gen_ai.tool.description`: Tool description
- `tool.success`: Whether the tool call succeeded

**For `AGENT_RUN` spans:**

- `gen_ai.operation.name`: `invoke_agent`
- `gen_ai.agent.name`: Agent identifier
- `gen_ai.pipeline.name`: Agent name (for Sentry AI view)
- `gen_ai.agent.instructions`: Agent instructions
- `gen_ai.agent.prompt`: Agent prompt
- `gen_ai.request.messages`: Input message (normalized)
- `gen_ai.request.available_tools`: Available tools (JSON array)
- `gen_ai.response.model`: Model from the child MODEL_GENERATION span
- `gen_ai.response.text`: Output text from the child MODEL_GENERATION span
- `gen_ai.usage.input_tokens`: Input tokens from the child MODEL_GENERATION span
- `gen_ai.usage.output_tokens`: Output tokens from the child MODEL_GENERATION span
- `gen_ai.usage.total_tokens`: Total tokens from the child MODEL_GENERATION span
- `gen_ai.usage.cache_read_input_tokens`: Cached input tokens from the child MODEL_GENERATION span
- `gen_ai.usage.cache_write_input_tokens`: Cache write tokens from the child MODEL_GENERATION span
- `gen_ai.usage.reasoning_tokens`: Reasoning tokens from the child MODEL_GENERATION span
- `agent.max_steps`: Maximum steps allowed
- `agent.available_tools`: Available tools (comma-separated)

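For orientation, a `MODEL_GENERATION` span might carry attributes along these lines. The attribute names come from the list above; the values are purely illustrative, not taken from a real trace:

```typescript
// Illustrative attribute snapshot for a MODEL_GENERATION span (hypothetical values).
const exampleModelGenerationAttributes = {
  'sentry.origin': 'auto.ai.mastra',
  'ai.span.type': 'model_generation',
  'gen_ai.operation.name': 'chat',
  'gen_ai.system': 'openai',
  'gen_ai.request.model': 'gpt-4',
  'gen_ai.request.temperature': 0.7,
  'gen_ai.request.stream': true,
  'gen_ai.response.streaming': true,
  'gen_ai.usage.input_tokens': 220,
  'gen_ai.usage.output_tokens': 96,
};
```
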
## Example

```typescript
import { Mastra, Agent } from '@mastra/core';
import { SentryExporter } from '@mastra/sentry';
import { openai } from '@ai-sdk/openai';

const mastra = new Mastra({
  observability: {
    configs: {
      sentry: {
        serviceName: 'my-ai-app',
        exporters: [
          new SentryExporter({
            dsn: process.env.SENTRY_DSN,
            environment: process.env.NODE_ENV,
            tracesSampleRate: 0.1, // Send 10% of transactions to Sentry (recommended for high-load backends)
          }),
        ],
      },
    },
  },
});

const agent = new Agent({
  name: 'customer-support',
  instructions: 'Help customers with their questions',
  model: openai('gpt-4'),
  mastra,
});

// All agent executions will be traced in Sentry
const result = await agent.generate('How do I reset my password?');
```

## Troubleshooting

### Spans not appearing in Sentry

1. Verify your DSN is correct
2. Check the `tracesSampleRate` - set to `1.0` for testing
3. Ensure you're using Sentry SDK v10.32.1 or higher
4. Check the console for any Sentry initialization errors (see the debug sketch below)

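If the console shows nothing useful, enabling the Sentry SDK's debug logging while testing can surface initialization problems. A sketch, assuming the `options` field is forwarded to the SDK as described under Configuration Options:

```typescript
import { SentryExporter } from '@mastra/sentry';

// Sketch: verbose SDK output while troubleshooting missing spans.
// Assumes `options` is passed through to the Sentry SDK init.
const exporter = new SentryExporter({
  dsn: process.env.SENTRY_DSN,
  tracesSampleRate: 1.0, // sample everything while testing
  options: {
    debug: true, // standard Sentry SDK flag: logs what the SDK is doing
  },
});
```
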
### High volume / cost

Adjust the `tracesSampleRate` to send fewer transactions to Sentry:

```typescript
new SentryExporter({
  tracesSampleRate: 0.1, // Send only 10% of transactions (recommended for high-load applications)
});
```

**Note:** To disable tracing entirely, omit `tracesSampleRate` rather than setting it to `0`.