@tokamohsen/sentry-mcp 0.29.7 → 0.29.10

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. package/README.md +189 -0
  2. package/dist/index.cjs +33696 -34
  3. package/dist/index.cjs.map +1 -1
  4. package/dist/index.js +33706 -45
  5. package/dist/index.js.map +1 -1
  6. package/dist/{token-GX19_oyM.cjs → token-B5HKFZKz.cjs} +2 -2
  7. package/dist/{token-GX19_oyM.cjs.map → token-B5HKFZKz.cjs.map} +1 -1
  8. package/dist/{token-CO5Bq1Ct.js → token-JvPeoopD.js} +2 -2
  9. package/dist/{token-CO5Bq1Ct.js.map → token-JvPeoopD.js.map} +1 -1
  10. package/dist/{token-util-Cw83HNFN.cjs → token-util-BNYjqouU.cjs} +1 -1
  11. package/dist/{token-util-Cw83HNFN.cjs.map → token-util-BNYjqouU.cjs.map} +1 -1
  12. package/dist/{token-util-1O_mwf8r.js → token-util-CqB8j8br.js} +2 -2
  13. package/dist/{token-util-1O_mwf8r.js.map → token-util-CqB8j8br.js.map} +1 -1
  14. package/dist/transports/{stdio-DeWUp4RP.d.cts → stdio-BoqvAN3P.d.cts} +1 -1
  15. package/dist/transports/{stdio-DeWUp4RP.d.cts.map → stdio-BoqvAN3P.d.cts.map} +1 -1
  16. package/dist/transports/{stdio-DKQR8J7l.d.ts → stdio-BvNog4wx.d.ts} +1 -1
  17. package/dist/transports/{stdio-DKQR8J7l.d.ts.map → stdio-BvNog4wx.d.ts.map} +1 -1
  18. package/dist/transports/stdio.cjs +1 -1
  19. package/dist/transports/stdio.js +1 -1
  20. package/dist/{version-BD4r13ze.cjs → version-BN5g2FHt.cjs} +1 -1
  21. package/dist/{version-BD4r13ze.cjs.map → version-BN5g2FHt.cjs.map} +1 -1
  22. package/dist/{version-4iOZzjiD.js → version-CV6JJx_P.js} +1 -1
  23. package/dist/{version-4iOZzjiD.js.map → version-CV6JJx_P.js.map} +1 -1
  24. package/package.json +19 -18
  25. package/LICENSE.md +0 -105
  26. package/dist/cli/parse-CX7Bcldr.d.ts +0 -11
  27. package/dist/cli/parse-CX7Bcldr.d.ts.map +0 -1
  28. package/dist/cli/parse-EvLqDjN2.d.cts +0 -11
  29. package/dist/cli/parse-EvLqDjN2.d.cts.map +0 -1
  30. package/dist/cli/parse.cjs +0 -107
  31. package/dist/cli/parse.cjs.map +0 -1
  32. package/dist/cli/parse.js +0 -103
  33. package/dist/cli/parse.js.map +0 -1
  34. package/dist/cli/resolve-BVtyZcQM.d.ts +0 -10
  35. package/dist/cli/resolve-BVtyZcQM.d.ts.map +0 -1
  36. package/dist/cli/resolve-C3hwH129.d.cts +0 -10
  37. package/dist/cli/resolve-C3hwH129.d.cts.map +0 -1
  38. package/dist/cli/resolve.cjs +0 -56
  39. package/dist/cli/resolve.cjs.map +0 -1
  40. package/dist/cli/resolve.js +0 -54
  41. package/dist/cli/resolve.js.map +0 -1
  42. package/dist/cli/types-CnTkIHzd.d.ts +0 -73
  43. package/dist/cli/types-CnTkIHzd.d.ts.map +0 -1
  44. package/dist/cli/types-VWgo1wm6.d.cts +0 -73
  45. package/dist/cli/types-VWgo1wm6.d.cts.map +0 -1
  46. package/dist/cli/types.cjs +0 -0
  47. package/dist/cli/types.js +0 -1
  48. package/dist/cli/usage-B0gAPy4S.d.cts +0 -9
  49. package/dist/cli/usage-B0gAPy4S.d.cts.map +0 -1
  50. package/dist/cli/usage-BhCVaC5j.d.ts +0 -9
  51. package/dist/cli/usage-BhCVaC5j.d.ts.map +0 -1
  52. package/dist/cli/usage.cjs +0 -47
  53. package/dist/cli/usage.cjs.map +0 -1
  54. package/dist/cli/usage.js +0 -45
  55. package/dist/cli/usage.js.map +0 -1
  56. package/dist/config-CzqCJmB9.js +0 -613
  57. package/dist/config-CzqCJmB9.js.map +0 -1
  58. package/dist/config-DMt6phB6.cjs +0 -630
  59. package/dist/config-DMt6phB6.cjs.map +0 -1
  60. package/dist/constants-BrEVt86y.js +0 -194
  61. package/dist/constants-BrEVt86y.js.map +0 -1
  62. package/dist/constants-C14tQf_s.cjs +0 -217
  63. package/dist/constants-C14tQf_s.cjs.map +0 -1
  64. package/dist/server-DduxvXpe.js +0 -32499
  65. package/dist/server-DduxvXpe.js.map +0 -1
  66. package/dist/server-lur5iSHk.cjs +0 -32530
  67. package/dist/server-lur5iSHk.cjs.map +0 -1
  68. package/dist/skills-DOgs9MAy.cjs +0 -96
  69. package/dist/skills-DOgs9MAy.cjs.map +0 -1
  70. package/dist/skills-DfqlqYXj.js +0 -72
  71. package/dist/skills-DfqlqYXj.js.map +0 -1
  72. package/dist/url-utils-BHhxlntO.js +0 -119
  73. package/dist/url-utils-BHhxlntO.js.map +0 -1
  74. package/dist/url-utils-N2ExJl9F.cjs +0 -161
  75. package/dist/url-utils-N2ExJl9F.cjs.map +0 -1
  76. /package/dist/{index-16fTC-hT.d.cts → index-Cxeq9ZuA.d.ts} +0 -0
  77. /package/dist/{index-UKPmYT-S.d.ts → index-XTv1ki6h.d.cts} +0 -0
@@ -1 +0,0 @@
- {"version":3,"file":"usage.cjs","names":[],"sources":["../../src/cli/usage.ts"],"sourcesContent":["import type { Skill } from \"@sentry/mcp-core/skills\";\n\nexport function buildUsage(\n packageName: string,\n allSkills: ReadonlyArray<Skill>,\n): string {\n return `Usage: ${packageName} --access-token=<token> [--host=<host>]\n\nRequired:\n --access-token <token> Sentry User Auth Token with API access\n\nCommon optional flags:\n --host <host> Change Sentry host (self-hosted)\n --sentry-dsn <dsn> Override DSN used for telemetry reporting\n --agent Agent mode: only expose use_sentry tool (for AI agents)\n --experimental Enable experimental tools (hidden by default)\n\nEmbedded agent configuration:\n --agent-provider <provider> LLM provider: openai or anthropic (auto-detects from API keys)\n --openai-base-url <url> Override OpenAI API base URL\n --openai-model <model> Override OpenAI model (default: gpt-5)\n --anthropic-base-url <url> Override Anthropic API base URL\n --anthropic-model <model> Override Anthropic model (default: claude-sonnet-4-5)\n\nSession constraints:\n --organization-slug <slug> Force all calls to an organization\n --project-slug <slug> Optional project constraint\n\nSkill controls:\n --skills <list> Specify which skills to grant (default: all skills)\n\nAll skills: ${allSkills.join(\", \")}\n\nEnvironment variables:\n SENTRY_ACCESS_TOKEN Sentry auth token (alternative to --access-token)\n OPENAI_API_KEY OpenAI API key for AI-powered search tools\n ANTHROPIC_API_KEY Anthropic API key for AI-powered search tools\n EMBEDDED_AGENT_PROVIDER Provider override: openai or anthropic\n\nExamples:\n ${packageName} --access-token=TOKEN\n ${packageName} --access-token=TOKEN --skills=inspect,triage\n ${packageName} --access-token=TOKEN --host=sentry.example.com\n ${packageName} --access-token=TOKEN --agent-provider=anthropic`;\n}\n"],"mappings":";;;AAEA,SAAgB,WACd,aACA,WACQ;AACR,QAAO,UAAU,YAAY;;;;;;;;;;;;;;;;;;;;;;;;;cAyBjB,UAAU,KAAK,KAAK,CAAC;;;;;;;;;IAS/B,YAAY;IACZ,YAAY;IACZ,YAAY;IACZ,YAAY"}
package/dist/cli/usage.js DELETED
@@ -1,45 +0,0 @@
- //#region src/cli/usage.ts
- function buildUsage(packageName, allSkills) {
- 	return `Usage: ${packageName} --access-token=<token> [--host=<host>]
-
- Required:
-   --access-token <token>       Sentry User Auth Token with API access
-
- Common optional flags:
-   --host <host>                Change Sentry host (self-hosted)
-   --sentry-dsn <dsn>           Override DSN used for telemetry reporting
-   --agent                      Agent mode: only expose use_sentry tool (for AI agents)
-   --experimental               Enable experimental tools (hidden by default)
-
- Embedded agent configuration:
-   --agent-provider <provider>  LLM provider: openai or anthropic (auto-detects from API keys)
-   --openai-base-url <url>      Override OpenAI API base URL
-   --openai-model <model>       Override OpenAI model (default: gpt-5)
-   --anthropic-base-url <url>   Override Anthropic API base URL
-   --anthropic-model <model>    Override Anthropic model (default: claude-sonnet-4-5)
-
- Session constraints:
-   --organization-slug <slug>   Force all calls to an organization
-   --project-slug <slug>        Optional project constraint
-
- Skill controls:
-   --skills <list>              Specify which skills to grant (default: all skills)
-
- All skills: ${allSkills.join(", ")}
-
- Environment variables:
-   SENTRY_ACCESS_TOKEN      Sentry auth token (alternative to --access-token)
-   OPENAI_API_KEY           OpenAI API key for AI-powered search tools
-   ANTHROPIC_API_KEY        Anthropic API key for AI-powered search tools
-   EMBEDDED_AGENT_PROVIDER  Provider override: openai or anthropic
-
- Examples:
-   ${packageName} --access-token=TOKEN
-   ${packageName} --access-token=TOKEN --skills=inspect,triage
-   ${packageName} --access-token=TOKEN --host=sentry.example.com
-   ${packageName} --access-token=TOKEN --agent-provider=anthropic`;
- }
-
- //#endregion
- export { buildUsage };
- //# sourceMappingURL=usage.js.map
package/dist/cli/usage.js.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"usage.js","names":[],"sources":["../../src/cli/usage.ts"],"sourcesContent":["import type { Skill } from \"@sentry/mcp-core/skills\";\n\nexport function buildUsage(\n packageName: string,\n allSkills: ReadonlyArray<Skill>,\n): string {\n return `Usage: ${packageName} --access-token=<token> [--host=<host>]\n\nRequired:\n --access-token <token> Sentry User Auth Token with API access\n\nCommon optional flags:\n --host <host> Change Sentry host (self-hosted)\n --sentry-dsn <dsn> Override DSN used for telemetry reporting\n --agent Agent mode: only expose use_sentry tool (for AI agents)\n --experimental Enable experimental tools (hidden by default)\n\nEmbedded agent configuration:\n --agent-provider <provider> LLM provider: openai or anthropic (auto-detects from API keys)\n --openai-base-url <url> Override OpenAI API base URL\n --openai-model <model> Override OpenAI model (default: gpt-5)\n --anthropic-base-url <url> Override Anthropic API base URL\n --anthropic-model <model> Override Anthropic model (default: claude-sonnet-4-5)\n\nSession constraints:\n --organization-slug <slug> Force all calls to an organization\n --project-slug <slug> Optional project constraint\n\nSkill controls:\n --skills <list> Specify which skills to grant (default: all skills)\n\nAll skills: ${allSkills.join(\", \")}\n\nEnvironment variables:\n SENTRY_ACCESS_TOKEN Sentry auth token (alternative to --access-token)\n OPENAI_API_KEY OpenAI API key for AI-powered search tools\n ANTHROPIC_API_KEY Anthropic API key for AI-powered search tools\n EMBEDDED_AGENT_PROVIDER Provider override: openai or anthropic\n\nExamples:\n ${packageName} --access-token=TOKEN\n ${packageName} --access-token=TOKEN --skills=inspect,triage\n ${packageName} --access-token=TOKEN --host=sentry.example.com\n ${packageName} --access-token=TOKEN --agent-provider=anthropic`;\n}\n"],"mappings":";AAEA,SAAgB,WACd,aACA,WACQ;AACR,QAAO,UAAU,YAAY;;;;;;;;;;;;;;;;;;;;;;;;;cAyBjB,UAAU,KAAK,KAAK,CAAC;;;;;;;;;IAS/B,YAAY;IACZ,YAAY;IACZ,YAAY;IACZ,YAAY"}
package/dist/config-CzqCJmB9.js DELETED
@@ -1,613 +0,0 @@
- import { r as __exportAll } from "./index.js";
-
- //#region ../mcp-core/dist/tools/search-events/config.js
- var config_exports = /* @__PURE__ */ __exportAll({
- 	BASE_COMMON_FIELDS: () => BASE_COMMON_FIELDS,
- 	DATASET_EXAMPLES: () => DATASET_EXAMPLES,
- 	DATASET_FIELDS: () => DATASET_FIELDS,
- 	NUMERIC_FIELDS: () => NUMERIC_FIELDS,
- 	RECOMMENDED_FIELDS: () => RECOMMENDED_FIELDS,
- 	systemPrompt: () => systemPrompt
- });
- const systemPrompt = `You are a Sentry query translator. You need to:
- 1. FIRST determine which dataset (spans, errors, or logs) is most appropriate for the query
- 2. Query the available attributes for that dataset using the datasetAttributes tool
- 3. Use the otelSemantics tool if you need OpenTelemetry semantic conventions
- 4. Convert the natural language query to Sentry's search syntax (NOT SQL syntax)
- 5. Decide which fields to return in the results
-
- CRITICAL: Sentry does NOT use SQL syntax. Do NOT generate SQL-like queries.
-
- DATASET SELECTION GUIDELINES:
- - spans: Performance data, traces, AI/LLM calls, database queries, HTTP requests, token usage, costs, duration metrics, user agent data, "XYZ calls", ambiguous operations (richest attribute set)
- - errors: Exceptions, crashes, error messages, stack traces, unhandled errors, browser/client errors
- - logs: Log entries, log messages, severity levels, debugging information
-
- For ambiguous queries like "calls using XYZ", prefer spans dataset first as it contains the most comprehensive telemetry data.
-
- CRITICAL - FIELD VERIFICATION REQUIREMENT:
- Before constructing ANY query, you MUST verify field availability:
- 1. You CANNOT assume ANY field exists without checking - not even common ones
- 2. This includes ALL fields: custom attributes, database fields, HTTP fields, AI fields, user fields, etc.
- 3. Fields vary by project based on what data is being sent to Sentry
- 4. Using an unverified field WILL cause your query to fail with "field not found" errors
- 5. The datasetAttributes tool tells you EXACTLY which fields are available
-
- TOOL USAGE GUIDELINES:
- 1. Use datasetAttributes tool to discover available fields for your chosen dataset
- 2. Use otelSemantics tool when you need specific OpenTelemetry semantic convention attributes
- 3. Use whoami tool when queries contain "me" references for user.id or user.email fields
- 4. IMPORTANT: For ambiguous terms like "user agents", "browser", "client" - use the datasetAttributes tool to find the correct field name (typically user_agent.original) instead of assuming it's related to user.id
-
- CRITICAL - TOOL RESPONSE HANDLING:
- All tools return responses in this format: {error?: string, result?: data}
- - If 'error' is present: The tool failed - analyze the error message and potentially retry with corrections
- - If 'result' is present: The tool succeeded - use the result data for your query construction
- - Always check for errors before using results
-
- CRITICAL - HANDLING "DISTINCT" OR "UNIQUE VALUES" QUERIES:
- When user asks for "distinct", "unique", "all values of", or "what are the X" queries:
- 1. This ALWAYS requires an AGGREGATE query with count() function
- 2. Pattern: fields=['field_name', 'count()'] to show distinct values with counts
- 3. Sort by "-count()" to show most common values first
- 4. Use datasetAttributes tool to verify the field exists before constructing query
- 5. Examples:
-    - "distinct categories" → fields=['category.name', 'count()'], sort='-count()'
-    - "unique types" → fields=['item.type', 'count()'], sort='-count()'
-
- CRITICAL - TRAFFIC/VOLUME/COUNT QUERIES:
- When user asks about "traffic", "volume", "how much", "how many" (without specific metrics):
- 1. This ALWAYS requires an AGGREGATE query with count() function
- 2. For total counts: fields=['count()']
- 3. For grouped counts: fields=['grouping_field', 'count()']
- 4. Always include timeRange for period-specific queries
- 5. Examples:
-    - "how much traffic in last 30 days" → fields=['count()'], timeRange: {"statsPeriod": "30d"}
-    - "traffic on mcp-server" → query: "project:mcp-server", fields=['count()']
-
- CRITICAL - HANDLING "ME" REFERENCES:
- - If the query contains "me", "my", "myself", or "affecting me" in the context of user.id or user.email fields, use the whoami tool to get the user's ID and email
- - For assignedTo fields, you can use "me" directly without translation (e.g., assignedTo:me works as-is)
- - After calling whoami, replace "me" references with the actual user.id or user.email values
- - If whoami fails, return an error explaining the issue
-
- QUERY MODES:
- 1. INDIVIDUAL EVENTS (default): Returns raw event data
-    - Used when fields contain no function() calls
-    - Include recommended fields plus any user-requested fields
-
- 2. AGGREGATE QUERIES: Grouping and aggregation (NOT SQL)
-    - Activated when ANY field contains a function() call
-    - Fields should ONLY include: aggregate functions + groupBy fields
-    - Automatically groups by ALL non-function fields
-    - For aggregate queries, ONLY include the aggregate functions and groupBy fields - do NOT include default fields like timestamp, id, etc.
-    - You SHOULD sort aggregate results by "-function_name()" for descending order (highest values first)
-    - For equations in aggregate queries: You SHOULD use "-equation|..." prefix unless user wants lowest values
-    - When user asks "how many total", "sum of", or similar: They want the highest/total value, use descending sort
-
- CRITICAL LIMITATION - TIME SERIES NOT SUPPORTED:
- - Queries asking for data "over time", "by hour", "by day", "time series", or similar temporal groupings are NOT currently supported
- - If user asks for "X over time", return an error explaining: "Time series aggregations are not currently supported."
-
- CRITICAL - DO NOT USE SQL SYNTAX:
- - NEVER use SQL functions like yesterday(), today(), now(), IS NOT NULL, IS NULL
- - NEVER use SQL date functions - use timeRange parameter instead
- - For "yesterday": Use timeRange: {"statsPeriod": "24h"}, NOT timestamp >= yesterday()
- - For field existence: Use has:field_name, NOT field_name IS NOT NULL
- - For field absence: Use !has:field_name, NOT field_name IS NULL
-
- MATHEMATICAL QUERY PATTERNS:
- When user asks mathematical questions like "how many X", "total Y used", "sum of Z":
- - Identify the appropriate dataset based on context
- - Use datasetAttributes tool to find available numeric fields
- - Use sum() function for totals, avg() for averages, count() for counts
- - For time-based queries ("today", "yesterday", "this week"), use timeRange parameter
- - For "total" or "how many" questions: Users typically want highest values first (descending sort)
-
- DERIVED METRICS AND CALCULATIONS (SPANS ONLY):
- When user asks for calculated metrics, ratios, or conversions:
- - Use equation fields with "equation|" prefix
- - Examples:
-   - "duration in milliseconds" → fields: ["equation|avg(span.duration) * 1000"], sort: "-equation|avg(span.duration) * 1000"
-   - "combined metric total" → fields: ["equation|sum(metric.a) + sum(metric.b)"], sort: "-equation|sum(metric.a) + sum(metric.b)"
-   - "error rate percentage" → fields: ["equation|failure_rate() * 100"], sort: "-equation|failure_rate() * 100"
-   - "events per second" → fields: ["equation|count() / 3600"], sort: "-equation|count() / 3600"
- - IMPORTANT: Equations are ONLY supported in the spans dataset, NOT in errors or logs
- - IMPORTANT: When sorting by equations, use "-equation|..." for descending order (highest values first)
-
- PERFORMANCE INVESTIGATION STRATEGY:
- When users ask about "performance problems", "slow pages", "slow endpoints", "latency issues",
- "web vitals", "LCP", "CLS", "INP", "page speed", "load time", "response time", or similar:
-
- 1. ALWAYS use AGGREGATE queries first - individual samples are misleading for performance analysis
- 2. Use p75() as the primary percentile for consistent performance measurement
- 3. Group by transaction to identify which pages/endpoints have problems
- 4. Include count() to understand sample size (low count = unreliable data)
- 5. Sort by the worst-performing metric (descending with "-" prefix)
-
- CRITICAL: For performance investigations, return AGGREGATES grouped by the span's transaction attribute, NOT individual events.
-
- SPAN QUERY PHILOSOPHY - DUCK TYPING:
- Use "has:attribute" to find spans by their characteristics, NOT "is_transaction:true".
- The is_transaction:true filter ONLY returns transaction boundaries (request entry/exit points).
- Most performance queries want specific span types, not just boundaries.
-
- Performance Query Patterns (use duck typing):
- - Web Vitals: has:measurements.lcp, has:measurements.cls, has:measurements.inp
- - Database: has:db.statement or has:db.system
- - HTTP/API calls: has:http.method or has:http.url
- - External Services: has:http.url (for outbound calls)
- - AI/LLM: has:gen_ai.system or has:gen_ai.request.model
- - MCP Tools: has:mcp.tool.name
-
- WHEN TO USE is_transaction:true (rare):
- - ONLY when you specifically need transaction boundaries (full request/response cycle)
- - Example: "total request duration by endpoint" - you want the outermost span
- - For most queries about "slow X" or "X performance", use duck typing instead
-
- Web Vitals Thresholds (for context when reporting):
- - LCP: Good < 2500ms, Needs Improvement 2500-4000ms, Poor >= 4000ms
- - CLS: Good < 0.1, Needs Improvement 0.1-0.25, Poor >= 0.25
- - INP: Good < 200ms, Needs Improvement 200-500ms, Poor >= 500ms
- - FCP: Good < 1800ms, Needs Improvement 1800-3000ms, Poor >= 3000ms
- - TTFB: Good < 800ms, Needs Improvement 800-1800ms, Poor >= 1800ms
-
- SORTING RULES (CRITICAL - YOU MUST ALWAYS SPECIFY A SORT):
- 1. CRITICAL: Sort MUST go in the separate "sort" field, NEVER in the "query" field
-    - WRONG: query: "level:error sort:-timestamp" ← Sort syntax in query field is FORBIDDEN
-    - CORRECT: query: "level:error", sort: "-timestamp" ← Sort in separate field
-
- 2. DEFAULT SORTING:
-    - errors dataset: Use "-timestamp" (newest first)
-    - spans dataset: Use "-span.duration" (slowest first)
-    - logs dataset: Use "-timestamp" (newest first)
-
- 3. SORTING SYNTAX:
-    - Use "-" prefix for descending order (e.g., "-timestamp" for newest first)
-    - Use field name without prefix for ascending order
-    - For aggregate queries: sort by aggregate function results (e.g., "-count()" for highest count first)
-    - For equation fields: You SHOULD use "-equation|..." for descending order (e.g., "-equation|sum(field1) + sum(field2)")
-    - Only omit the "-" prefix if the user clearly wants lowest values first (rare)
-
- 4. IMPORTANT SORTING REQUIREMENTS:
-    - YOU MUST ALWAYS INCLUDE A SORT PARAMETER
-    - CRITICAL: The field you sort by MUST be included in your fields array
-    - If sorting by "-timestamp", include "timestamp" in fields
-    - If sorting by "-count()", include "count()" in fields
-    - This is MANDATORY - Sentry will reject queries where sort field is not in the selected fields
-
- YOUR RESPONSE FORMAT:
- Return a JSON object with these fields:
- - "dataset": Which dataset you determined to use ("spans", "errors", or "logs")
- - "query": The Sentry query string for filtering results (use empty string "" for no filters)
- - "fields": Array of field names to return in results
-   - For individual event queries: OPTIONAL (will use recommended fields if not provided)
-   - For aggregate queries: REQUIRED (must include aggregate functions AND any groupBy fields)
- - "sort": Sort parameter for results (REQUIRED - YOU MUST ALWAYS SPECIFY THIS)
- - "timeRange": Time range parameters (optional)
-   - Relative: {"statsPeriod": "24h"} for last 24 hours, "7d" for last 7 days, etc.
-   - Absolute: {"start": "2025-06-19T07:00:00", "end": "2025-06-20T06:59:59"} for specific date ranges
-
- CORRECT QUERY PATTERNS (FOLLOW THESE):
- - For field existence: Use has:field_name (NOT field_name IS NOT NULL)
- - For field absence: Use !has:field_name (NOT field_name IS NULL)
- - For time periods: Use timeRange parameter (NOT SQL date functions)
- - Example: "items processed yesterday" → query: "has:item.processed", timeRange: {"statsPeriod": "24h"}
-
- PROCESS:
- 1. Analyze the user's query
- 2. Determine appropriate dataset
- 3. Use datasetAttributes tool to discover available fields
- 4. Use otelSemantics tool if needed for OpenTelemetry attributes
- 5. Construct the final query with proper fields and sort parameters
-
- COMMON ERRORS TO AVOID:
- - Using SQL syntax (IS NOT NULL, IS NULL, yesterday(), today(), etc.) - Use has: operator and timeRange instead
- - Using numeric functions (sum, avg, min, max, percentiles) on non-numeric fields
- - Using incorrect field names (use the otelSemantics tool to look up correct names)
- - Missing required fields in the fields array for aggregate queries
- - Invalid sort parameter not included in fields array
- - For field existence: Use has:field_name (NOT field_name IS NOT NULL)
- - For field absence: Use !has:field_name (NOT field_name IS NULL)
- - For time periods: Use timeRange parameter (NOT SQL date functions like yesterday())`;
- const BASE_COMMON_FIELDS = {
- 	project: "Project slug",
- 	timestamp: "When the event occurred",
- 	environment: "Environment (production, staging, development)",
- 	release: "Release version",
- 	platform: "Platform (javascript, python, etc.)",
- 	"user.id": "User ID",
- 	"user.email": "User email",
- 	"sdk.name": "SDK name",
- 	"sdk.version": "SDK version"
- };
- const NUMERIC_FIELDS = {
- 	spans: new Set([
- 		"span.duration",
- 		"span.self_time",
- 		"transaction.duration",
- 		"http.status_code",
- 		"gen_ai.usage.input_tokens",
- 		"gen_ai.usage.output_tokens",
- 		"gen_ai.request.max_tokens",
- 		"measurements.lcp",
- 		"measurements.cls",
- 		"measurements.inp",
- 		"measurements.fcp",
- 		"measurements.ttfb"
- 	]),
- 	errors: new Set(["stack.lineno"]),
- 	logs: new Set(["severity_number", "sentry.observed_timestamp_nanos"])
- };
- const DATASET_FIELDS = {
- 	spans: {
- 		"span.op": "Span operation type (e.g., http.client, db.query, cache.get)",
- 		"span.description": "Detailed description of the span operation",
- 		"span.duration": "Duration of the span in milliseconds",
- 		"span.status": "Span status (ok, cancelled, unknown, etc.)",
- 		"span.self_time": "Time spent in this span excluding child spans",
- 		transaction: "Transaction name/route",
- 		"transaction.duration": "Total transaction duration in milliseconds",
- 		"transaction.op": "Transaction operation type",
- 		"transaction.status": "Transaction status",
- 		is_transaction: "Whether this span is a transaction (true/false)",
- 		trace: "Trace ID",
- 		"trace.span_id": "Span ID within the trace",
- 		"trace.parent_span_id": "Parent span ID",
- 		"http.method": "HTTP method (GET, POST, etc.)",
- 		"http.status_code": "HTTP response status code",
- 		"http.url": "Full HTTP URL",
- 		"db.system": "Database system (postgresql, mysql, etc.)",
- 		"db.operation": "Database operation (SELECT, INSERT, etc.)",
- 		"gen_ai.system": "AI system (e.g., anthropic, openai)",
- 		"gen_ai.request.model": "Model name (e.g., claude-3-5-sonnet-20241022)",
- 		"gen_ai.operation.name": "Operation type (e.g., chat, completion)",
- 		"gen_ai.usage.input_tokens": "Number of input tokens (numeric)",
- 		"gen_ai.usage.output_tokens": "Number of output tokens (numeric)",
- 		"mcp.tool.name": "Tool name (e.g., search_issues, search_events)",
- 		"mcp.session.id": "MCP session identifier",
- 		"measurements.lcp": "Largest Contentful Paint - time until largest content element is visible (ms). Good < 2500ms",
- 		"measurements.cls": "Cumulative Layout Shift - visual stability score (unitless). Good < 0.1",
- 		"measurements.inp": "Interaction to Next Paint - responsiveness to user input (ms). Good < 200ms",
- 		"measurements.fcp": "First Contentful Paint - time until first content is visible (ms). Good < 1800ms",
- 		"measurements.ttfb": "Time to First Byte - server response time (ms). Good < 800ms",
- 		"count()": "Count of spans",
- 		"count_unique(field)": "Count of unique values, e.g. count_unique(user.id)",
- 		"avg(field)": "Average of numeric field, e.g. avg(span.duration)",
- 		"sum(field)": "Sum of numeric field, e.g. sum(span.self_time)",
- 		"min(field)": "Minimum of numeric field, e.g. min(span.duration)",
- 		"max(field)": "Maximum of numeric field, e.g. max(span.duration)",
- 		"p50(field)": "50th percentile (median), e.g. p50(span.duration)",
- 		"p75(field)": "75th percentile - standard for performance analysis, e.g. p75(span.duration)",
- 		"p90(field)": "90th percentile, e.g. p90(span.duration)",
- 		"p95(field)": "95th percentile, e.g. p95(span.duration)",
- 		"p99(field)": "99th percentile, e.g. p99(span.duration)",
- 		"p100(field)": "100th percentile (max), e.g. p100(span.duration)",
- 		"epm()": "Events per minute rate",
- 		"failure_rate()": "Percentage of failed spans"
- 	},
- 	errors: {
- 		message: "Error message",
- 		level: "Error level (error, warning, info, debug)",
- 		"error.type": "Error type/exception class",
- 		"error.value": "Error value/description",
- 		"error.handled": "Whether the error was handled (true/false)",
- 		culprit: "Code location that caused the error",
- 		title: "Error title/grouping",
- 		"stack.filename": "File where error occurred",
- 		"stack.function": "Function where error occurred",
- 		"stack.module": "Module where error occurred",
- 		"stack.abs_path": "Absolute path to file",
- 		"os.name": "Operating system name",
- 		"browser.name": "Browser name",
- 		"device.family": "Device family",
- 		"count()": "Count of error events",
- 		"count_unique(field)": "Count of unique values, e.g. count_unique(user.id)",
- 		"count_if(field,equals,value)": "Conditional count, e.g. count_if(error.handled,equals,false)",
- 		"last_seen()": "Most recent timestamp of the group",
- 		"eps()": "Events per second rate",
- 		"epm()": "Events per minute rate"
- 	},
- 	logs: {
- 		message: "Log message",
- 		severity: "Log severity level",
- 		severity_number: "Numeric severity level",
- 		"sentry.item_id": "Sentry item ID",
- 		"sentry.observed_timestamp_nanos": "Observed timestamp in nanoseconds",
- 		trace: "Trace ID",
- 		"count()": "Count of log entries",
- 		"count_unique(field)": "Count of unique values, e.g. count_unique(user.id)",
- 		"avg(field)": "Average of numeric field, e.g. avg(severity_number)",
- 		"sum(field)": "Sum of numeric field",
- 		"min(field)": "Minimum of numeric field",
- 		"max(field)": "Maximum of numeric field",
- 		"p50(field)": "50th percentile (median)",
- 		"p75(field)": "75th percentile",
- 		"p90(field)": "90th percentile",
- 		"p95(field)": "95th percentile",
- 		"p99(field)": "99th percentile",
- 		"p100(field)": "100th percentile (max)",
- 		"epm()": "Events per minute rate"
- 	}
- };
- const DATASET_EXAMPLES = {
- 	spans: [
- 		{
- 			description: "web vitals performance problems",
- 			output: {
- 				query: "has:measurements.lcp",
- 				fields: [
- 					"transaction",
- 					"p75(measurements.lcp)",
- 					"p75(measurements.cls)",
- 					"p75(measurements.inp)",
- 					"count()"
- 				],
- 				sort: "-p75(measurements.lcp)"
- 			}
- 		},
- 		{
- 			description: "slowest database queries",
- 			output: {
- 				query: "has:db.statement",
- 				fields: [
- 					"db.system",
- 					"db.statement",
- 					"count()",
- 					"p75(span.duration)",
- 					"p95(span.duration)"
- 				],
- 				sort: "-p75(span.duration)"
- 			}
- 		},
- 		{
- 			description: "total request duration by endpoint (transaction boundaries)",
- 			output: {
- 				query: "is_transaction:true",
- 				fields: [
- 					"transaction",
- 					"count()",
- 					"avg(span.duration)",
- 					"p95(span.duration)"
- 				],
- 				sort: "-avg(span.duration)"
- 			}
- 		},
- 		{
- 			description: "slow API calls over 5 seconds",
- 			output: {
- 				query: "has:request.url AND span.duration:>5000",
- 				fields: [
- 					"span.op",
- 					"span.description",
- 					"span.duration",
- 					"transaction",
- 					"timestamp",
- 					"trace"
- 				],
- 				sort: "-span.duration"
- 			}
- 		},
- 		{
- 			description: "token usage by AI model",
- 			output: {
- 				query: "has:gen_ai.usage.input_tokens",
- 				fields: [
- 					"gen_ai.request.model",
- 					"sum(gen_ai.usage.input_tokens)",
- 					"sum(gen_ai.usage.output_tokens)",
- 					"count()"
- 				],
- 				sort: "-sum(gen_ai.usage.input_tokens)"
- 			}
- 		},
- 		{
- 			description: "top MCP tool calls by usage",
- 			output: {
- 				query: "has:mcp.tool.name",
- 				fields: ["mcp.tool.name", "count()"],
- 				sort: "-count()"
- 			}
- 		},
- 		{
- 			description: "HTTP requests by user agent",
- 			output: {
- 				query: "has:http.method AND has:user_agent.original",
- 				fields: [
- 					"user_agent.original",
- 					"count()",
- 					"avg(span.duration)"
- 				],
- 				sort: "-count()"
- 			}
- 		},
- 		{
- 			description: "frontend performance overview",
- 			output: {
- 				query: "has:measurements.lcp",
- 				fields: [
- 					"transaction",
- 					"p75(span.duration)",
- 					"p75(measurements.lcp)",
- 					"p75(measurements.fcp)",
- 					"p75(measurements.ttfb)",
- 					"count()"
- 				],
- 				sort: "-p75(span.duration)"
- 			}
- 		}
- 	],
- 	errors: [
- 		{
- 			description: "unhandled errors in production",
- 			output: {
- 				query: "error.handled:false AND environment:production",
- 				fields: [
- 					"issue",
- 					"title",
- 					"timestamp",
- 					"message",
- 					"error.type",
- 					"culprit"
- 				],
- 				sort: "-timestamp"
- 			}
- 		},
- 		{
- 			description: "count errors by type",
- 			output: {
- 				query: "level:error",
- 				fields: [
- 					"error.type",
- 					"count()",
- 					"last_seen()"
- 				],
- 				sort: "-count()"
- 			}
- 		},
- 		{
- 			description: "errors in specific file",
- 			output: {
- 				query: "stack.filename:\"**/Button.tsx\"",
- 				fields: [
- 					"issue",
- 					"title",
- 					"timestamp",
- 					"message",
- 					"stack.filename"
- 				],
- 				sort: "-timestamp"
- 			}
- 		},
- 		{
- 			description: "most common errors",
- 			output: {
- 				query: "",
- 				fields: ["title", "count()"],
- 				sort: "-count()"
- 			}
- 		},
- 		{
- 			description: "unique users affected by errors",
- 			output: {
- 				query: "level:error",
- 				fields: [
- 					"error.type",
- 					"count()",
- 					"count_unique(user.id)"
- 				],
- 				sort: "-count_unique(user.id)"
- 			}
- 		},
- 		{
- 			description: "errors by browser/user agent",
- 			output: {
- 				query: "level:error AND has:user_agent.original",
- 				fields: [
- 					"user_agent.original",
- 					"count()",
- 					"count_unique(user.id)"
- 				],
- 				sort: "-count()"
- 			}
- 		}
- 	],
- 	logs: [
- 		{
- 			description: "error logs about database",
- 			output: {
- 				query: "severity:error AND message:\"*database*\"",
- 				fields: [
- 					"timestamp",
- 					"message",
- 					"severity",
- 					"trace"
- 				],
- 				sort: "-timestamp"
- 			}
- 		},
- 		{
- 			description: "count logs by severity",
- 			output: {
- 				query: "",
- 				fields: ["severity", "count()"],
- 				sort: "-count()"
- 			}
- 		},
- 		{
- 			description: "most common log messages",
- 			output: {
- 				query: "",
- 				fields: ["message", "count()"],
- 				sort: "-count()"
- 			}
- 		},
- 		{
- 			description: "log volume by project",
- 			output: {
- 				query: "",
- 				fields: [
- 					"project",
- 					"count()",
- 					"epm()"
- 				],
- 				sort: "-count()"
- 			}
- 		},
- 		{
- 			description: "warning logs about memory",
- 			output: {
- 				query: "severity:warning AND message:\"*memory*\"",
- 				fields: [
- 					"timestamp",
- 					"message",
- 					"severity",
- 					"trace"
- 				],
- 				sort: "-timestamp"
- 			}
- 		}
- 	]
- };
- const RECOMMENDED_FIELDS = {
- 	errors: {
- 		basic: [
- 			"issue",
- 			"title",
- 			"project",
- 			"timestamp",
- 			"level",
- 			"message",
- 			"error.type",
- 			"culprit"
- 		],
- 		description: "Basic error information including issue ID, title, timestamp, severity, and location"
- 	},
- 	logs: {
- 		basic: [
- 			"timestamp",
- 			"project",
- 			"message",
- 			"severity",
- 			"trace"
- 		],
- 		description: "Essential log entry information"
- 	},
- 	spans: {
- 		basic: [
- 			"id",
- 			"span.op",
- 			"span.description",
- 			"span.duration",
- 			"transaction",
- 			"timestamp",
- 			"project",
- 			"trace"
- 		],
- 		description: "Core span/trace information including span ID, operation, duration, and trace context"
- 	}
- };
-
- //#endregion
- export { config_exports as n, systemPrompt as r, RECOMMENDED_FIELDS as t };
- //# sourceMappingURL=config-CzqCJmB9.js.map