@sweny-ai/core 0.1.6 → 0.1.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/claude.d.ts +6 -0
- package/dist/claude.js +32 -3
- package/dist/cli/config-file.d.ts +5 -2
- package/dist/cli/config-file.js +17 -15
- package/dist/cli/config.d.ts +4 -1
- package/dist/cli/config.js +26 -3
- package/dist/cli/main.js +65 -5
- package/dist/executor.js +32 -5
- package/dist/index.d.ts +2 -1
- package/dist/index.js +1 -1
- package/dist/mcp.d.ts +15 -0
- package/dist/mcp.js +41 -0
- package/dist/skills/betterstack.d.ts +8 -0
- package/dist/skills/betterstack.js +151 -0
- package/dist/skills/index.d.ts +2 -1
- package/dist/skills/index.js +3 -2
- package/dist/skills/linear.js +35 -0
- package/dist/templates.d.ts +6 -2
- package/dist/templates.js +31 -7
- package/dist/types.d.ts +1 -1
- package/dist/workflows/triage.d.ts +2 -2
- package/dist/workflows/triage.js +93 -16
- package/package.json +1 -1
package/dist/claude.d.ts
CHANGED
|
@@ -31,6 +31,12 @@ export declare class ClaudeClient implements Claude {
|
|
|
31
31
|
private defaultContext;
|
|
32
32
|
private mcpServers;
|
|
33
33
|
constructor(opts?: ClaudeClientOptions);
|
|
34
|
+
/**
|
|
35
|
+
* Build env for the Claude Code subprocess.
|
|
36
|
+
* OAuth token takes priority over API key to prevent .env files from
|
|
37
|
+
* overriding the user's subscription-based auth.
|
|
38
|
+
*/
|
|
39
|
+
private buildEnv;
|
|
34
40
|
run(opts: {
|
|
35
41
|
instruction: string;
|
|
36
42
|
context: Record<string, unknown>;
|
package/dist/claude.js
CHANGED
|
@@ -27,6 +27,18 @@ export class ClaudeClient {
|
|
|
27
27
|
this.defaultContext = opts.defaultContext ?? { config: {}, logger: this.logger };
|
|
28
28
|
this.mcpServers = opts.mcpServers ?? {};
|
|
29
29
|
}
|
|
30
|
+
/**
|
|
31
|
+
* Build env for the Claude Code subprocess.
|
|
32
|
+
* OAuth token takes priority over API key to prevent .env files from
|
|
33
|
+
* overriding the user's subscription-based auth.
|
|
34
|
+
*/
|
|
35
|
+
buildEnv() {
|
|
36
|
+
const env = Object.fromEntries(Object.entries(process.env).filter((e) => e[1] != null));
|
|
37
|
+
if (env.CLAUDE_CODE_OAUTH_TOKEN) {
|
|
38
|
+
delete env.ANTHROPIC_API_KEY;
|
|
39
|
+
}
|
|
40
|
+
return env;
|
|
41
|
+
}
|
|
30
42
|
async run(opts) {
|
|
31
43
|
const { instruction, context, tools, outputSchema, onProgress } = opts;
|
|
32
44
|
const toolCalls = [];
|
|
@@ -47,8 +59,7 @@ export class ClaudeClient {
|
|
|
47
59
|
]
|
|
48
60
|
.filter(Boolean)
|
|
49
61
|
.join("\n\n");
|
|
50
|
-
|
|
51
|
-
const env = Object.fromEntries(Object.entries(process.env).filter((e) => e[1] != null));
|
|
62
|
+
const env = this.buildEnv();
|
|
52
63
|
let response = "";
|
|
53
64
|
try {
|
|
54
65
|
const allMcpServers = { ...this.mcpServers };
|
|
@@ -62,6 +73,8 @@ export class ClaudeClient {
|
|
|
62
73
|
cwd: this.cwd,
|
|
63
74
|
env,
|
|
64
75
|
permissionMode: "bypassPermissions",
|
|
76
|
+
allowDangerouslySkipPermissions: true,
|
|
77
|
+
stderr: (data) => this.logger.debug(`[claude-code] ${data}`),
|
|
65
78
|
...(this.model ? { model: this.model } : {}),
|
|
66
79
|
...(Object.keys(allMcpServers).length > 0 ? { mcpServers: allMcpServers } : {}),
|
|
67
80
|
},
|
|
@@ -82,6 +95,20 @@ export class ClaudeClient {
|
|
|
82
95
|
onProgress?.(clean.length > 80 ? clean.slice(0, 79) + "\u2026" : clean);
|
|
83
96
|
}
|
|
84
97
|
}
|
|
98
|
+
else if (message.type === "assistant") {
|
|
99
|
+
// Extract tool_use blocks from assistant messages (MCP tool calls)
|
|
100
|
+
const am = message;
|
|
101
|
+
if (am.message?.content && Array.isArray(am.message.content)) {
|
|
102
|
+
for (const block of am.message.content) {
|
|
103
|
+
if (block.type === "tool_use") {
|
|
104
|
+
toolCalls.push({
|
|
105
|
+
tool: stripMcpPrefix(block.name ?? ""),
|
|
106
|
+
input: block.input,
|
|
107
|
+
});
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
}
|
|
111
|
+
}
|
|
85
112
|
else if (message.type === "result") {
|
|
86
113
|
const resultMsg = message;
|
|
87
114
|
if (resultMsg.subtype === "success" && "result" in resultMsg) {
|
|
@@ -121,7 +148,7 @@ export class ClaudeClient {
|
|
|
121
148
|
`\nChoices:\n${choiceList}`,
|
|
122
149
|
`\nRespond with ONLY the choice ID, nothing else.`,
|
|
123
150
|
].join("\n");
|
|
124
|
-
const env =
|
|
151
|
+
const env = this.buildEnv();
|
|
125
152
|
let response = "";
|
|
126
153
|
try {
|
|
127
154
|
const stream = query({
|
|
@@ -131,6 +158,8 @@ export class ClaudeClient {
|
|
|
131
158
|
cwd: this.cwd,
|
|
132
159
|
env,
|
|
133
160
|
permissionMode: "bypassPermissions",
|
|
161
|
+
allowDangerouslySkipPermissions: true,
|
|
162
|
+
stderr: (data) => this.logger.debug(`[claude-code] ${data}`),
|
|
134
163
|
...(this.model ? { model: this.model } : {}),
|
|
135
164
|
},
|
|
136
165
|
});
|
|
@@ -3,10 +3,13 @@
|
|
|
3
3
|
* Sets `process.env[KEY]` only if not already defined (real env vars win).
|
|
4
4
|
*/
|
|
5
5
|
export declare function loadDotenv(cwd?: string): void;
|
|
6
|
+
/** Parsed config file — flat strings for scalar fields, arrays for list fields. */
|
|
7
|
+
export type FileConfig = Record<string, string | string[]>;
|
|
6
8
|
/**
|
|
7
|
-
* Search upward from `cwd` for `.sweny.yml` and parse it
|
|
9
|
+
* Search upward from `cwd` for `.sweny.yml` and parse it.
|
|
10
|
+
* Scalar values are strings, list values (rules, context) are string arrays.
|
|
8
11
|
* Returns empty object if no config file is found.
|
|
9
12
|
*/
|
|
10
|
-
export declare function loadConfigFile(cwd?: string):
|
|
13
|
+
export declare function loadConfigFile(cwd?: string): FileConfig;
|
|
11
14
|
/** Starter config written by `sweny init`. */
|
|
12
15
|
export declare const STARTER_CONFIG = "# .sweny.yml \u2014 SWEny project configuration\n# Commit this file. Secrets (API keys, tokens) go in .env (gitignored).\n#\n# Every key matches a CLI flag: \"time-range: 4h\" is the same as \"--time-range 4h\".\n# CLI flags override this file; env vars override this file; this file overrides defaults.\n\n# \u2500\u2500 Providers \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# observability-provider: datadog # datadog | sentry | cloudwatch | splunk | elastic | newrelic | loki | prometheus | pagerduty | heroku | opsgenie | vercel | supabase | netlify | fly | render | file\n# issue-tracker-provider: github-issues # github-issues | linear | jira\n# source-control-provider: github # github | gitlab\n# coding-agent-provider: claude # claude | codex | gemini\n# notification-provider: console # console | slack | teams | discord | email | webhook\n\n# \u2500\u2500 Investigation \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# time-range: 24h\n# severity-focus: errors\n# service-filter: \"*\"\n# investigation-depth: standard # quick | standard | thorough\n\n# \u2500\u2500 PR / branch 
\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# base-branch: main\n# pr-labels: agent,triage,needs-review\n\n# \u2500\u2500 Paths \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# service-map-path: .github/service-map.yml\n# log-file: ./logs/errors.json # required when observability-provider is \"file\"\n\n# \u2500\u2500 Cache \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# cache-dir: .sweny/cache\n# cache-ttl: 86400\n\n# \u2500\u2500 MCP servers \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# Extend the coding agent with additional tools via MCP.\n# Value is a JSON object \u2014 each key is a server name you choose.\n# See docs/mcp-servers.md for a full catalog with copy-paste configs.\n#\n# Example: GitHub MCP server (query PRs, issues, CI run logs)\n# mcp-servers-json: 
'{\"github\":{\"type\":\"stdio\",\"command\":\"npx\",\"args\":[\"-y\",\"@modelcontextprotocol/server-github@latest\"],\"env\":{\"GITHUB_PERSONAL_ACCESS_TOKEN\":\"ghp_...\"}}}'\n\n# \u2500\u2500 Local-only quick start \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# Uncomment to run without any external services (just an LLM API key):\n# observability-provider: file\n# log-file: ./sample-errors.json\n# issue-tracker-provider: file\n# source-control-provider: file\n# notification-provider: file\n# output-dir: .sweny/output\n\n# \u2500\u2500 Credentials (.env) \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# Copy the relevant block into your .env file and fill in the values.\n#\n# Claude (coding agent) \u2014 https://console.anthropic.com/settings/api-keys\n# ANTHROPIC_API_KEY=sk-ant-...\n#\n# GitHub (source control + issue tracker)\n# GITHUB_TOKEN=ghp_... # https://github.com/settings/tokens (repo + issues scopes)\n#\n# Datadog (observability) \u2014 https://app.datadoghq.com/organization-settings\n# DD_API_KEY=... # Organization Settings > API Keys\n# DD_APP_KEY=... 
# Organization Settings > Application Keys\n# DD_SITE=datadoghq.com # or datadoghq.eu, us3.datadoghq.com, etc.\n#\n# Sentry (observability) \u2014 https://sentry.io/settings/auth-tokens/\n# SENTRY_AUTH_TOKEN=sntrys_...\n# SENTRY_ORG=your-org-slug # from sentry.io/organizations/<slug>/\n# SENTRY_PROJECT=your-project # Project Settings > General > Project Slug\n#\n# Linear (issue tracker) \u2014 https://linear.app/settings/api\n# LINEAR_API_KEY=lin_api_...\n# LINEAR_TEAM_ID=... # Settings > Workspace > Teams > [team] > copy ID from URL\n# LINEAR_BUG_LABEL_ID=... # Settings > Labels > [label] > copy ID from URL\n#\n# Jira (issue tracker) \u2014 https://your-org.atlassian.net\n# JIRA_BASE_URL=https://your-org.atlassian.net\n# JIRA_EMAIL=you@company.com # your Atlassian account email\n# JIRA_API_TOKEN=... # https://id.atlassian.com/manage-profile/security/api-tokens\n#\n# Vercel (observability) \u2014 https://vercel.com/account/tokens\n# VERCEL_TOKEN=...\n# VERCEL_PROJECT_ID=prj_... # Project Settings > General > Project ID\n# VERCEL_TEAM_ID=team_... # optional, for team-owned projects\n#\n# Supabase (observability) \u2014 https://supabase.com/dashboard/account/tokens\n# SUPABASE_MANAGEMENT_KEY=...\n# SUPABASE_PROJECT_REF=... # Project Settings > General > Reference ID\n#\n# Netlify (observability) \u2014 https://app.netlify.com/user/applications#personal-access-tokens\n# NETLIFY_TOKEN=...\n# NETLIFY_SITE_ID=... # Site Settings > General > Site ID\n#\n# Fly.io (observability) \u2014 https://fly.io/user/personal_access_tokens\n# FLY_TOKEN=...\n# FLY_APP_NAME=... # the name of your Fly.io application\n#\n# Render (observability) \u2014 https://dashboard.render.com/u/settings\n# RENDER_API_KEY=...\n# RENDER_SERVICE_ID=srv-... # from your service's Settings page\n#\n# Prometheus (observability) \u2014 self-hosted or Grafana Cloud\n# PROMETHEUS_URL=http://prometheus.internal:9090\n# PROMETHEUS_TOKEN=... 
# optional, for secured instances\n#\n# PagerDuty (observability) \u2014 https://your-account.pagerduty.com/api_keys\n# PAGERDUTY_API_KEY=...\n#\n# Honeycomb (observability) \u2014 https://docs.honeycomb.io/api/\n# HONEYCOMB_API_KEY=...\n# HONEYCOMB_DATASET=... # dataset name (e.g. production)\n#\n# Heroku (observability) \u2014 https://devcenter.heroku.com/articles/platform-api-reference\n# HEROKU_API_KEY=... # https://dashboard.heroku.com/account\n# HEROKU_APP_NAME=... # the name of your Heroku application\n#\n# OpsGenie (observability) \u2014 https://support.atlassian.com/opsgenie/docs/api-key-management/\n# OPSGENIE_API_KEY=...\n# OPSGENIE_REGION=us # or eu for EU-hosted accounts\n#\n# Slack (notifications) \u2014 https://api.slack.com/apps\n# NOTIFICATION_WEBHOOK_URL=https://hooks.slack.com/services/...\n# # or use a bot token: SLACK_BOT_TOKEN=xoxb-...\n";
|
package/dist/cli/config-file.js
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
import * as fs from "node:fs";
|
|
2
2
|
import * as path from "node:path";
|
|
3
|
+
import { parse as parseYaml } from "yaml";
|
|
3
4
|
/**
|
|
4
5
|
* Auto-load a `.env` file from the given directory.
|
|
5
6
|
* Sets `process.env[KEY]` only if not already defined (real env vars win).
|
|
@@ -32,7 +33,8 @@ export function loadDotenv(cwd = process.cwd()) {
|
|
|
32
33
|
}
|
|
33
34
|
}
|
|
34
35
|
/**
|
|
35
|
-
* Search upward from `cwd` for `.sweny.yml` and parse it
|
|
36
|
+
* Search upward from `cwd` for `.sweny.yml` and parse it.
|
|
37
|
+
* Scalar values are strings, list values (rules, context) are string arrays.
|
|
36
38
|
* Returns empty object if no config file is found.
|
|
37
39
|
*/
|
|
38
40
|
export function loadConfigFile(cwd = process.cwd()) {
|
|
@@ -46,22 +48,22 @@ export function loadConfigFile(cwd = process.cwd()) {
|
|
|
46
48
|
catch {
|
|
47
49
|
return {};
|
|
48
50
|
}
|
|
51
|
+
let raw;
|
|
52
|
+
try {
|
|
53
|
+
raw = parseYaml(content);
|
|
54
|
+
}
|
|
55
|
+
catch {
|
|
56
|
+
return {};
|
|
57
|
+
}
|
|
58
|
+
if (!raw || typeof raw !== "object")
|
|
59
|
+
return {};
|
|
49
60
|
const config = {};
|
|
50
|
-
for (const
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
continue;
|
|
54
|
-
const colonIndex = trimmed.indexOf(":");
|
|
55
|
-
if (colonIndex === -1)
|
|
56
|
-
continue;
|
|
57
|
-
const key = trimmed.slice(0, colonIndex).trim();
|
|
58
|
-
let value = trimmed.slice(colonIndex + 1).trim();
|
|
59
|
-
// Strip surrounding quotes
|
|
60
|
-
if ((value.startsWith('"') && value.endsWith('"')) || (value.startsWith("'") && value.endsWith("'"))) {
|
|
61
|
-
value = value.slice(1, -1);
|
|
61
|
+
for (const [key, value] of Object.entries(raw)) {
|
|
62
|
+
if (Array.isArray(value)) {
|
|
63
|
+
config[key] = value.map(String);
|
|
62
64
|
}
|
|
63
|
-
if (
|
|
64
|
-
config[key] = value;
|
|
65
|
+
else if (value != null && value !== "") {
|
|
66
|
+
config[key] = String(value);
|
|
65
67
|
}
|
|
66
68
|
}
|
|
67
69
|
return config;
|
package/dist/cli/config.d.ts
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
import type { Command } from "commander";
|
|
2
2
|
import type { McpServerConfig } from "../types.js";
|
|
3
|
+
import type { FileConfig } from "./config-file.js";
|
|
3
4
|
export interface CliConfig {
|
|
4
5
|
codingAgentProvider: string;
|
|
5
6
|
anthropicApiKey: string;
|
|
@@ -57,9 +58,11 @@ export interface CliConfig {
|
|
|
57
58
|
outputDir: string;
|
|
58
59
|
mcpServers: Record<string, McpServerConfig>;
|
|
59
60
|
workspaceTools: string[];
|
|
61
|
+
rules: string[];
|
|
62
|
+
context: string[];
|
|
60
63
|
}
|
|
61
64
|
export declare function registerTriageCommand(program: Command): Command;
|
|
62
|
-
export declare function parseCliInputs(options: Record<string, unknown>, fileConfig?:
|
|
65
|
+
export declare function parseCliInputs(options: Record<string, unknown>, fileConfig?: FileConfig): CliConfig;
|
|
63
66
|
/**
|
|
64
67
|
* All recognized workspace tool names. Update here when adding a new Category B MCP server.
|
|
65
68
|
*
|
package/dist/cli/config.js
CHANGED
|
@@ -65,8 +65,26 @@ export function registerTriageCommand(program) {
|
|
|
65
65
|
}
|
|
66
66
|
export function parseCliInputs(options, fileConfig = {}) {
|
|
67
67
|
const env = process.env;
|
|
68
|
-
// Config file lookup helper: CLI flag > env var > file > default
|
|
69
|
-
const f = (key) =>
|
|
68
|
+
// Config file lookup helper: CLI flag > env var > file > default (scalar only)
|
|
69
|
+
const f = (key) => {
|
|
70
|
+
const v = fileConfig[key];
|
|
71
|
+
return typeof v === "string" && v ? v : undefined;
|
|
72
|
+
};
|
|
73
|
+
// Array helper: CLI flag (comma-separated) > file > default
|
|
74
|
+
const fa = (key, cliKey) => {
|
|
75
|
+
const cliVal = options[cliKey ?? key];
|
|
76
|
+
if (cliVal)
|
|
77
|
+
return cliVal
|
|
78
|
+
.split(",")
|
|
79
|
+
.map((s) => s.trim())
|
|
80
|
+
.filter(Boolean);
|
|
81
|
+
const fileVal = fileConfig[key];
|
|
82
|
+
if (Array.isArray(fileVal))
|
|
83
|
+
return fileVal;
|
|
84
|
+
if (typeof fileVal === "string" && fileVal)
|
|
85
|
+
return [fileVal];
|
|
86
|
+
return [];
|
|
87
|
+
};
|
|
70
88
|
const obsProvider = options.observabilityProvider || f("observability-provider") || "datadog";
|
|
71
89
|
return {
|
|
72
90
|
codingAgentProvider: options.agent || options.codingAgentProvider || f("coding-agent-provider") || "claude",
|
|
@@ -138,6 +156,8 @@ export function parseCliInputs(options, fileConfig = {}) {
|
|
|
138
156
|
.split(",")
|
|
139
157
|
.map((s) => s.trim())
|
|
140
158
|
.filter(Boolean),
|
|
159
|
+
rules: fa("rules"),
|
|
160
|
+
context: fa("context"),
|
|
141
161
|
};
|
|
142
162
|
}
|
|
143
163
|
/**
|
|
@@ -395,7 +415,10 @@ export function validateWarnings(config) {
|
|
|
395
415
|
}
|
|
396
416
|
function parseObservabilityCredentials(provider, options, fileConfig = {}) {
|
|
397
417
|
const env = process.env;
|
|
398
|
-
const f = (key) =>
|
|
418
|
+
const f = (key) => {
|
|
419
|
+
const v = fileConfig[key];
|
|
420
|
+
return typeof v === "string" && v ? v : undefined;
|
|
421
|
+
};
|
|
399
422
|
switch (provider) {
|
|
400
423
|
case "datadog":
|
|
401
424
|
return {
|
package/dist/cli/main.js
CHANGED
|
@@ -12,6 +12,8 @@ import { implementWorkflow } from "../workflows/implement.js";
|
|
|
12
12
|
import { consoleLogger } from "../types.js";
|
|
13
13
|
import { ClaudeClient } from "../claude.js";
|
|
14
14
|
import { createSkillMap, configuredSkills } from "../skills/index.js";
|
|
15
|
+
import { buildAutoMcpServers, buildProviderContext } from "../mcp.js";
|
|
16
|
+
import { loadAdditionalContext } from "../templates.js";
|
|
15
17
|
import { validateWorkflow as validateWorkflowSchema } from "../schema.js";
|
|
16
18
|
import { parse as parseYaml, stringify as stringifyYaml } from "yaml";
|
|
17
19
|
import { buildWorkflow, refineWorkflow } from "../workflow-builder.js";
|
|
@@ -134,6 +136,41 @@ function buildMcpAutoConfig(config) {
|
|
|
134
136
|
userMcpServers: Object.keys(config.mcpServers).length > 0 ? config.mcpServers : undefined,
|
|
135
137
|
};
|
|
136
138
|
}
|
|
139
|
+
/**
|
|
140
|
+
* Build provider context string (available tools/providers).
|
|
141
|
+
*/
|
|
142
|
+
function buildProviderCtx(config, mcpServers) {
|
|
143
|
+
const extras = {};
|
|
144
|
+
if (config.observabilityCredentials.sourceId) {
|
|
145
|
+
extras["BetterStack source ID"] = config.observabilityCredentials.sourceId;
|
|
146
|
+
}
|
|
147
|
+
if (config.observabilityCredentials.tableName) {
|
|
148
|
+
extras["BetterStack table name"] = config.observabilityCredentials.tableName;
|
|
149
|
+
}
|
|
150
|
+
return buildProviderContext({
|
|
151
|
+
observabilityProvider: config.observabilityProvider,
|
|
152
|
+
issueTrackerProvider: config.issueTrackerProvider,
|
|
153
|
+
sourceControlProvider: config.sourceControlProvider,
|
|
154
|
+
mcpServers: Object.keys(mcpServers),
|
|
155
|
+
extras: Object.keys(extras).length > 0 ? extras : undefined,
|
|
156
|
+
});
|
|
157
|
+
}
|
|
158
|
+
/**
|
|
159
|
+
* Resolve rules and context from config into structured workflow input fields.
|
|
160
|
+
* Local files + inline text are resolved now; URLs are passed to the prepare node.
|
|
161
|
+
*/
|
|
162
|
+
async function resolveRulesAndContext(config) {
|
|
163
|
+
const [rulesResult, contextResult] = await Promise.all([
|
|
164
|
+
loadAdditionalContext(config.rules),
|
|
165
|
+
loadAdditionalContext(config.context),
|
|
166
|
+
]);
|
|
167
|
+
return {
|
|
168
|
+
rules: rulesResult.resolved,
|
|
169
|
+
context: contextResult.resolved,
|
|
170
|
+
rulesUrls: rulesResult.urls,
|
|
171
|
+
contextUrls: contextResult.urls,
|
|
172
|
+
};
|
|
173
|
+
}
|
|
137
174
|
// ── sweny triage ──────────────────────────────────────────────────────
|
|
138
175
|
const triageCmd = registerTriageCommand(program);
|
|
139
176
|
triageCmd.action(async (options) => {
|
|
@@ -154,12 +191,15 @@ triageCmd.action(async (options) => {
|
|
|
154
191
|
if (!config.json) {
|
|
155
192
|
console.log(formatBanner(config, version));
|
|
156
193
|
}
|
|
157
|
-
// ── Build skill map + Claude client
|
|
194
|
+
// ── Build skill map + MCP servers + Claude client ──────────
|
|
158
195
|
const skills = createSkillMap(configuredSkills());
|
|
196
|
+
const mcpAutoConfig = buildMcpAutoConfig(config);
|
|
197
|
+
const mcpServers = buildAutoMcpServers(mcpAutoConfig);
|
|
159
198
|
const claude = new ClaudeClient({
|
|
160
199
|
maxTurns: config.maxInvestigateTurns || 50,
|
|
161
200
|
cwd: process.cwd(),
|
|
162
201
|
logger: consoleLogger,
|
|
202
|
+
mcpServers,
|
|
163
203
|
});
|
|
164
204
|
// ── Progress display state ─────────────────────────────────
|
|
165
205
|
const FRAMES = ["\u280B", "\u2819", "\u2839", "\u2838", "\u283C", "\u2834", "\u2826", "\u2827", "\u2807", "\u280F"];
|
|
@@ -280,9 +320,13 @@ triageCmd.action(async (options) => {
|
|
|
280
320
|
};
|
|
281
321
|
const observer = composeObservers(progressObserver, config.stream ? createStreamObserver() : undefined);
|
|
282
322
|
// ── Build workflow input from config ──────────────────────
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
//
|
|
323
|
+
const providerCtx = buildProviderCtx(config, mcpServers);
|
|
324
|
+
const { rules, context, rulesUrls, contextUrls } = await resolveRulesAndContext(config);
|
|
325
|
+
// Combine provider context + additional instructions into the context field
|
|
326
|
+
const contextParts = [providerCtx];
|
|
327
|
+
if (config.additionalInstructions)
|
|
328
|
+
contextParts.push(config.additionalInstructions);
|
|
329
|
+
const fullContext = [contextParts.join("\n\n"), context].filter(Boolean).join("\n\n---\n\n");
|
|
286
330
|
const workflowInput = {
|
|
287
331
|
timeRange: config.timeRange,
|
|
288
332
|
severityFocus: config.severityFocus,
|
|
@@ -304,6 +348,12 @@ triageCmd.action(async (options) => {
|
|
|
304
348
|
...(config.observabilityCredentials.tableName && {
|
|
305
349
|
betterstackTableName: config.observabilityCredentials.tableName,
|
|
306
350
|
}),
|
|
351
|
+
// Structured rules/context for executor
|
|
352
|
+
rules,
|
|
353
|
+
context: fullContext,
|
|
354
|
+
// URLs for the prepare node to fetch at runtime
|
|
355
|
+
rulesUrls,
|
|
356
|
+
contextUrls,
|
|
307
357
|
};
|
|
308
358
|
try {
|
|
309
359
|
const results = await execute(triageWorkflow, workflowInput, {
|
|
@@ -355,13 +405,19 @@ implementCmd.action(async (issueId, options) => {
|
|
|
355
405
|
maxImplementTurns: parseInt(String(options.maxImplementTurns || fileConfig["max-implement-turns"] || "40"), 10),
|
|
356
406
|
baseBranch: options.baseBranch || fileConfig["base-branch"] || "main",
|
|
357
407
|
repository: options.repository || process.env.GITHUB_REPOSITORY || "",
|
|
358
|
-
outputDir: options.outputDir ||
|
|
408
|
+
outputDir: options.outputDir ||
|
|
409
|
+
process.env.SWENY_OUTPUT_DIR ||
|
|
410
|
+
fileConfig["output-dir"] ||
|
|
411
|
+
".sweny/output",
|
|
359
412
|
};
|
|
360
413
|
const skills = createSkillMap(configuredSkills());
|
|
414
|
+
const mcpAutoConfig = buildMcpAutoConfig(config);
|
|
415
|
+
const mcpServers = buildAutoMcpServers(mcpAutoConfig);
|
|
361
416
|
const claude = new ClaudeClient({
|
|
362
417
|
maxTurns: config.maxImplementTurns || 40,
|
|
363
418
|
cwd: process.cwd(),
|
|
364
419
|
logger: consoleLogger,
|
|
420
|
+
mcpServers,
|
|
365
421
|
});
|
|
366
422
|
console.log(chalk.cyan(`\n sweny implement ${issueId}\n`));
|
|
367
423
|
const isTTY = process.stderr.isTTY ?? false;
|
|
@@ -477,10 +533,13 @@ export async function workflowRunAction(file, options) {
|
|
|
477
533
|
const isJson = Boolean(options.json);
|
|
478
534
|
const isTTY = !isJson && (process.stderr.isTTY ?? false);
|
|
479
535
|
const skills = createSkillMap(configuredSkills());
|
|
536
|
+
const mcpAutoConfig = buildMcpAutoConfig(config);
|
|
537
|
+
const mcpServers = buildAutoMcpServers(mcpAutoConfig);
|
|
480
538
|
const claude = new ClaudeClient({
|
|
481
539
|
maxTurns: config.maxInvestigateTurns || 50,
|
|
482
540
|
cwd: process.cwd(),
|
|
483
541
|
logger: consoleLogger,
|
|
542
|
+
mcpServers,
|
|
484
543
|
});
|
|
485
544
|
// Track per-node entry time to compute elapsed on exit
|
|
486
545
|
const nodeEnterTimes = new Map();
|
|
@@ -535,6 +594,7 @@ export async function workflowRunAction(file, options) {
|
|
|
535
594
|
...(config.observabilityCredentials.tableName && {
|
|
536
595
|
betterstackTableName: config.observabilityCredentials.tableName,
|
|
537
596
|
}),
|
|
597
|
+
context: buildProviderCtx(config, mcpServers),
|
|
538
598
|
};
|
|
539
599
|
try {
|
|
540
600
|
const results = await execute(workflow, workflowInput, {
|
package/dist/executor.js
CHANGED
|
@@ -46,11 +46,8 @@ export async function execute(workflow, input, options) {
|
|
|
46
46
|
return output;
|
|
47
47
|
},
|
|
48
48
|
}));
|
|
49
|
-
// Prepend
|
|
50
|
-
const
|
|
51
|
-
const instruction = additionalContext
|
|
52
|
-
? `## Additional Context & Rules\n\n${additionalContext}\n\n---\n\n${node.instruction}`
|
|
53
|
-
: node.instruction;
|
|
49
|
+
// Prepend rules and context to instruction if provided
|
|
50
|
+
const instruction = buildNodeInstruction(node.instruction, input);
|
|
54
51
|
// Run Claude on this node
|
|
55
52
|
const result = await claude.run({
|
|
56
53
|
instruction,
|
|
@@ -74,6 +71,36 @@ export async function execute(workflow, input, options) {
|
|
|
74
71
|
return results;
|
|
75
72
|
}
|
|
76
73
|
// ─── Internals ───────────────────────────────────────────────────
|
|
74
|
+
/**
|
|
75
|
+
* Build the full instruction for a node by prepending rules and context.
|
|
76
|
+
* Rules get "You MUST follow" framing; context gets "Background" framing.
|
|
77
|
+
* Falls back to legacy `additionalContext` if rules/context aren't set.
|
|
78
|
+
*/
|
|
79
|
+
function buildNodeInstruction(baseInstruction, input) {
|
|
80
|
+
const inp = input;
|
|
81
|
+
if (!inp)
|
|
82
|
+
return baseInstruction;
|
|
83
|
+
const sections = [];
|
|
84
|
+
// New structured format
|
|
85
|
+
const rules = typeof inp.rules === "string" && inp.rules ? inp.rules : "";
|
|
86
|
+
const context = typeof inp.context === "string" && inp.context ? inp.context : "";
|
|
87
|
+
if (rules) {
|
|
88
|
+
sections.push(`## Rules — You MUST Follow These\n\n${rules}`);
|
|
89
|
+
}
|
|
90
|
+
if (context) {
|
|
91
|
+
sections.push(`## Background Context\n\n${context}`);
|
|
92
|
+
}
|
|
93
|
+
// Legacy fallback
|
|
94
|
+
if (sections.length === 0) {
|
|
95
|
+
const legacy = typeof inp.additionalContext === "string" ? inp.additionalContext : "";
|
|
96
|
+
if (legacy) {
|
|
97
|
+
sections.push(`## Additional Context & Rules\n\n${legacy}`);
|
|
98
|
+
}
|
|
99
|
+
}
|
|
100
|
+
if (sections.length === 0)
|
|
101
|
+
return baseInstruction;
|
|
102
|
+
return `${sections.join("\n\n")}\n\n---\n\n${baseInstruction}`;
|
|
103
|
+
}
|
|
77
104
|
/** Call observer without letting exceptions crash the workflow */
|
|
78
105
|
function safeObserve(observer, event, logger) {
|
|
79
106
|
if (!observer)
|
package/dist/index.d.ts
CHANGED
|
@@ -31,7 +31,8 @@ export { github, linear, slack, sentry, datadog, notification, builtinSkills, cr
|
|
|
31
31
|
export type { SkillValidationResult } from "./skills/index.js";
|
|
32
32
|
export { workflowZ, nodeZ, edgeZ, skillZ, parseWorkflow, validateWorkflow, workflowJsonSchema } from "./schema.js";
|
|
33
33
|
export type { WorkflowError } from "./schema.js";
|
|
34
|
-
export { buildAutoMcpServers } from "./mcp.js";
|
|
34
|
+
export { buildAutoMcpServers, buildProviderContext } from "./mcp.js";
|
|
35
|
+
export type { ProviderContextOptions } from "./mcp.js";
|
|
35
36
|
export type { McpServerConfig, McpAutoConfig } from "./types.js";
|
|
36
37
|
export { buildWorkflow, refineWorkflow } from "./workflow-builder.js";
|
|
37
38
|
export type { BuildWorkflowOptions } from "./workflow-builder.js";
|
package/dist/index.js
CHANGED
|
@@ -31,7 +31,7 @@ export { github, linear, slack, sentry, datadog, notification, builtinSkills, cr
|
|
|
31
31
|
// Schema & validation
|
|
32
32
|
export { workflowZ, nodeZ, edgeZ, skillZ, parseWorkflow, validateWorkflow, workflowJsonSchema } from "./schema.js";
|
|
33
33
|
// MCP auto-injection
|
|
34
|
-
export { buildAutoMcpServers } from "./mcp.js";
|
|
34
|
+
export { buildAutoMcpServers, buildProviderContext } from "./mcp.js";
|
|
35
35
|
// Workflow builder
|
|
36
36
|
export { buildWorkflow, refineWorkflow } from "./workflow-builder.js";
|
|
37
37
|
// Templates
|
package/dist/mcp.d.ts
CHANGED
|
@@ -9,3 +9,18 @@ import type { McpAutoConfig, McpServerConfig } from "./types.js";
|
|
|
9
9
|
* User-supplied servers (userMcpServers) always win on key conflicts.
|
|
10
10
|
*/
|
|
11
11
|
export declare function buildAutoMcpServers(config: McpAutoConfig): Record<string, McpServerConfig>;
|
|
12
|
+
export interface ProviderContextOptions {
|
|
13
|
+
observabilityProvider?: string;
|
|
14
|
+
issueTrackerProvider?: string;
|
|
15
|
+
sourceControlProvider?: string;
|
|
16
|
+
/** Which MCP servers were actually injected (keys from buildAutoMcpServers) */
|
|
17
|
+
mcpServers: string[];
|
|
18
|
+
/** Extra details to include (e.g. betterstack source ID) */
|
|
19
|
+
extras?: Record<string, string>;
|
|
20
|
+
}
|
|
21
|
+
/**
|
|
22
|
+
* Build a human-readable summary of configured providers and MCP tools.
|
|
23
|
+
* Prepended to every node instruction via additionalContext so the agent
|
|
24
|
+
* knows exactly what tools are available and how to use them.
|
|
25
|
+
*/
|
|
26
|
+
export declare function buildProviderContext(opts: ProviderContextOptions): string;
|
package/dist/mcp.js
CHANGED
|
@@ -183,3 +183,44 @@ export function buildAutoMcpServers(config) {
|
|
|
183
183
|
// User-supplied servers always win on key conflict.
|
|
184
184
|
return { ...auto, ...(config.userMcpServers ?? {}) };
|
|
185
185
|
}
|
|
186
|
+
/**
 * Build a human-readable summary of configured providers and MCP tools.
 * Prepended to every node instruction via additionalContext so the agent
 * knows exactly what tools are available and how to use them.
 */
export function buildProviderContext(opts) {
    const out = ["## Available Providers & Tools", ""];
    const hasMcp = (name) => opts.mcpServers.includes(name);
    // Observability provider line, with a usage hint when its MCP server was injected.
    if (opts.observabilityProvider) {
        const note = hasMcp(opts.observabilityProvider)
            ? ` (available via MCP — use its tools to query logs, errors, and metrics)`
            : "";
        out.push(`- **Observability**: ${opts.observabilityProvider}${note}`);
    }
    // BetterStack as secondary log source: its MCP server is connected but it
    // is not the primary observability provider.
    if (opts.observabilityProvider !== "betterstack" && hasMcp("betterstack")) {
        out.push(`- **Logs**: betterstack (available via MCP — use its tools to query logs)`);
    }
    // Issue tracker line; "github-issues" maps onto the "github" MCP server key.
    if (opts.issueTrackerProvider) {
        const serverKey = opts.issueTrackerProvider === "github-issues" ? "github" : opts.issueTrackerProvider;
        const note = hasMcp(serverKey) ? ` (available via MCP)` : "";
        out.push(`- **Issue tracker**: ${opts.issueTrackerProvider}${note}`);
    }
    // Source control line.
    if (opts.sourceControlProvider) {
        const note = hasMcp(opts.sourceControlProvider) ? ` (available via MCP)` : "";
        out.push(`- **Source control**: ${opts.sourceControlProvider}${note}`);
    }
    // Extras (source IDs, table names, etc.) rendered as their own bullet list.
    const extraEntries = Object.entries(opts.extras ?? {});
    if (extraEntries.length > 0) {
        out.push("");
        for (const [key, value] of extraEntries) {
            out.push(`- **${key}**: ${value}`);
        }
    }
    out.push("");
    out.push("Use all available MCP tools to gather data. MCP tools are already connected — just call them directly.");
    return out.join("\n");
}
|
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* BetterStack Skill
|
|
3
|
+
*
|
|
4
|
+
* Telemetry REST API for source management + ClickHouse HTTP for log queries.
|
|
5
|
+
* CI-native alternative to the BetterStack MCP server.
|
|
6
|
+
*/
|
|
7
|
+
// ─── REST API (source management) ──────────────────────────────
|
|
8
|
+
// GET a path under the BetterStack Telemetry REST API and return the parsed
// JSON body. Throws on any non-2xx response; 30s timeout via AbortSignal.
async function bsApi(path, ctx) {
    const response = await fetch(`https://telemetry.betterstack.com/api/v1${path}`, {
        headers: { Authorization: `Bearer ${ctx.config.BETTERSTACK_API_TOKEN}` },
        signal: AbortSignal.timeout(30_000),
    });
    if (!response.ok) {
        const detail = await response.text();
        throw new Error(`[BetterStack] API request failed (HTTP ${response.status}): ${detail}`);
    }
    return response.json();
}
|
|
17
|
+
// ─── ClickHouse HTTP (log queries) ─────────────────────────────
|
|
18
|
+
// POST a SQL statement to the configured ClickHouse HTTP endpoint using Basic
// auth, forcing JSONEachRow output, and return the rows as parsed objects.
// Throws on any non-2xx response; 30s timeout via AbortSignal.
async function bsQuery(sql, ctx) {
    // Strip trailing slashes so path concatenation below stays clean.
    const base = ctx.config.BETTERSTACK_QUERY_ENDPOINT.replace(/\/+$/, "");
    const credentials = btoa(`${ctx.config.BETTERSTACK_QUERY_USERNAME}:${ctx.config.BETTERSTACK_QUERY_PASSWORD}`);
    const response = await fetch(`${base}?output_format_pretty_row_numbers=0`, {
        method: "POST",
        headers: {
            "Content-Type": "text/plain",
            Authorization: `Basic ${credentials}`,
        },
        body: `${sql} FORMAT JSONEachRow`,
        signal: AbortSignal.timeout(30_000),
    });
    if (!response.ok) {
        throw new Error(`[BetterStack] ClickHouse query failed (HTTP ${response.status}): ${await response.text()}`);
    }
    // JSONEachRow returns one JSON object per line (NDJSON); an empty body
    // means zero rows.
    const payload = (await response.text()).trim();
    if (!payload) {
        return [];
    }
    return payload.split("\n").map((row) => JSON.parse(row));
}
|
|
40
|
+
// ─── Skill definition ──────────────────────────────────────────
|
|
41
|
+
/**
 * BetterStack skill definition: four agent-callable tools backed by the
 * Telemetry REST API (source management, via bsApi) and the ClickHouse HTTP
 * interface (log queries, via bsQuery). All four config keys are required
 * and resolved from the matching environment variables.
 */
export const betterstack = {
    id: "betterstack",
    name: "BetterStack",
    description: "Query logs and manage telemetry sources in BetterStack",
    category: "observability",
    config: {
        BETTERSTACK_API_TOKEN: {
            description: "BetterStack Telemetry API token (team-scoped)",
            required: true,
            env: "BETTERSTACK_API_TOKEN",
        },
        BETTERSTACK_QUERY_ENDPOINT: {
            description: "ClickHouse HTTP endpoint (e.g. https://eu-fsn-3-connect.betterstackdata.com)",
            required: true,
            env: "BETTERSTACK_QUERY_ENDPOINT",
        },
        BETTERSTACK_QUERY_USERNAME: {
            description: "ClickHouse connection username",
            required: true,
            env: "BETTERSTACK_QUERY_USERNAME",
        },
        BETTERSTACK_QUERY_PASSWORD: {
            description: "ClickHouse connection password",
            required: true,
            env: "BETTERSTACK_QUERY_PASSWORD",
        },
    },
    tools: [
        {
            name: "betterstack_list_sources",
            description: "List available telemetry sources (id, name, table_name, platform)",
            input_schema: {
                type: "object",
                properties: {
                    name: { type: "string", description: "Filter by name (partial match)" },
                },
            },
            // Fetches up to 50 sources (single page — no pagination loop) and
            // projects each to the four fields agents need.
            handler: async (input, ctx) => {
                const params = new URLSearchParams({ per_page: "50" });
                if (input.name)
                    params.set("name", input.name);
                const data = await bsApi(`/sources?${params}`, ctx);
                return data.data.map((s) => ({
                    id: s.id,
                    name: s.attributes.name,
                    table_name: s.attributes.table_name,
                    platform: s.attributes.platform,
                }));
            },
        },
        {
            name: "betterstack_get_source",
            description: "Get full details for a telemetry source (table name, retention, config)",
            input_schema: {
                type: "object",
                properties: {
                    id: { type: "number", description: "Source ID" },
                },
                required: ["id"],
            },
            // Flattens the JSON:API envelope: id plus all attributes merged
            // into one object.
            handler: async (input, ctx) => {
                const data = await bsApi(`/sources/${input.id}`, ctx);
                return { id: data.data.id, ...data.data.attributes };
            },
        },
        {
            name: "betterstack_get_source_fields",
            description: "Get queryable fields for a source table (column names and types)",
            input_schema: {
                type: "object",
                properties: {
                    table: { type: "string", description: "Table name (e.g. t273774_offload_ecs_production)" },
                },
                required: ["table"],
            },
            // DESCRIBE against the remote() logs table for this source.
            // NOTE(review): input.table is interpolated into SQL unescaped —
            // acceptable only because the caller is the agent, but confirm.
            handler: async (input, ctx) => {
                return bsQuery(`DESCRIBE TABLE remote(${input.table}_logs)`, ctx);
            },
        },
        {
            name: "betterstack_query",
            description: `Execute a read-only ClickHouse SQL query against a telemetry source.
Tables: remote(TABLE_logs) for recent logs, s3Cluster(primary, TABLE_s3) for historical (add WHERE _row_type = 1).
Key fields: dt (timestamp), raw (JSON blob with all log fields).
Extract nested fields: JSONExtract(raw, 'field_name', 'Nullable(String)').
Use betterstack_get_source_fields to discover available columns.`,
            input_schema: {
                type: "object",
                properties: {
                    query: { type: "string", description: "ClickHouse SQL query (SELECT only)" },
                    source_id: { type: "number", description: "Source ID (for context)" },
                    table: { type: "string", description: "Table name (e.g. t273774_offload_ecs_production)" },
                },
                required: ["query", "source_id", "table"],
            },
            handler: async (input, ctx) => {
                const trimmed = input.query.trim();
                const upper = trimmed.toUpperCase();
                // Best-effort read-only guard: only checks the leading keyword,
                // not the full statement.
                if (!upper.startsWith("SELECT") && !upper.startsWith("DESCRIBE")) {
                    throw new Error("[BetterStack] Only SELECT and DESCRIBE queries are allowed");
                }
                // Append LIMIT if none present to prevent unbounded result sets
                // (substring check — a "LIMIT" anywhere in the query suppresses it).
                let sql = trimmed;
                if (!upper.includes("LIMIT")) {
                    sql = `${sql} LIMIT 500`;
                }
                return bsQuery(sql, ctx);
            },
        },
    ],
};
|
package/dist/skills/index.d.ts
CHANGED
|
@@ -9,9 +9,10 @@ import { linear } from "./linear.js";
|
|
|
9
9
|
import { slack } from "./slack.js";
|
|
10
10
|
import { sentry } from "./sentry.js";
|
|
11
11
|
import { datadog } from "./datadog.js";
|
|
12
|
+
import { betterstack } from "./betterstack.js";
|
|
12
13
|
import { notification } from "./notification.js";
|
|
13
14
|
export declare const builtinSkills: Skill[];
|
|
14
|
-
export { github, linear, slack, sentry, datadog, notification };
|
|
15
|
+
export { github, linear, slack, sentry, datadog, betterstack, notification };
|
|
15
16
|
/**
|
|
16
17
|
* Build a skill map from an array of skills.
|
|
17
18
|
* Pass to `execute()` as the `skills` option.
|
package/dist/skills/index.js
CHANGED
|
@@ -8,10 +8,11 @@ import { linear } from "./linear.js";
|
|
|
8
8
|
import { slack } from "./slack.js";
|
|
9
9
|
import { sentry } from "./sentry.js";
|
|
10
10
|
import { datadog } from "./datadog.js";
|
|
11
|
+
import { betterstack } from "./betterstack.js";
|
|
11
12
|
import { notification } from "./notification.js";
|
|
12
13
|
// ─── Built-in skill catalog ─────────────────────────────────────
|
|
13
|
-
export const builtinSkills = [github, linear, slack, sentry, datadog, notification];
|
|
14
|
-
export { github, linear, slack, sentry, datadog, notification };
|
|
14
|
+
export const builtinSkills = [github, linear, slack, sentry, datadog, betterstack, notification];
|
|
15
|
+
export { github, linear, slack, sentry, datadog, betterstack, notification };
|
|
15
16
|
// ─── Registry helpers ───────────────────────────────────────────
|
|
16
17
|
/**
|
|
17
18
|
* Build a skill map from an array of skills.
|
package/dist/skills/linear.js
CHANGED
|
@@ -79,6 +79,41 @@ export const linear = {
|
|
|
79
79
|
},
|
|
80
80
|
handler: async (input, ctx) => linearGql(`mutation($input: CommentCreateInput!) { commentCreate(input: $input) { success comment { id body } } }`, { input: { issueId: input.issueId, body: input.body } }, ctx),
|
|
81
81
|
},
|
|
82
|
+
{
|
|
83
|
+
name: "linear_get_issue",
|
|
84
|
+
description: "Get a Linear issue by ID or identifier (e.g. 'OFF-1020')",
|
|
85
|
+
input_schema: {
|
|
86
|
+
type: "object",
|
|
87
|
+
properties: {
|
|
88
|
+
id: { type: "string", description: "Linear issue ID (UUID) or identifier (e.g. 'OFF-1020')" },
|
|
89
|
+
},
|
|
90
|
+
required: ["id"],
|
|
91
|
+
},
|
|
92
|
+
handler: async (input, ctx) => linearGql(`query($id: String!) {
|
|
93
|
+
issue(id: $id) {
|
|
94
|
+
id identifier title url description
|
|
95
|
+
state { name type }
|
|
96
|
+
priority priorityLabel
|
|
97
|
+
assignee { name email }
|
|
98
|
+
labels { nodes { name } }
|
|
99
|
+
team { key name }
|
|
100
|
+
createdAt updatedAt
|
|
101
|
+
}
|
|
102
|
+
}`, { id: input.id }, ctx),
|
|
103
|
+
},
|
|
104
|
+
{
|
|
105
|
+
name: "linear_list_teams",
|
|
106
|
+
description: "List Linear teams (needed for teamId when creating issues)",
|
|
107
|
+
input_schema: {
|
|
108
|
+
type: "object",
|
|
109
|
+
properties: {},
|
|
110
|
+
},
|
|
111
|
+
handler: async (_input, ctx) => linearGql(`query {
|
|
112
|
+
teams {
|
|
113
|
+
nodes { id key name description }
|
|
114
|
+
}
|
|
115
|
+
}`, {}, ctx),
|
|
116
|
+
},
|
|
82
117
|
{
|
|
83
118
|
name: "linear_update_issue",
|
|
84
119
|
description: "Update an existing Linear issue",
|
package/dist/templates.d.ts
CHANGED
|
@@ -25,7 +25,11 @@ export declare function resolveTemplates(config: {
|
|
|
25
25
|
prTemplate?: string;
|
|
26
26
|
}, cwd?: string): Promise<Templates>;
|
|
27
27
|
/**
|
|
28
|
-
* Load additional context documents (local files or
|
|
28
|
+
* Load additional context documents (local files, URLs, or inline text).
|
|
29
29
|
* Each source is loaded and wrapped with a header.
|
|
30
|
+
* Returns { resolved, urls } — resolved text for files/inline, urls for agent to fetch.
|
|
30
31
|
*/
|
|
31
|
-
export declare function loadAdditionalContext(sources: string[], cwd?: string): Promise<
|
|
32
|
+
export declare function loadAdditionalContext(sources: string[], cwd?: string): Promise<{
|
|
33
|
+
resolved: string;
|
|
34
|
+
urls: string[];
|
|
35
|
+
}>;
|
package/dist/templates.js
CHANGED
|
@@ -89,22 +89,46 @@ export async function resolveTemplates(config, cwd) {
|
|
|
89
89
|
return { issueTemplate, prTemplate };
|
|
90
90
|
}
|
|
91
91
|
/**
|
|
92
|
-
*
|
|
92
|
+
* Classify a source entry as URL, file path, or inline text.
|
|
93
|
+
*/
|
|
94
|
+
// Classify a source entry as URL, file path, or inline text.
// http(s) schemes → "url"; entries starting with ./, ../ or / → "file";
// everything else is treated as inline text.
function classifySource(source) {
    const urlPrefixes = ["http://", "https://"];
    if (urlPrefixes.some((prefix) => source.startsWith(prefix))) {
        return "url";
    }
    const filePrefixes = ["./", "../", "/"];
    return filePrefixes.some((prefix) => source.startsWith(prefix)) ? "file" : "inline";
}
|
|
101
|
+
/**
|
|
102
|
+
* Load additional context documents (local files, URLs, or inline text).
|
|
93
103
|
* Each source is loaded and wrapped with a header.
|
|
104
|
+
* Returns { resolved, urls } — resolved text for files/inline, urls for agent to fetch.
|
|
94
105
|
*/
|
|
95
106
|
/**
 * Load additional context documents (local files, URLs, or inline text).
 * Each file source is loaded and wrapped with a `### basename` header.
 * Returns { resolved, urls } — resolved text for files/inline, urls left
 * for the agent to fetch itself.
 */
export async function loadAdditionalContext(sources, cwd = process.cwd()) {
    if (sources.length === 0) {
        return { resolved: "", urls: [] };
    }
    const urls = [];
    const parts = [];
    for (const raw of sources) {
        const entry = raw.trim();
        if (!entry) {
            continue;
        }
        switch (classifySource(entry)) {
            case "url":
                // URLs are not fetched here — collected for the agent.
                urls.push(entry);
                break;
            case "file": {
                const body = await loadTemplate(entry, "", cwd);
                // Silently skipped when the file is missing/empty.
                if (body) {
                    parts.push(`### ${path.basename(entry)}\n\n${body}`);
                }
                break;
            }
            default:
                // Inline text — use as-is
                parts.push(entry);
        }
    }
    const resolved = parts.length > 0 ? parts.join("\n\n---\n\n") : "";
    return { resolved, urls };
}
|
package/dist/types.d.ts
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* Triage Workflow
|
|
3
3
|
*
|
|
4
|
-
* Investigate a production alert → gather context
|
|
5
|
-
*
|
|
4
|
+
* Investigate a production alert → gather context → determine root cause →
|
|
5
|
+
* create/update issue → implement fix → open PR → notify team.
|
|
6
6
|
*
|
|
7
7
|
* Provider-agnostic: nodes list all compatible skills per category.
|
|
8
8
|
* The executor uses whichever skills are configured. Pre-execution
|
package/dist/workflows/triage.js
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* Triage Workflow
|
|
3
3
|
*
|
|
4
|
-
* Investigate a production alert → gather context
|
|
5
|
-
*
|
|
4
|
+
* Investigate a production alert → gather context → determine root cause →
|
|
5
|
+
* create/update issue → implement fix → open PR → notify team.
|
|
6
6
|
*
|
|
7
7
|
* Provider-agnostic: nodes list all compatible skills per category.
|
|
8
8
|
* The executor uses whichever skills are configured. Pre-execution
|
|
@@ -17,9 +17,23 @@
|
|
|
17
17
|
export const triageWorkflow = {
|
|
18
18
|
id: "triage",
|
|
19
19
|
name: "Alert Triage",
|
|
20
|
-
description: "Investigate a production alert, determine root cause, create an issue, and notify the team",
|
|
21
|
-
entry: "
|
|
20
|
+
description: "Investigate a production alert, determine root cause, create an issue, implement a fix, and notify the team",
|
|
21
|
+
entry: "prepare",
|
|
22
22
|
nodes: {
|
|
23
|
+
prepare: {
|
|
24
|
+
name: "Load Rules & Context",
|
|
25
|
+
instruction: `You are preparing for a triage workflow. Your job is to fetch and review any knowledge documents listed in the input.
|
|
26
|
+
|
|
27
|
+
1. Check input for \`rules_urls\` and \`context_urls\` arrays.
|
|
28
|
+
2. For each URL, fetch its content:
|
|
29
|
+
- Linear document URLs → use the Linear MCP tools (get_document or search_documentation)
|
|
30
|
+
- Other HTTP URLs → fetch directly
|
|
31
|
+
3. Summarize the key rules and context that downstream workflow nodes should follow.
|
|
32
|
+
4. Output the consolidated information so it's available to all subsequent steps.
|
|
33
|
+
|
|
34
|
+
If there are no URLs to fetch, just pass through — this step is a no-op.`,
|
|
35
|
+
skills: ["linear"],
|
|
36
|
+
},
|
|
23
37
|
gather: {
|
|
24
38
|
name: "Gather Context",
|
|
25
39
|
instruction: `You are investigating a production alert. Gather all relevant context using the available tools:
|
|
@@ -28,10 +42,8 @@ export const triageWorkflow = {
|
|
|
28
42
|
2. **Source control**: Check recent commits, pull requests, and deploys that might be related.
|
|
29
43
|
3. **Issue tracker**: Search for similar past issues or known problems.
|
|
30
44
|
|
|
31
|
-
If input.betterstackSourceId or input.betterstackTableName is provided, use those to scope your BetterStack log queries to the correct source.
|
|
32
|
-
|
|
33
45
|
Be thorough — the investigation step depends on complete context. Use every tool available to you.`,
|
|
34
|
-
skills: ["github", "sentry", "datadog", "linear"],
|
|
46
|
+
skills: ["github", "sentry", "datadog", "betterstack", "linear"],
|
|
35
47
|
},
|
|
36
48
|
investigate: {
|
|
37
49
|
name: "Root Cause Analysis",
|
|
@@ -42,6 +54,7 @@ Be thorough — the investigation step depends on complete context. Use every to
|
|
|
42
54
|
3. Assess severity: critical (service down), high (major feature broken), medium (degraded), low (cosmetic/minor).
|
|
43
55
|
4. Determine affected services and users.
|
|
44
56
|
5. Recommend a fix approach.
|
|
57
|
+
6. Assess fix complexity: "simple" (a few lines, clear change), "moderate" (multiple files but well-understood), or "complex" (architectural, risky, or unclear).
|
|
45
58
|
|
|
46
59
|
**Novelty check (REQUIRED — you MUST do this before finishing):**
|
|
47
60
|
Search the issue tracker for existing issues (BOTH open AND closed) that cover the same root cause, error pattern, or affected service. Use github_search_issues and/or linear_search_issues with multiple keyword variations.
|
|
@@ -63,6 +76,7 @@ Set is_duplicate=true if ANY match is found. Set is_duplicate=false ONLY if you
|
|
|
63
76
|
duplicate_of: { type: "string", description: "Issue ID/URL if duplicate" },
|
|
64
77
|
recommendation: { type: "string" },
|
|
65
78
|
fix_approach: { type: "string" },
|
|
79
|
+
fix_complexity: { type: "string", enum: ["simple", "moderate", "complex"] },
|
|
66
80
|
},
|
|
67
81
|
required: ["root_cause", "severity", "is_duplicate", "recommendation"],
|
|
68
82
|
},
|
|
@@ -83,25 +97,62 @@ If context.issueTemplate is provided, use it as the format for the issue body. O
|
|
|
83
97
|
Create the issue in whichever tracker is available to you.`,
|
|
84
98
|
skills: ["linear", "github"],
|
|
85
99
|
},
|
|
100
|
+
skip: {
|
|
101
|
+
name: "Skip — Duplicate or Low Priority",
|
|
102
|
+
instruction: `This alert was determined to be a duplicate or low-priority.
|
|
103
|
+
|
|
104
|
+
If this is a **duplicate** of an existing issue (check context for duplicate_of):
|
|
105
|
+
1. Find the existing issue using the issue tracker tools.
|
|
106
|
+
2. Add a comment: "+1 — SWEny triage confirmed this issue is still active (seen again at {current UTC timestamp}). Latest context: {1-2 sentence summary of what was found this run}."
|
|
107
|
+
3. If the issue is closed/done, reopen it or note in the comment that the bug has recurred.
|
|
108
|
+
|
|
109
|
+
If this is just **low priority**, log a brief note about why it was skipped.`,
|
|
110
|
+
skills: ["linear", "github"],
|
|
111
|
+
},
|
|
112
|
+
implement: {
|
|
113
|
+
name: "Implement Fix",
|
|
114
|
+
instruction: `Implement the fix identified during investigation:
|
|
115
|
+
|
|
116
|
+
1. Create a feature branch from the base branch (check context for baseBranch, default "main").
|
|
117
|
+
2. Read the relevant source files to understand the current code.
|
|
118
|
+
3. Make the necessary code changes — fix the bug, nothing more.
|
|
119
|
+
4. Run any existing tests if available to verify the fix doesn't break anything.
|
|
120
|
+
5. Stage and commit with a clear commit message referencing the issue.
|
|
121
|
+
|
|
122
|
+
Keep changes minimal and focused. Do not refactor surrounding code or add unrelated improvements.
|
|
123
|
+
|
|
124
|
+
If the fix turns out to be more complex than expected, stop and explain why — do not force a bad fix.`,
|
|
125
|
+
skills: ["github"],
|
|
126
|
+
},
|
|
127
|
+
create_pr: {
|
|
128
|
+
name: "Open Pull Request",
|
|
129
|
+
instruction: `Open a pull request for the fix:
|
|
130
|
+
|
|
131
|
+
1. Push the branch to the remote.
|
|
132
|
+
2. Create a PR with a clear title referencing the issue (e.g. "[OFF-1020] fix: guard empty pdf_texts before access").
|
|
133
|
+
3. In the PR body, include: summary of the bug, what the fix does, and a link to the issue.
|
|
134
|
+
4. Add appropriate labels if available.
|
|
135
|
+
|
|
136
|
+
If context.prTemplate is provided, use it as the format for the PR body. Otherwise use a clear structure with: Summary, Changes, Testing, and Related Issues.
|
|
137
|
+
|
|
138
|
+
Return the PR URL and number.`,
|
|
139
|
+
skills: ["github"],
|
|
140
|
+
},
|
|
86
141
|
notify: {
|
|
87
142
|
name: "Notify Team",
|
|
88
143
|
instruction: `Send a notification summarizing the triage result:
|
|
89
144
|
|
|
90
|
-
1. Include: alert summary, severity, root cause (1-2 sentences), and
|
|
145
|
+
1. Include: alert summary, severity, root cause (1-2 sentences), and links to any created issues or PRs.
|
|
91
146
|
2. For critical/high severity, make the notification urgent.
|
|
92
147
|
3. For medium/low, a standard notification is fine.
|
|
93
148
|
|
|
94
149
|
Use whichever notification channel is available to you.`,
|
|
95
150
|
skills: ["slack", "notification"],
|
|
96
151
|
},
|
|
97
|
-
skip: {
|
|
98
|
-
name: "Skip — Duplicate or Low Priority",
|
|
99
|
-
instruction: `This alert was determined to be a duplicate or low-priority.
|
|
100
|
-
Log a brief note about why it was skipped. No further action needed.`,
|
|
101
|
-
skills: [],
|
|
102
|
-
},
|
|
103
152
|
},
|
|
104
153
|
edges: [
|
|
154
|
+
// prepare → gather (always)
|
|
155
|
+
{ from: "prepare", to: "gather" },
|
|
105
156
|
// gather → investigate (always)
|
|
106
157
|
{ from: "gather", to: "investigate" },
|
|
107
158
|
// investigate → create_issue (if novel and actionable)
|
|
@@ -116,7 +167,33 @@ Log a brief note about why it was skipped. No further action needed.`,
|
|
|
116
167
|
to: "skip",
|
|
117
168
|
when: "is_duplicate is true, OR severity is low",
|
|
118
169
|
},
|
|
119
|
-
// create_issue →
|
|
120
|
-
{
|
|
170
|
+
// create_issue → implement (if fix is clear and not too complex)
|
|
171
|
+
{
|
|
172
|
+
from: "create_issue",
|
|
173
|
+
to: "implement",
|
|
174
|
+
when: "fix_complexity is simple or moderate AND fix_approach is provided AND dryRun is not true",
|
|
175
|
+
},
|
|
176
|
+
// create_issue → notify (if fix is too complex or risky, or dry run)
|
|
177
|
+
{
|
|
178
|
+
from: "create_issue",
|
|
179
|
+
to: "notify",
|
|
180
|
+
when: "fix_complexity is complex, OR no clear fix_approach, OR dryRun is true",
|
|
181
|
+
},
|
|
182
|
+
// skip → implement (duplicate exists but has a clear unfixed bug with a simple fix)
|
|
183
|
+
{
|
|
184
|
+
from: "skip",
|
|
185
|
+
to: "implement",
|
|
186
|
+
when: "is_duplicate is true AND the duplicate issue is still open/unfixed AND fix_complexity is simple or moderate AND fix_approach is provided AND dryRun is not true",
|
|
187
|
+
},
|
|
188
|
+
// skip → notify (duplicate was +1'd, no implementation needed or too complex)
|
|
189
|
+
{
|
|
190
|
+
from: "skip",
|
|
191
|
+
to: "notify",
|
|
192
|
+
when: "is_duplicate is true AND (fix_complexity is complex OR no fix_approach OR the issue already has a PR in progress OR dryRun is true), OR severity is low",
|
|
193
|
+
},
|
|
194
|
+
// implement → create_pr (always after successful implementation)
|
|
195
|
+
{ from: "implement", to: "create_pr" },
|
|
196
|
+
// create_pr → notify (always)
|
|
197
|
+
{ from: "create_pr", to: "notify" },
|
|
121
198
|
],
|
|
122
199
|
};
|