@sweny-ai/core 0.1.5 → 0.1.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/claude.d.ts CHANGED
@@ -31,6 +31,12 @@ export declare class ClaudeClient implements Claude {
31
31
  private defaultContext;
32
32
  private mcpServers;
33
33
  constructor(opts?: ClaudeClientOptions);
34
+ /**
35
+ * Build env for the Claude Code subprocess.
36
+ * OAuth token takes priority over API key to prevent .env files from
37
+ * overriding the user's subscription-based auth.
38
+ */
39
+ private buildEnv;
34
40
  run(opts: {
35
41
  instruction: string;
36
42
  context: Record<string, unknown>;
package/dist/claude.js CHANGED
@@ -27,6 +27,18 @@ export class ClaudeClient {
27
27
  this.defaultContext = opts.defaultContext ?? { config: {}, logger: this.logger };
28
28
  this.mcpServers = opts.mcpServers ?? {};
29
29
  }
30
+ /**
31
+ * Build env for the Claude Code subprocess.
32
+ * OAuth token takes priority over API key to prevent .env files from
33
+ * overriding the user's subscription-based auth.
34
+ */
35
+ buildEnv() {
36
+ const env = Object.fromEntries(Object.entries(process.env).filter((e) => e[1] != null));
37
+ if (env.CLAUDE_CODE_OAUTH_TOKEN) {
38
+ delete env.ANTHROPIC_API_KEY;
39
+ }
40
+ return env;
41
+ }
30
42
  async run(opts) {
31
43
  const { instruction, context, tools, outputSchema, onProgress } = opts;
32
44
  const toolCalls = [];
@@ -47,8 +59,7 @@ export class ClaudeClient {
47
59
  ]
48
60
  .filter(Boolean)
49
61
  .join("\n\n");
50
- // Spread process.env so Claude Code inherits PATH, HOME, auth tokens, etc.
51
- const env = Object.fromEntries(Object.entries(process.env).filter((e) => e[1] != null));
62
+ const env = this.buildEnv();
52
63
  let response = "";
53
64
  try {
54
65
  const allMcpServers = { ...this.mcpServers };
@@ -62,6 +73,8 @@ export class ClaudeClient {
62
73
  cwd: this.cwd,
63
74
  env,
64
75
  permissionMode: "bypassPermissions",
76
+ allowDangerouslySkipPermissions: true,
77
+ stderr: (data) => this.logger.debug(`[claude-code] ${data}`),
65
78
  ...(this.model ? { model: this.model } : {}),
66
79
  ...(Object.keys(allMcpServers).length > 0 ? { mcpServers: allMcpServers } : {}),
67
80
  },
@@ -82,6 +95,20 @@ export class ClaudeClient {
82
95
  onProgress?.(clean.length > 80 ? clean.slice(0, 79) + "\u2026" : clean);
83
96
  }
84
97
  }
98
+ else if (message.type === "assistant") {
99
+ // Extract tool_use blocks from assistant messages (MCP tool calls)
100
+ const am = message;
101
+ if (am.message?.content && Array.isArray(am.message.content)) {
102
+ for (const block of am.message.content) {
103
+ if (block.type === "tool_use") {
104
+ toolCalls.push({
105
+ tool: stripMcpPrefix(block.name ?? ""),
106
+ input: block.input,
107
+ });
108
+ }
109
+ }
110
+ }
111
+ }
85
112
  else if (message.type === "result") {
86
113
  const resultMsg = message;
87
114
  if (resultMsg.subtype === "success" && "result" in resultMsg) {
@@ -121,7 +148,7 @@ export class ClaudeClient {
121
148
  `\nChoices:\n${choiceList}`,
122
149
  `\nRespond with ONLY the choice ID, nothing else.`,
123
150
  ].join("\n");
124
- const env = Object.fromEntries(Object.entries(process.env).filter((e) => e[1] != null));
151
+ const env = this.buildEnv();
125
152
  let response = "";
126
153
  try {
127
154
  const stream = query({
@@ -131,6 +158,8 @@ export class ClaudeClient {
131
158
  cwd: this.cwd,
132
159
  env,
133
160
  permissionMode: "bypassPermissions",
161
+ allowDangerouslySkipPermissions: true,
162
+ stderr: (data) => this.logger.debug(`[claude-code] ${data}`),
134
163
  ...(this.model ? { model: this.model } : {}),
135
164
  },
136
165
  });
@@ -3,10 +3,13 @@
3
3
  * Sets `process.env[KEY]` only if not already defined (real env vars win).
4
4
  */
5
5
  export declare function loadDotenv(cwd?: string): void;
6
+ /** Parsed config file — flat strings for scalar fields, arrays for list fields. */
7
+ export type FileConfig = Record<string, string | string[]>;
6
8
  /**
7
- * Search upward from `cwd` for `.sweny.yml` and parse it into flat key-value pairs.
9
+ * Search upward from `cwd` for `.sweny.yml` and parse it.
10
+ * Scalar values are strings, list values (rules, context) are string arrays.
8
11
  * Returns empty object if no config file is found.
9
12
  */
10
- export declare function loadConfigFile(cwd?: string): Record<string, string>;
13
+ export declare function loadConfigFile(cwd?: string): FileConfig;
11
14
  /** Starter config written by `sweny init`. */
12
15
  export declare const STARTER_CONFIG = "# .sweny.yml \u2014 SWEny project configuration\n# Commit this file. Secrets (API keys, tokens) go in .env (gitignored).\n#\n# Every key matches a CLI flag: \"time-range: 4h\" is the same as \"--time-range 4h\".\n# CLI flags override this file; env vars override this file; this file overrides defaults.\n\n# \u2500\u2500 Providers \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# observability-provider: datadog # datadog | sentry | cloudwatch | splunk | elastic | newrelic | loki | prometheus | pagerduty | heroku | opsgenie | vercel | supabase | netlify | fly | render | file\n# issue-tracker-provider: github-issues # github-issues | linear | jira\n# source-control-provider: github # github | gitlab\n# coding-agent-provider: claude # claude | codex | gemini\n# notification-provider: console # console | slack | teams | discord | email | webhook\n\n# \u2500\u2500 Investigation \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# time-range: 24h\n# severity-focus: errors\n# service-filter: \"*\"\n# investigation-depth: standard # quick | standard | thorough\n\n# \u2500\u2500 PR / branch 
\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# base-branch: main\n# pr-labels: agent,triage,needs-review\n\n# \u2500\u2500 Paths \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# service-map-path: .github/service-map.yml\n# log-file: ./logs/errors.json # required when observability-provider is \"file\"\n\n# \u2500\u2500 Cache \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# cache-dir: .sweny/cache\n# cache-ttl: 86400\n\n# \u2500\u2500 MCP servers \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# Extend the coding agent with additional tools via MCP.\n# Value is a JSON object \u2014 each key is a server name you choose.\n# See docs/mcp-servers.md for a full catalog with copy-paste configs.\n#\n# Example: GitHub MCP server (query PRs, issues, CI run logs)\n# mcp-servers-json: 
'{\"github\":{\"type\":\"stdio\",\"command\":\"npx\",\"args\":[\"-y\",\"@modelcontextprotocol/server-github@latest\"],\"env\":{\"GITHUB_PERSONAL_ACCESS_TOKEN\":\"ghp_...\"}}}'\n\n# \u2500\u2500 Local-only quick start \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# Uncomment to run without any external services (just an LLM API key):\n# observability-provider: file\n# log-file: ./sample-errors.json\n# issue-tracker-provider: file\n# source-control-provider: file\n# notification-provider: file\n# output-dir: .sweny/output\n\n# \u2500\u2500 Credentials (.env) \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n# Copy the relevant block into your .env file and fill in the values.\n#\n# Claude (coding agent) \u2014 https://console.anthropic.com/settings/api-keys\n# ANTHROPIC_API_KEY=sk-ant-...\n#\n# GitHub (source control + issue tracker)\n# GITHUB_TOKEN=ghp_... # https://github.com/settings/tokens (repo + issues scopes)\n#\n# Datadog (observability) \u2014 https://app.datadoghq.com/organization-settings\n# DD_API_KEY=... # Organization Settings > API Keys\n# DD_APP_KEY=... 
# Organization Settings > Application Keys\n# DD_SITE=datadoghq.com # or datadoghq.eu, us3.datadoghq.com, etc.\n#\n# Sentry (observability) \u2014 https://sentry.io/settings/auth-tokens/\n# SENTRY_AUTH_TOKEN=sntrys_...\n# SENTRY_ORG=your-org-slug # from sentry.io/organizations/<slug>/\n# SENTRY_PROJECT=your-project # Project Settings > General > Project Slug\n#\n# Linear (issue tracker) \u2014 https://linear.app/settings/api\n# LINEAR_API_KEY=lin_api_...\n# LINEAR_TEAM_ID=... # Settings > Workspace > Teams > [team] > copy ID from URL\n# LINEAR_BUG_LABEL_ID=... # Settings > Labels > [label] > copy ID from URL\n#\n# Jira (issue tracker) \u2014 https://your-org.atlassian.net\n# JIRA_BASE_URL=https://your-org.atlassian.net\n# JIRA_EMAIL=you@company.com # your Atlassian account email\n# JIRA_API_TOKEN=... # https://id.atlassian.com/manage-profile/security/api-tokens\n#\n# Vercel (observability) \u2014 https://vercel.com/account/tokens\n# VERCEL_TOKEN=...\n# VERCEL_PROJECT_ID=prj_... # Project Settings > General > Project ID\n# VERCEL_TEAM_ID=team_... # optional, for team-owned projects\n#\n# Supabase (observability) \u2014 https://supabase.com/dashboard/account/tokens\n# SUPABASE_MANAGEMENT_KEY=...\n# SUPABASE_PROJECT_REF=... # Project Settings > General > Reference ID\n#\n# Netlify (observability) \u2014 https://app.netlify.com/user/applications#personal-access-tokens\n# NETLIFY_TOKEN=...\n# NETLIFY_SITE_ID=... # Site Settings > General > Site ID\n#\n# Fly.io (observability) \u2014 https://fly.io/user/personal_access_tokens\n# FLY_TOKEN=...\n# FLY_APP_NAME=... # the name of your Fly.io application\n#\n# Render (observability) \u2014 https://dashboard.render.com/u/settings\n# RENDER_API_KEY=...\n# RENDER_SERVICE_ID=srv-... # from your service's Settings page\n#\n# Prometheus (observability) \u2014 self-hosted or Grafana Cloud\n# PROMETHEUS_URL=http://prometheus.internal:9090\n# PROMETHEUS_TOKEN=... 
# optional, for secured instances\n#\n# PagerDuty (observability) \u2014 https://your-account.pagerduty.com/api_keys\n# PAGERDUTY_API_KEY=...\n#\n# Honeycomb (observability) \u2014 https://docs.honeycomb.io/api/\n# HONEYCOMB_API_KEY=...\n# HONEYCOMB_DATASET=... # dataset name (e.g. production)\n#\n# Heroku (observability) \u2014 https://devcenter.heroku.com/articles/platform-api-reference\n# HEROKU_API_KEY=... # https://dashboard.heroku.com/account\n# HEROKU_APP_NAME=... # the name of your Heroku application\n#\n# OpsGenie (observability) \u2014 https://support.atlassian.com/opsgenie/docs/api-key-management/\n# OPSGENIE_API_KEY=...\n# OPSGENIE_REGION=us # or eu for EU-hosted accounts\n#\n# Slack (notifications) \u2014 https://api.slack.com/apps\n# NOTIFICATION_WEBHOOK_URL=https://hooks.slack.com/services/...\n# # or use a bot token: SLACK_BOT_TOKEN=xoxb-...\n";
@@ -1,5 +1,6 @@
1
1
  import * as fs from "node:fs";
2
2
  import * as path from "node:path";
3
+ import { parse as parseYaml } from "yaml";
3
4
  /**
4
5
  * Auto-load a `.env` file from the given directory.
5
6
  * Sets `process.env[KEY]` only if not already defined (real env vars win).
@@ -32,7 +33,8 @@ export function loadDotenv(cwd = process.cwd()) {
32
33
  }
33
34
  }
34
35
  /**
35
- * Search upward from `cwd` for `.sweny.yml` and parse it into flat key-value pairs.
36
+ * Search upward from `cwd` for `.sweny.yml` and parse it.
37
+ * Scalar values are strings, list values (rules, context) are string arrays.
36
38
  * Returns empty object if no config file is found.
37
39
  */
38
40
  export function loadConfigFile(cwd = process.cwd()) {
@@ -46,22 +48,22 @@ export function loadConfigFile(cwd = process.cwd()) {
46
48
  catch {
47
49
  return {};
48
50
  }
51
+ let raw;
52
+ try {
53
+ raw = parseYaml(content);
54
+ }
55
+ catch {
56
+ return {};
57
+ }
58
+ if (!raw || typeof raw !== "object")
59
+ return {};
49
60
  const config = {};
50
- for (const line of content.split("\n")) {
51
- const trimmed = line.trim();
52
- if (!trimmed || trimmed.startsWith("#"))
53
- continue;
54
- const colonIndex = trimmed.indexOf(":");
55
- if (colonIndex === -1)
56
- continue;
57
- const key = trimmed.slice(0, colonIndex).trim();
58
- let value = trimmed.slice(colonIndex + 1).trim();
59
- // Strip surrounding quotes
60
- if ((value.startsWith('"') && value.endsWith('"')) || (value.startsWith("'") && value.endsWith("'"))) {
61
- value = value.slice(1, -1);
61
+ for (const [key, value] of Object.entries(raw)) {
62
+ if (Array.isArray(value)) {
63
+ config[key] = value.map(String);
62
64
  }
63
- if (key && value !== "") {
64
- config[key] = value;
65
+ else if (value != null && value !== "") {
66
+ config[key] = String(value);
65
67
  }
66
68
  }
67
69
  return config;
@@ -1,5 +1,6 @@
1
1
  import type { Command } from "commander";
2
2
  import type { McpServerConfig } from "../types.js";
3
+ import type { FileConfig } from "./config-file.js";
3
4
  export interface CliConfig {
4
5
  codingAgentProvider: string;
5
6
  anthropicApiKey: string;
@@ -57,9 +58,11 @@ export interface CliConfig {
57
58
  outputDir: string;
58
59
  mcpServers: Record<string, McpServerConfig>;
59
60
  workspaceTools: string[];
61
+ rules: string[];
62
+ context: string[];
60
63
  }
61
64
  export declare function registerTriageCommand(program: Command): Command;
62
- export declare function parseCliInputs(options: Record<string, unknown>, fileConfig?: Record<string, string>): CliConfig;
65
+ export declare function parseCliInputs(options: Record<string, unknown>, fileConfig?: FileConfig): CliConfig;
63
66
  /**
64
67
  * All recognized workspace tool names. Update here when adding a new Category B MCP server.
65
68
  *
@@ -65,8 +65,26 @@ export function registerTriageCommand(program) {
65
65
  }
66
66
  export function parseCliInputs(options, fileConfig = {}) {
67
67
  const env = process.env;
68
- // Config file lookup helper: CLI flag > env var > file > default
69
- const f = (key) => fileConfig[key] || undefined;
68
+ // Config file lookup helper: CLI flag > env var > file > default (scalar only)
69
+ const f = (key) => {
70
+ const v = fileConfig[key];
71
+ return typeof v === "string" && v ? v : undefined;
72
+ };
73
+ // Array helper: CLI flag (comma-separated) > file > default
74
+ const fa = (key, cliKey) => {
75
+ const cliVal = options[cliKey ?? key];
76
+ if (cliVal)
77
+ return cliVal
78
+ .split(",")
79
+ .map((s) => s.trim())
80
+ .filter(Boolean);
81
+ const fileVal = fileConfig[key];
82
+ if (Array.isArray(fileVal))
83
+ return fileVal;
84
+ if (typeof fileVal === "string" && fileVal)
85
+ return [fileVal];
86
+ return [];
87
+ };
70
88
  const obsProvider = options.observabilityProvider || f("observability-provider") || "datadog";
71
89
  return {
72
90
  codingAgentProvider: options.agent || options.codingAgentProvider || f("coding-agent-provider") || "claude",
@@ -138,6 +156,8 @@ export function parseCliInputs(options, fileConfig = {}) {
138
156
  .split(",")
139
157
  .map((s) => s.trim())
140
158
  .filter(Boolean),
159
+ rules: fa("rules"),
160
+ context: fa("context"),
141
161
  };
142
162
  }
143
163
  /**
@@ -395,7 +415,10 @@ export function validateWarnings(config) {
395
415
  }
396
416
  function parseObservabilityCredentials(provider, options, fileConfig = {}) {
397
417
  const env = process.env;
398
- const f = (key) => fileConfig[key] || undefined;
418
+ const f = (key) => {
419
+ const v = fileConfig[key];
420
+ return typeof v === "string" && v ? v : undefined;
421
+ };
399
422
  switch (provider) {
400
423
  case "datadog":
401
424
  return {
package/dist/cli/main.js CHANGED
@@ -12,6 +12,8 @@ import { implementWorkflow } from "../workflows/implement.js";
12
12
  import { consoleLogger } from "../types.js";
13
13
  import { ClaudeClient } from "../claude.js";
14
14
  import { createSkillMap, configuredSkills } from "../skills/index.js";
15
+ import { buildAutoMcpServers, buildProviderContext } from "../mcp.js";
16
+ import { loadAdditionalContext } from "../templates.js";
15
17
  import { validateWorkflow as validateWorkflowSchema } from "../schema.js";
16
18
  import { parse as parseYaml, stringify as stringifyYaml } from "yaml";
17
19
  import { buildWorkflow, refineWorkflow } from "../workflow-builder.js";
@@ -106,6 +108,8 @@ function buildCredentialMap() {
106
108
  "NR_API_KEY",
107
109
  "NR_REGION",
108
110
  "BETTERSTACK_API_TOKEN",
111
+ "BETTERSTACK_SOURCE_ID",
112
+ "BETTERSTACK_TABLE_NAME",
109
113
  "SLACK_BOT_TOKEN",
110
114
  "SLACK_TEAM_ID",
111
115
  "NOTION_TOKEN",
@@ -132,6 +136,41 @@ function buildMcpAutoConfig(config) {
132
136
  userMcpServers: Object.keys(config.mcpServers).length > 0 ? config.mcpServers : undefined,
133
137
  };
134
138
  }
139
+ /**
140
+ * Build provider context string (available tools/providers).
141
+ */
142
+ function buildProviderCtx(config, mcpServers) {
143
+ const extras = {};
144
+ if (config.observabilityCredentials.sourceId) {
145
+ extras["BetterStack source ID"] = config.observabilityCredentials.sourceId;
146
+ }
147
+ if (config.observabilityCredentials.tableName) {
148
+ extras["BetterStack table name"] = config.observabilityCredentials.tableName;
149
+ }
150
+ return buildProviderContext({
151
+ observabilityProvider: config.observabilityProvider,
152
+ issueTrackerProvider: config.issueTrackerProvider,
153
+ sourceControlProvider: config.sourceControlProvider,
154
+ mcpServers: Object.keys(mcpServers),
155
+ extras: Object.keys(extras).length > 0 ? extras : undefined,
156
+ });
157
+ }
158
+ /**
159
+ * Resolve rules and context from config into structured workflow input fields.
160
+ * Local files + inline text are resolved now; URLs are passed to the prepare node.
161
+ */
162
+ async function resolveRulesAndContext(config) {
163
+ const [rulesResult, contextResult] = await Promise.all([
164
+ loadAdditionalContext(config.rules),
165
+ loadAdditionalContext(config.context),
166
+ ]);
167
+ return {
168
+ rules: rulesResult.resolved,
169
+ context: contextResult.resolved,
170
+ rulesUrls: rulesResult.urls,
171
+ contextUrls: contextResult.urls,
172
+ };
173
+ }
135
174
  // ── sweny triage ──────────────────────────────────────────────────────
136
175
  const triageCmd = registerTriageCommand(program);
137
176
  triageCmd.action(async (options) => {
@@ -152,12 +191,15 @@ triageCmd.action(async (options) => {
152
191
  if (!config.json) {
153
192
  console.log(formatBanner(config, version));
154
193
  }
155
- // ── Build skill map + Claude client ──────────────────────
194
+ // ── Build skill map + MCP servers + Claude client ──────────
156
195
  const skills = createSkillMap(configuredSkills());
196
+ const mcpAutoConfig = buildMcpAutoConfig(config);
197
+ const mcpServers = buildAutoMcpServers(mcpAutoConfig);
157
198
  const claude = new ClaudeClient({
158
199
  maxTurns: config.maxInvestigateTurns || 50,
159
200
  cwd: process.cwd(),
160
201
  logger: consoleLogger,
202
+ mcpServers,
161
203
  });
162
204
  // ── Progress display state ─────────────────────────────────
163
205
  const FRAMES = ["\u280B", "\u2819", "\u2839", "\u2838", "\u283C", "\u2834", "\u2826", "\u2827", "\u2807", "\u280F"];
@@ -278,9 +320,13 @@ triageCmd.action(async (options) => {
278
320
  };
279
321
  const observer = composeObservers(progressObserver, config.stream ? createStreamObserver() : undefined);
280
322
  // ── Build workflow input from config ──────────────────────
281
- // TODO: The triage workflow input structure may need further refinement
282
- // once the workflow nodes have stabilized. For now, pass config fields
283
- // that the workflow instructions can reference via the `input` context.
323
+ const providerCtx = buildProviderCtx(config, mcpServers);
324
+ const { rules, context, rulesUrls, contextUrls } = await resolveRulesAndContext(config);
325
+ // Combine provider context + additional instructions into the context field
326
+ const contextParts = [providerCtx];
327
+ if (config.additionalInstructions)
328
+ contextParts.push(config.additionalInstructions);
329
+ const fullContext = [contextParts.join("\n\n"), context].filter(Boolean).join("\n\n---\n\n");
284
330
  const workflowInput = {
285
331
  timeRange: config.timeRange,
286
332
  severityFocus: config.severityFocus,
@@ -295,6 +341,19 @@ triageCmd.action(async (options) => {
295
341
  issueOverride: config.issueOverride,
296
342
  noveltyMode: config.noveltyMode,
297
343
  reviewMode: config.reviewMode,
344
+ observabilityProvider: config.observabilityProvider,
345
+ ...(config.observabilityCredentials.sourceId && {
346
+ betterstackSourceId: config.observabilityCredentials.sourceId,
347
+ }),
348
+ ...(config.observabilityCredentials.tableName && {
349
+ betterstackTableName: config.observabilityCredentials.tableName,
350
+ }),
351
+ // Structured rules/context for executor
352
+ rules,
353
+ context: fullContext,
354
+ // URLs for the prepare node to fetch at runtime
355
+ rulesUrls,
356
+ contextUrls,
298
357
  };
299
358
  try {
300
359
  const results = await execute(triageWorkflow, workflowInput, {
@@ -346,13 +405,19 @@ implementCmd.action(async (issueId, options) => {
346
405
  maxImplementTurns: parseInt(String(options.maxImplementTurns || fileConfig["max-implement-turns"] || "40"), 10),
347
406
  baseBranch: options.baseBranch || fileConfig["base-branch"] || "main",
348
407
  repository: options.repository || process.env.GITHUB_REPOSITORY || "",
349
- outputDir: options.outputDir || process.env.SWENY_OUTPUT_DIR || fileConfig["output-dir"] || ".sweny/output",
408
+ outputDir: options.outputDir ||
409
+ process.env.SWENY_OUTPUT_DIR ||
410
+ fileConfig["output-dir"] ||
411
+ ".sweny/output",
350
412
  };
351
413
  const skills = createSkillMap(configuredSkills());
414
+ const mcpAutoConfig = buildMcpAutoConfig(config);
415
+ const mcpServers = buildAutoMcpServers(mcpAutoConfig);
352
416
  const claude = new ClaudeClient({
353
417
  maxTurns: config.maxImplementTurns || 40,
354
418
  cwd: process.cwd(),
355
419
  logger: consoleLogger,
420
+ mcpServers,
356
421
  });
357
422
  console.log(chalk.cyan(`\n sweny implement ${issueId}\n`));
358
423
  const isTTY = process.stderr.isTTY ?? false;
@@ -468,10 +533,13 @@ export async function workflowRunAction(file, options) {
468
533
  const isJson = Boolean(options.json);
469
534
  const isTTY = !isJson && (process.stderr.isTTY ?? false);
470
535
  const skills = createSkillMap(configuredSkills());
536
+ const mcpAutoConfig = buildMcpAutoConfig(config);
537
+ const mcpServers = buildAutoMcpServers(mcpAutoConfig);
471
538
  const claude = new ClaudeClient({
472
539
  maxTurns: config.maxInvestigateTurns || 50,
473
540
  cwd: process.cwd(),
474
541
  logger: consoleLogger,
542
+ mcpServers,
475
543
  });
476
544
  // Track per-node entry time to compute elapsed on exit
477
545
  const nodeEnterTimes = new Map();
@@ -519,6 +587,14 @@ export async function workflowRunAction(file, options) {
519
587
  baseBranch: config.baseBranch,
520
588
  prLabels: config.prLabels,
521
589
  additionalInstructions: config.additionalInstructions,
590
+ observabilityProvider: config.observabilityProvider,
591
+ ...(config.observabilityCredentials.sourceId && {
592
+ betterstackSourceId: config.observabilityCredentials.sourceId,
593
+ }),
594
+ ...(config.observabilityCredentials.tableName && {
595
+ betterstackTableName: config.observabilityCredentials.tableName,
596
+ }),
597
+ context: buildProviderCtx(config, mcpServers),
522
598
  };
523
599
  try {
524
600
  const results = await execute(workflow, workflowInput, {
package/dist/executor.js CHANGED
@@ -46,11 +46,8 @@ export async function execute(workflow, input, options) {
46
46
  return output;
47
47
  },
48
48
  }));
49
- // Prepend additional context to instruction if provided
50
- const additionalContext = typeof input?.additionalContext === "string" ? input.additionalContext : "";
51
- const instruction = additionalContext
52
- ? `## Additional Context & Rules\n\n${additionalContext}\n\n---\n\n${node.instruction}`
53
- : node.instruction;
49
+ // Prepend rules and context to instruction if provided
50
+ const instruction = buildNodeInstruction(node.instruction, input);
54
51
  // Run Claude on this node
55
52
  const result = await claude.run({
56
53
  instruction,
@@ -74,6 +71,36 @@ export async function execute(workflow, input, options) {
74
71
  return results;
75
72
  }
76
73
  // ─── Internals ───────────────────────────────────────────────────
74
+ /**
75
+ * Build the full instruction for a node by prepending rules and context.
76
+ * Rules get "You MUST follow" framing; context gets "Background" framing.
77
+ * Falls back to legacy `additionalContext` if rules/context aren't set.
78
+ */
79
+ function buildNodeInstruction(baseInstruction, input) {
80
+ const inp = input;
81
+ if (!inp)
82
+ return baseInstruction;
83
+ const sections = [];
84
+ // New structured format
85
+ const rules = typeof inp.rules === "string" && inp.rules ? inp.rules : "";
86
+ const context = typeof inp.context === "string" && inp.context ? inp.context : "";
87
+ if (rules) {
88
+ sections.push(`## Rules — You MUST Follow These\n\n${rules}`);
89
+ }
90
+ if (context) {
91
+ sections.push(`## Background Context\n\n${context}`);
92
+ }
93
+ // Legacy fallback
94
+ if (sections.length === 0) {
95
+ const legacy = typeof inp.additionalContext === "string" ? inp.additionalContext : "";
96
+ if (legacy) {
97
+ sections.push(`## Additional Context & Rules\n\n${legacy}`);
98
+ }
99
+ }
100
+ if (sections.length === 0)
101
+ return baseInstruction;
102
+ return `${sections.join("\n\n")}\n\n---\n\n${baseInstruction}`;
103
+ }
77
104
  /** Call observer without letting exceptions crash the workflow */
78
105
  function safeObserve(observer, event, logger) {
79
106
  if (!observer)
package/dist/index.d.ts CHANGED
@@ -31,7 +31,8 @@ export { github, linear, slack, sentry, datadog, notification, builtinSkills, cr
31
31
  export type { SkillValidationResult } from "./skills/index.js";
32
32
  export { workflowZ, nodeZ, edgeZ, skillZ, parseWorkflow, validateWorkflow, workflowJsonSchema } from "./schema.js";
33
33
  export type { WorkflowError } from "./schema.js";
34
- export { buildAutoMcpServers } from "./mcp.js";
34
+ export { buildAutoMcpServers, buildProviderContext } from "./mcp.js";
35
+ export type { ProviderContextOptions } from "./mcp.js";
35
36
  export type { McpServerConfig, McpAutoConfig } from "./types.js";
36
37
  export { buildWorkflow, refineWorkflow } from "./workflow-builder.js";
37
38
  export type { BuildWorkflowOptions } from "./workflow-builder.js";
package/dist/index.js CHANGED
@@ -31,7 +31,7 @@ export { github, linear, slack, sentry, datadog, notification, builtinSkills, cr
31
31
  // Schema & validation
32
32
  export { workflowZ, nodeZ, edgeZ, skillZ, parseWorkflow, validateWorkflow, workflowJsonSchema } from "./schema.js";
33
33
  // MCP auto-injection
34
- export { buildAutoMcpServers } from "./mcp.js";
34
+ export { buildAutoMcpServers, buildProviderContext } from "./mcp.js";
35
35
  // Workflow builder
36
36
  export { buildWorkflow, refineWorkflow } from "./workflow-builder.js";
37
37
  // Templates
package/dist/mcp.d.ts CHANGED
@@ -9,3 +9,18 @@ import type { McpAutoConfig, McpServerConfig } from "./types.js";
9
9
  * User-supplied servers (userMcpServers) always win on key conflicts.
10
10
  */
11
11
  export declare function buildAutoMcpServers(config: McpAutoConfig): Record<string, McpServerConfig>;
12
+ export interface ProviderContextOptions {
13
+ observabilityProvider?: string;
14
+ issueTrackerProvider?: string;
15
+ sourceControlProvider?: string;
16
+ /** Which MCP servers were actually injected (keys from buildAutoMcpServers) */
17
+ mcpServers: string[];
18
+ /** Extra details to include (e.g. betterstack source ID) */
19
+ extras?: Record<string, string>;
20
+ }
21
+ /**
22
+ * Build a human-readable summary of configured providers and MCP tools.
23
+ * Prepended to every node instruction via additionalContext so the agent
24
+ * knows exactly what tools are available and how to use them.
25
+ */
26
+ export declare function buildProviderContext(opts: ProviderContextOptions): string;
package/dist/mcp.js CHANGED
@@ -183,3 +183,44 @@ export function buildAutoMcpServers(config) {
183
183
  // User-supplied servers always win on key conflict.
184
184
  return { ...auto, ...(config.userMcpServers ?? {}) };
185
185
  }
186
+ /**
187
+ * Build a human-readable summary of configured providers and MCP tools.
188
+ * Prepended to every node instruction via additionalContext so the agent
189
+ * knows exactly what tools are available and how to use them.
190
+ */
191
+ export function buildProviderContext(opts) {
192
+ const lines = ["## Available Providers & Tools", ""];
193
+ // Observability
194
+ if (opts.observabilityProvider) {
195
+ const mcpNote = opts.mcpServers.includes(opts.observabilityProvider)
196
+ ? ` (available via MCP — use its tools to query logs, errors, and metrics)`
197
+ : "";
198
+ lines.push(`- **Observability**: ${opts.observabilityProvider}${mcpNote}`);
199
+ }
200
+ // BetterStack as secondary (token present but not primary provider)
201
+ if (opts.observabilityProvider !== "betterstack" && opts.mcpServers.includes("betterstack")) {
202
+ lines.push(`- **Logs**: betterstack (available via MCP — use its tools to query logs)`);
203
+ }
204
+ // Issue tracker
205
+ if (opts.issueTrackerProvider) {
206
+ const mcpNote = opts.mcpServers.includes(opts.issueTrackerProvider === "github-issues" ? "github" : opts.issueTrackerProvider)
207
+ ? ` (available via MCP)`
208
+ : "";
209
+ lines.push(`- **Issue tracker**: ${opts.issueTrackerProvider}${mcpNote}`);
210
+ }
211
+ // Source control
212
+ if (opts.sourceControlProvider) {
213
+ const mcpNote = opts.mcpServers.includes(opts.sourceControlProvider) ? ` (available via MCP)` : "";
214
+ lines.push(`- **Source control**: ${opts.sourceControlProvider}${mcpNote}`);
215
+ }
216
+ // Extras (source IDs, table names, etc.)
217
+ if (opts.extras && Object.keys(opts.extras).length > 0) {
218
+ lines.push("");
219
+ for (const [key, value] of Object.entries(opts.extras)) {
220
+ lines.push(`- **${key}**: ${value}`);
221
+ }
222
+ }
223
+ lines.push("");
224
+ lines.push("Use all available MCP tools to gather data. " + "MCP tools are already connected — just call them directly.");
225
+ return lines.join("\n");
226
+ }
@@ -25,7 +25,11 @@ export declare function resolveTemplates(config: {
25
25
  prTemplate?: string;
26
26
  }, cwd?: string): Promise<Templates>;
27
27
  /**
28
- * Load additional context documents (local files or URLs).
28
+ * Load additional context documents (local files, URLs, or inline text).
29
29
  * Each source is loaded and wrapped with a header.
30
+ * Returns { resolved, urls } — resolved text for files/inline, urls for agent to fetch.
30
31
  */
31
- export declare function loadAdditionalContext(sources: string[], cwd?: string): Promise<string>;
32
+ export declare function loadAdditionalContext(sources: string[], cwd?: string): Promise<{
33
+ resolved: string;
34
+ urls: string[];
35
+ }>;
package/dist/templates.js CHANGED
@@ -89,22 +89,46 @@ export async function resolveTemplates(config, cwd) {
89
89
  return { issueTemplate, prTemplate };
90
90
  }
91
91
  /**
92
- * Load additional context documents (local files or URLs).
92
+ * Classify a source entry as URL, file path, or inline text.
93
+ */
94
+ function classifySource(source) {
95
+ if (source.startsWith("http://") || source.startsWith("https://"))
96
+ return "url";
97
+ if (source.startsWith("./") || source.startsWith("../") || source.startsWith("/"))
98
+ return "file";
99
+ return "inline";
100
+ }
101
+ /**
102
+ * Load additional context documents (local files, URLs, or inline text).
93
103
  * Each source is loaded and wrapped with a header.
104
+ * Returns { resolved, urls } — resolved text for files/inline, urls for agent to fetch.
94
105
  */
95
106
  export async function loadAdditionalContext(sources, cwd = process.cwd()) {
96
107
  if (sources.length === 0)
97
- return "";
108
+ return { resolved: "", urls: [] };
98
109
  const parts = [];
110
+ const urls = [];
99
111
  for (const source of sources) {
100
112
  const trimmed = source.trim();
101
113
  if (!trimmed)
102
114
  continue;
103
- const label = trimmed.startsWith("http") ? trimmed : path.basename(trimmed);
104
- const content = await loadTemplate(trimmed, "", cwd);
105
- if (content) {
106
- parts.push(`### ${label}\n\n${content}`);
115
+ const kind = classifySource(trimmed);
116
+ if (kind === "url") {
117
+ urls.push(trimmed);
118
+ }
119
+ else if (kind === "file") {
120
+ const content = await loadTemplate(trimmed, "", cwd);
121
+ if (content) {
122
+ parts.push(`### ${path.basename(trimmed)}\n\n${content}`);
123
+ }
124
+ }
125
+ else {
126
+ // Inline text — use as-is
127
+ parts.push(trimmed);
107
128
  }
108
129
  }
109
- return parts.length > 0 ? parts.join("\n\n---\n\n") : "";
130
+ return {
131
+ resolved: parts.length > 0 ? parts.join("\n\n---\n\n") : "",
132
+ urls,
133
+ };
110
134
  }
package/dist/types.d.ts CHANGED
@@ -68,7 +68,7 @@ export interface NodeResult {
68
68
  export interface ToolCall {
69
69
  tool: string;
70
70
  input: unknown;
71
- output: unknown;
71
+ output?: unknown;
72
72
  }
73
73
  export type ExecutionEvent = {
74
74
  type: "workflow:start";
@@ -1,8 +1,8 @@
1
1
  /**
2
2
  * Triage Workflow
3
3
  *
4
- * Investigate a production alert → gather context from available
5
- * providers → determine root cause → create issue → notify team.
4
+ * Investigate a production alert → gather context → determine root cause →
5
+ * create/update issue → implement fix → open PR → notify team.
6
6
  *
7
7
  * Provider-agnostic: nodes list all compatible skills per category.
8
8
  * The executor uses whichever skills are configured. Pre-execution
@@ -1,8 +1,8 @@
1
1
  /**
2
2
  * Triage Workflow
3
3
  *
4
- * Investigate a production alert → gather context from available
5
- * providers → determine root cause → create issue → notify team.
4
+ * Investigate a production alert → gather context → determine root cause →
5
+ * create/update issue → implement fix → open PR → notify team.
6
6
  *
7
7
  * Provider-agnostic: nodes list all compatible skills per category.
8
8
  * The executor uses whichever skills are configured. Pre-execution
@@ -17,9 +17,23 @@
17
17
  export const triageWorkflow = {
18
18
  id: "triage",
19
19
  name: "Alert Triage",
20
- description: "Investigate a production alert, determine root cause, create an issue, and notify the team",
21
- entry: "gather",
20
+ description: "Investigate a production alert, determine root cause, create an issue, implement a fix, and notify the team",
21
+ entry: "prepare",
22
22
  nodes: {
23
+ prepare: {
24
+ name: "Load Rules & Context",
25
+ instruction: `You are preparing for a triage workflow. Your job is to fetch and review any knowledge documents listed in the input.
26
+
27
+ 1. Check input for \`rules_urls\` and \`context_urls\` arrays.
28
+ 2. For each URL, fetch its content:
29
+ - Linear document URLs → use the Linear MCP tools (get_document or search_documentation)
30
+ - Other HTTP URLs → fetch directly
31
+ 3. Summarize the key rules and context that downstream workflow nodes should follow.
32
+ 4. Output the consolidated information so it's available to all subsequent steps.
33
+
34
+ If there are no URLs to fetch, just pass through — this step is a no-op.`,
35
+ skills: ["linear"],
36
+ },
23
37
  gather: {
24
38
  name: "Gather Context",
25
39
  instruction: `You are investigating a production alert. Gather all relevant context using the available tools:
@@ -40,6 +54,7 @@ Be thorough — the investigation step depends on complete context. Use every to
40
54
  3. Assess severity: critical (service down), high (major feature broken), medium (degraded), low (cosmetic/minor).
41
55
  4. Determine affected services and users.
42
56
  5. Recommend a fix approach.
57
+ 6. Assess fix complexity: "simple" (a few lines, clear change), "moderate" (multiple files but well-understood), or "complex" (architectural, risky, or unclear).
43
58
 
44
59
  **Novelty check (REQUIRED — you MUST do this before finishing):**
45
60
  Search the issue tracker for existing issues (BOTH open AND closed) that cover the same root cause, error pattern, or affected service. Use github_search_issues and/or linear_search_issues with multiple keyword variations.
@@ -61,6 +76,7 @@ Set is_duplicate=true if ANY match is found. Set is_duplicate=false ONLY if you
61
76
  duplicate_of: { type: "string", description: "Issue ID/URL if duplicate" },
62
77
  recommendation: { type: "string" },
63
78
  fix_approach: { type: "string" },
79
+ fix_complexity: { type: "string", enum: ["simple", "moderate", "complex"] },
64
80
  },
65
81
  required: ["root_cause", "severity", "is_duplicate", "recommendation"],
66
82
  },
@@ -81,25 +97,62 @@ If context.issueTemplate is provided, use it as the format for the issue body. O
81
97
  Create the issue in whichever tracker is available to you.`,
82
98
  skills: ["linear", "github"],
83
99
  },
100
+ skip: {
101
+ name: "Skip — Duplicate or Low Priority",
102
+ instruction: `This alert was determined to be a duplicate or low-priority.
103
+
104
+ If this is a **duplicate** of an existing issue (check context for duplicate_of):
105
+ 1. Find the existing issue using the issue tracker tools.
106
+ 2. Add a comment: "+1 — SWEny triage confirmed this issue is still active (seen again at {current UTC timestamp}). Latest context: {1-2 sentence summary of what was found this run}."
107
+ 3. If the issue is closed/done, reopen it or note in the comment that the bug has recurred.
108
+
109
+ If this is just **low priority**, log a brief note about why it was skipped.`,
110
+ skills: ["linear", "github"],
111
+ },
112
+ implement: {
113
+ name: "Implement Fix",
114
+ instruction: `Implement the fix identified during investigation:
115
+
116
+ 1. Create a feature branch from the base branch (check context for baseBranch, default "main").
117
+ 2. Read the relevant source files to understand the current code.
118
+ 3. Make the necessary code changes — fix the bug, nothing more.
119
+ 4. Run any existing tests if available to verify the fix doesn't break anything.
120
+ 5. Stage and commit with a clear commit message referencing the issue.
121
+
122
+ Keep changes minimal and focused. Do not refactor surrounding code or add unrelated improvements.
123
+
124
+ If the fix turns out to be more complex than expected, stop and explain why — do not force a bad fix.`,
125
+ skills: ["github"],
126
+ },
127
+ create_pr: {
128
+ name: "Open Pull Request",
129
+ instruction: `Open a pull request for the fix:
130
+
131
+ 1. Push the branch to the remote.
132
+ 2. Create a PR with a clear title referencing the issue (e.g. "[OFF-1020] fix: guard empty pdf_texts before access").
133
+ 3. In the PR body, include: summary of the bug, what the fix does, and a link to the issue.
134
+ 4. Add appropriate labels if available.
135
+
136
+ If context.prTemplate is provided, use it as the format for the PR body. Otherwise use a clear structure with: Summary, Changes, Testing, and Related Issues.
137
+
138
+ Return the PR URL and number.`,
139
+ skills: ["github"],
140
+ },
84
141
  notify: {
85
142
  name: "Notify Team",
86
143
  instruction: `Send a notification summarizing the triage result:
87
144
 
88
- 1. Include: alert summary, severity, root cause (1-2 sentences), and a link to the created issue.
145
+ 1. Include: alert summary, severity, root cause (1-2 sentences), and links to any created issues or PRs.
89
146
  2. For critical/high severity, make the notification urgent.
90
147
  3. For medium/low, a standard notification is fine.
91
148
 
92
149
  Use whichever notification channel is available to you.`,
93
150
  skills: ["slack", "notification"],
94
151
  },
95
- skip: {
96
- name: "Skip — Duplicate or Low Priority",
97
- instruction: `This alert was determined to be a duplicate or low-priority.
98
- Log a brief note about why it was skipped. No further action needed.`,
99
- skills: [],
100
- },
101
152
  },
102
153
  edges: [
154
+ // prepare → gather (always)
155
+ { from: "prepare", to: "gather" },
103
156
  // gather → investigate (always)
104
157
  { from: "gather", to: "investigate" },
105
158
  // investigate → create_issue (if novel and actionable)
@@ -114,7 +167,33 @@ Log a brief note about why it was skipped. No further action needed.`,
114
167
  to: "skip",
115
168
  when: "is_duplicate is true, OR severity is low",
116
169
  },
117
- // create_issue → notify (always)
118
- { from: "create_issue", to: "notify" },
170
+ // create_issue → implement (if fix is clear and not too complex)
171
+ {
172
+ from: "create_issue",
173
+ to: "implement",
174
+ when: "fix_complexity is simple or moderate AND fix_approach is provided AND dryRun is not true",
175
+ },
176
+ // create_issue → notify (if fix is too complex or risky, or dry run)
177
+ {
178
+ from: "create_issue",
179
+ to: "notify",
180
+ when: "fix_complexity is complex, OR no clear fix_approach, OR dryRun is true",
181
+ },
182
+ // skip → implement (duplicate exists but has a clear unfixed bug with a simple fix)
183
+ {
184
+ from: "skip",
185
+ to: "implement",
186
+ when: "is_duplicate is true AND the duplicate issue is still open/unfixed AND fix_complexity is simple or moderate AND fix_approach is provided AND dryRun is not true",
187
+ },
188
+ // skip → notify (duplicate was +1'd, no implementation needed or too complex)
189
+ {
190
+ from: "skip",
191
+ to: "notify",
192
+ when: "is_duplicate is true AND (fix_complexity is complex OR no fix_approach OR the issue already has a PR in progress OR dryRun is true), OR severity is low",
193
+ },
194
+ // implement → create_pr (always after successful implementation)
195
+ { from: "implement", to: "create_pr" },
196
+ // create_pr → notify (always)
197
+ { from: "create_pr", to: "notify" },
119
198
  ],
120
199
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@sweny-ai/core",
3
- "version": "0.1.5",
3
+ "version": "0.1.7",
4
4
  "type": "module",
5
5
  "bin": {
6
6
  "sweny": "./dist/cli/main.js"