@sweny-ai/core 0.1.8 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -274,7 +274,7 @@ export function validateInputs(config) {
274
274
  break;
275
275
  case "betterstack":
276
276
  if (!config.observabilityCredentials.apiToken)
277
- errors.push("Missing: BETTERSTACK_API_TOKEN is required for betterstack provider");
277
+ errors.push("Missing: BETTERSTACK_API_TOKEN, BETTERSTACK_TELEMETRY_TOKEN, or BETTERSTACK_UPTIME_TOKEN is required for betterstack provider");
278
278
  if (!config.observabilityCredentials.sourceId && !config.observabilityCredentials.tableName)
279
279
  errors.push("Missing: either BETTERSTACK_SOURCE_ID (--betterstack-source-id) or BETTERSTACK_TABLE_NAME (--betterstack-table-name) is required for betterstack provider");
280
280
  break;
@@ -501,7 +501,7 @@ function parseObservabilityCredentials(provider, options, fileConfig = {}) {
501
501
  };
502
502
  case "betterstack":
503
503
  return {
504
- apiToken: env.BETTERSTACK_API_TOKEN || "",
504
+ apiToken: env.BETTERSTACK_API_TOKEN || env.BETTERSTACK_TELEMETRY_TOKEN || env.BETTERSTACK_UPTIME_TOKEN || "",
505
505
  sourceId: options.betterstackSourceId || env.BETTERSTACK_SOURCE_ID || f("betterstack-source-id") || "",
506
506
  tableName: options.betterstackTableName || env.BETTERSTACK_TABLE_NAME || f("betterstack-table-name") || "",
507
507
  };
package/dist/cli/main.js CHANGED
@@ -108,6 +108,8 @@ function buildCredentialMap() {
108
108
  "NR_API_KEY",
109
109
  "NR_REGION",
110
110
  "BETTERSTACK_API_TOKEN",
111
+ "BETTERSTACK_TELEMETRY_TOKEN",
112
+ "BETTERSTACK_UPTIME_TOKEN",
111
113
  "BETTERSTACK_SOURCE_ID",
112
114
  "BETTERSTACK_TABLE_NAME",
113
115
  "SLACK_BOT_TOKEN",
@@ -201,6 +201,15 @@ export function formatDagResultHuman(results, durationMs, config) {
201
201
  if (createPrResult?.data?.prUrl) {
202
202
  return formatDagSuccessResult(results, duration);
203
203
  }
204
+ // Dry run — show findings summary, no side effects taken
205
+ if (config?.dryRun) {
206
+ return formatDagDryRunResult(results, duration);
207
+ }
208
+ // Issues created but no PR (fix too complex)
209
+ const createIssueResult = results.get("create_issue");
210
+ if (createIssueResult && createIssueResult.status === "success") {
211
+ return formatDagIssuesCreatedResult(results, duration);
212
+ }
204
213
  // No action / skip
205
214
  return formatDagNoActionResult(results, duration, config);
206
215
  }
@@ -226,6 +235,56 @@ function formatDagSuccessResult(results, duration) {
226
235
  }
227
236
  return ["", boxTop(), ...boxSection(header), boxDivider(), ...boxSection(body), boxBottom(), ""].join("\n");
228
237
  }
238
+ function formatDagIssuesCreatedResult(results, duration) {
239
+ const title = `${c.ok("\u2713")} ${chalk.bold("Issues Created")}`;
240
+ const titlePad = BOX_WIDTH - 4 - visLen(title) - visLen(duration);
241
+ const header = [title + " ".repeat(Math.max(1, titlePad)) + c.subtle(duration)];
242
+ const body = [];
243
+ const issueData = results.get("create_issue")?.data;
244
+ if (issueData?.issueIdentifier) {
245
+ body.push(`${c.subtle("Issue")}${" ".repeat(5)}${chalk.bold(String(issueData.issueIdentifier))}`);
246
+ if (issueData.issueTitle)
247
+ body.push(`${" ".repeat(10)}${String(issueData.issueTitle)}`);
248
+ if (issueData.issueUrl)
249
+ body.push(`${" ".repeat(10)}${c.link(String(issueData.issueUrl))}`);
250
+ body.push("");
251
+ }
252
+ const investigateData = results.get("investigate")?.data;
253
+ const rec = investigateData?.recommendation;
254
+ if (rec) {
255
+ body.push(`${c.subtle("Next")}${" ".repeat(6)}${String(rec)}`);
256
+ }
257
+ return ["", boxTop(), ...boxSection(header), boxDivider(), ...boxSection(body), boxBottom(), ""].join("\n");
258
+ }
259
+ function formatDagDryRunResult(results, duration) {
260
+ const title = `${c.ok("\u2713")} ${chalk.bold("Triage Complete (Dry Run)")}`;
261
+ const titlePad = BOX_WIDTH - 4 - visLen(title) - visLen(duration);
262
+ const header = [title + " ".repeat(Math.max(1, titlePad)) + c.subtle(duration)];
263
+ const body = [];
264
+ const investigateData = results.get("investigate")?.data;
265
+ const findings = investigateData?.findings;
266
+ const novelCount = investigateData?.novel_count;
267
+ const severity = investigateData?.highest_severity;
268
+ if (findings && findings.length > 0) {
269
+ body.push(`${c.subtle("Findings")}${" ".repeat(2)}${chalk.bold(String(findings.length))} total, ${chalk.bold(String(novelCount ?? 0))} novel`);
270
+ if (severity)
271
+ body.push(`${c.subtle("Severity")}${" ".repeat(2)}${chalk.bold(severity)}`);
272
+ body.push("");
273
+ for (const f of findings.slice(0, 5)) {
274
+ const dup = f.is_duplicate ? c.subtle(" (dup)") : "";
275
+ body.push(` ${f.severity === "critical" || f.severity === "high" ? c.fail("\u25CF") : c.subtle("\u25CB")} ${String(f.title)}${dup}`);
276
+ }
277
+ if (findings.length > 5)
278
+ body.push(c.subtle(` ... and ${findings.length - 5} more`));
279
+ body.push("");
280
+ }
281
+ const rec = investigateData?.recommendation;
282
+ if (rec)
283
+ body.push(`${c.subtle("Next")}${" ".repeat(6)}${String(rec)}`);
284
+ body.push("");
285
+ body.push(c.subtle("No side effects — dry run mode"));
286
+ return ["", boxTop(), ...boxSection(header), boxDivider(), ...boxSection(body), boxBottom(), ""].join("\n");
287
+ }
229
288
  function formatDagFailureResult(nodeId, result, duration) {
230
289
  const title = `${c.fail("\u2717")} ${chalk.bold("Workflow Failed")}`;
231
290
  const titlePad = BOX_WIDTH - 4 - visLen(title) - visLen(duration);
@@ -244,11 +303,9 @@ function formatDagNoActionResult(results, duration, config) {
244
303
  const header = [title + " ".repeat(Math.max(1, titlePad)) + c.subtle(duration)];
245
304
  const body = [];
246
305
  const investigateData = results.get("investigate")?.data;
247
- if (investigateData?.is_duplicate) {
248
- body.push("Issue identified as a duplicate of an existing ticket.");
249
- if (investigateData.duplicate_of) {
250
- body.push(`${" ".repeat(2)}${c.link(String(investigateData.duplicate_of))}`);
251
- }
306
+ const novelCount = investigateData?.novel_count;
307
+ if (novelCount === 0) {
308
+ body.push("All findings were duplicates of existing issues.");
252
309
  }
253
310
  else {
254
311
  const rec = investigateData?.recommendation;
package/dist/executor.js CHANGED
@@ -61,8 +61,21 @@ export async function execute(workflow, input, options) {
61
61
  results.set(currentId, result);
62
62
  safeObserve(observer, { type: "node:exit", node: currentId, result }, logger);
63
63
  logger.info(` ✓ ${result.status}`, { node: currentId, toolCalls: result.toolCalls.length });
64
+ // Dry run hard gate — stop at the first conditional routing decision.
65
+ // Unconditional edges are analysis flow (prepare→gather→investigate);
66
+ // conditional edges are action decisions (investigate→create_issue/skip).
67
+ // Enforced in the executor so it cannot be bypassed by LLM evaluation.
68
+ const isDryRun = input && typeof input === "object" && input.dryRun === true;
69
+ if (isDryRun) {
70
+ const outEdges = workflow.edges.filter((e) => e.from === currentId);
71
+ if (outEdges.some((e) => e.when)) {
72
+ safeObserve(observer, { type: "route", from: currentId, to: "(end)", reason: "dry run" }, logger);
73
+ currentId = null;
74
+ continue;
75
+ }
76
+ }
64
77
  // Resolve next node via edge conditions
65
- currentId = await resolveNext(workflow, currentId, results, claude, observer);
78
+ currentId = await resolveNext(workflow, currentId, results, input, claude, observer);
66
79
  }
67
80
  safeObserve(observer, {
68
81
  type: "workflow:end",
@@ -148,7 +161,7 @@ function resolveConfig(skills, overrides) {
148
161
  * - 1 unconditional edge → follow it
149
162
  * - Multiple or conditional → Claude evaluates
150
163
  */
151
- async function resolveNext(workflow, current, results, claude, observer) {
164
+ async function resolveNext(workflow, current, results, input, claude, observer) {
152
165
  const outEdges = workflow.edges.filter((e) => e.from === current);
153
166
  if (outEdges.length === 0)
154
167
  return null;
@@ -160,8 +173,12 @@ async function resolveNext(workflow, current, results, claude, observer) {
160
173
  // Check for a default (unconditional) edge among conditionals
161
174
  const defaultEdge = outEdges.find((e) => !e.when);
162
175
  const conditionalEdges = outEdges.filter((e) => e.when);
163
- // Claude evaluates which condition matches
164
- const context = Object.fromEntries([...results.entries()].map(([k, v]) => [k, v.data]));
176
+ // Claude evaluates which condition matches — include input so conditions
177
+ // can reference workflow-level flags like dryRun
178
+ const context = {
179
+ input,
180
+ ...Object.fromEntries([...results.entries()].map(([k, v]) => [k, v.data])),
181
+ };
165
182
  const choices = conditionalEdges.map((e) => ({
166
183
  id: e.to,
167
184
  description: e.when,
package/dist/mcp.js CHANGED
@@ -106,9 +106,12 @@ export function buildAutoMcpServers(config) {
106
106
  };
107
107
  }
108
108
  // Better Stack MCP — HTTP remote MCP; Bearer token auth.
109
- // Injected whenever the token is present (not just when it's the primary provider)
109
+ // BetterStack uses separate tokens for Uptime vs Telemetry APIs.
110
+ // The MCP server accepts the telemetry token for log/metric queries.
111
+ // Accept: BETTERSTACK_API_TOKEN (legacy), BETTERSTACK_TELEMETRY_TOKEN, or BETTERSTACK_UPTIME_TOKEN.
112
+ // Injected whenever any token is present (not just when it's the primary provider)
110
113
  // because BetterStack logs complement any primary observability provider.
111
- const bsApiToken = creds.BETTERSTACK_API_TOKEN;
114
+ const bsApiToken = creds.BETTERSTACK_API_TOKEN || creds.BETTERSTACK_TELEMETRY_TOKEN || creds.BETTERSTACK_UPTIME_TOKEN;
112
115
  if (bsApiToken) {
113
116
  auto["betterstack"] = {
114
117
  type: "http",
@@ -47,66 +47,82 @@ Be thorough — the investigation step depends on complete context. Use every to
47
47
  },
48
48
  investigate: {
49
49
  name: "Root Cause Analysis",
50
- instruction: `Based on the gathered context, perform a root cause analysis:
51
-
52
- 1. Correlate the error with recent code changes, deploys, or config changes.
53
- 2. Identify the most likely root cause.
54
- 3. Assess severity: critical (service down), high (major feature broken), medium (degraded), low (cosmetic/minor).
55
- 4. Determine affected services and users.
56
- 5. Recommend a fix approach.
57
- 6. Assess fix complexity: "simple" (a few lines, clear change), "moderate" (multiple files but well-understood), or "complex" (architectural, risky, or unclear).
58
-
59
- **Novelty check (REQUIRED you MUST do this before finishing):**
60
- Search the issue tracker for existing issues (BOTH open AND closed) that cover the same root cause, error pattern, or affected service. Use github_search_issues and/or linear_search_issues with multiple keyword variations.
61
-
62
- A match means ANY of:
63
- - An issue about the same root cause (even if closed/fixed)
64
- - An issue about the same error message or pattern in the same service
65
- - An issue that a human would consider "the same bug"
66
-
67
- Set is_duplicate=true if ANY match is found. Set is_duplicate=false ONLY if you searched and found zero matches. You MUST always set this field.`,
50
+ instruction: `Based on the gathered context, classify every distinct issue you found into one of two buckets: **novel** or **duplicate**.
51
+
52
+ For EACH issue found:
53
+ 1. Identify the root cause and affected code/service.
54
+ 2. Assess severity: critical (service down), high (major feature broken), medium (degraded), low (cosmetic/minor).
55
+ 3. Assess fix complexity: "simple" (a few lines, clear change), "moderate" (multiple files but well-understood), or "complex" (architectural, risky, or unclear).
56
+ 4. **Novelty check (REQUIRED):** Search the issue tracker for existing issues (BOTH open AND closed) that cover the same root cause, error pattern, or affected service. Use github_search_issues and/or linear_search_issues with multiple keyword variations.
57
+ - A match = same root cause, same error message/pattern, or a human would call it "the same bug."
58
+ - If matched → it's a **duplicate**. Record the existing issue ID.
59
+ - If no match → it's **novel**.
60
+
61
+ **Output rules:**
62
+ - \`findings\`: array of ALL issues found (both novel and duplicate).
63
+ - \`novel_count\`: how many findings are novel (not duplicates).
64
+ - \`highest_severity\`: the highest severity across ALL findings.
65
+ - \`recommendation\`: what should happen next.
66
+
67
+ Downstream nodes will act ONLY on novel findings. Duplicates will be +1'd automatically.`,
68
68
  skills: ["github", "linear"],
69
69
  output: {
70
70
  type: "object",
71
71
  properties: {
72
- root_cause: { type: "string" },
73
- severity: { type: "string", enum: ["critical", "high", "medium", "low"] },
74
- affected_services: { type: "array", items: { type: "string" } },
75
- is_duplicate: { type: "boolean" },
76
- duplicate_of: { type: "string", description: "Issue ID/URL if duplicate" },
72
+ findings: {
73
+ type: "array",
74
+ items: {
75
+ type: "object",
76
+ properties: {
77
+ title: { type: "string", description: "Short description of the issue" },
78
+ root_cause: { type: "string" },
79
+ severity: { type: "string", enum: ["critical", "high", "medium", "low"] },
80
+ affected_services: { type: "array", items: { type: "string" } },
81
+ is_duplicate: { type: "boolean" },
82
+ duplicate_of: { type: "string", description: "Existing issue ID/URL if duplicate" },
83
+ fix_approach: { type: "string" },
84
+ fix_complexity: { type: "string", enum: ["simple", "moderate", "complex"] },
85
+ },
86
+ required: ["title", "root_cause", "severity", "is_duplicate"],
87
+ },
88
+ },
89
+ novel_count: { type: "number", description: "Count of novel (non-duplicate) findings" },
90
+ highest_severity: { type: "string", enum: ["critical", "high", "medium", "low"] },
77
91
  recommendation: { type: "string" },
78
- fix_approach: { type: "string" },
79
- fix_complexity: { type: "string", enum: ["simple", "moderate", "complex"] },
80
92
  },
81
- required: ["root_cause", "severity", "is_duplicate", "recommendation"],
93
+ required: ["findings", "novel_count", "highest_severity", "recommendation"],
82
94
  },
83
95
  },
84
96
  create_issue: {
85
- name: "Create Issue",
86
- instruction: `Create an issue documenting the investigation findings:
97
+ name: "Create Issues & Triage Duplicates",
98
+ instruction: `Process ALL findings from the investigation. The findings array contains both novel and duplicate issues.
87
99
 
88
- 1. Use a clear, actionable title.
100
+ **For each NOVEL finding** (is_duplicate = false):
101
+ 1. Create a new issue with a clear, actionable title.
89
102
  2. Include: root cause, severity, affected services, reproduction steps, and recommended fix.
90
103
  3. Add appropriate labels (bug, severity level, affected service).
91
104
  4. Link to relevant commits, PRs, or existing issues.
92
105
 
93
- **Safety check**: If during creation you notice a very similar issue already exists, add a comment to it using github_add_comment or linear_add_comment instead of creating a duplicate.
106
+ **For each DUPLICATE finding** (is_duplicate = true):
107
+ 1. Find the existing issue using the issue tracker (check duplicate_of field).
108
+ 2. Add a comment: "+1 — SWEny triage confirmed this issue is still active (seen again at {current UTC timestamp}). Latest context: {1-2 sentence summary}."
109
+ 3. If the existing issue is closed/done, reopen it or note in the comment that the bug has recurred.
94
110
 
95
- If context.issueTemplate is provided, use it as the format for the issue body. Otherwise use a clear structure with: Summary, Root Cause, Impact, Steps to Reproduce, and Recommended Fix.
111
+ If context.issueTemplate is provided, use it as the format for new issue bodies. Otherwise use a clear structure with: Summary, Root Cause, Impact, Steps to Reproduce, and Recommended Fix.
96
112
 
97
- Create the issue in whichever tracker is available to you.`,
113
+ Use whichever issue tracker is available to you. Output the created/updated issue identifiers.`,
98
114
  skills: ["linear", "github"],
99
115
  },
100
116
  skip: {
101
- name: "Skip — Duplicate or Low Priority",
102
- instruction: `This alert was determined to be a duplicate or low-priority.
117
+ name: "Skip — All Duplicates or Low Priority",
118
+ instruction: `Every finding from the investigation was either a duplicate or low-priority. No new issues need to be created.
103
119
 
104
- If this is a **duplicate** of an existing issue (check context for duplicate_of):
105
- 1. Find the existing issue using the issue tracker tools.
120
+ For each **duplicate** finding (check the findings array for items where is_duplicate = true):
121
+ 1. Find the existing issue using the issue tracker (check duplicate_of field).
106
122
  2. Add a comment: "+1 — SWEny triage confirmed this issue is still active (seen again at {current UTC timestamp}). Latest context: {1-2 sentence summary of what was found this run}."
107
123
  3. If the issue is closed/done, reopen it or note in the comment that the bug has recurred.
108
124
 
109
- If this is just **low priority**, log a brief note about why it was skipped.`,
125
+ For **low priority** findings, log a brief note about why they were skipped.`,
110
126
  skills: ["linear", "github"],
111
127
  },
112
128
  implement: {
@@ -155,42 +171,32 @@ Use whichever notification channel is available to you.`,
155
171
  { from: "prepare", to: "gather" },
156
172
  // gather → investigate (always)
157
173
  { from: "gather", to: "investigate" },
158
- // investigate → create_issue (if novel and actionable)
174
+ // investigate → create_issue (novel findings worth acting on)
159
175
  {
160
176
  from: "investigate",
161
177
  to: "create_issue",
162
- when: "is_duplicate is false AND severity is medium or higher",
178
+ when: "novel_count is greater than 0 AND highest_severity is medium or higher",
163
179
  },
164
- // investigate → skip (if duplicate or low priority)
180
+ // investigate → skip (everything is a duplicate or low priority)
165
181
  {
166
182
  from: "investigate",
167
183
  to: "skip",
168
- when: "is_duplicate is true, OR severity is low",
184
+ when: "novel_count is 0, OR highest_severity is low",
169
185
  },
170
- // create_issue → implement (if fix is clear and not too complex)
186
+ // create_issue → implement (novel findings have a clear, feasible fix)
171
187
  {
172
188
  from: "create_issue",
173
189
  to: "implement",
174
- when: "fix_complexity is simple or moderate AND fix_approach is provided AND dryRun is not true",
190
+ when: "at least one novel finding has fix_complexity simple or moderate AND fix_approach is provided",
175
191
  },
176
- // create_issue → notify (if fix is too complex or risky, or dry run)
192
+ // create_issue → notify (fixes too complex)
177
193
  {
178
194
  from: "create_issue",
179
195
  to: "notify",
180
- when: "fix_complexity is complex, OR no clear fix_approach, OR dryRun is true",
181
- },
182
- // skip → implement (duplicate exists but has a clear unfixed bug with a simple fix)
183
- {
184
- from: "skip",
185
- to: "implement",
186
- when: "is_duplicate is true AND the duplicate issue is still open/unfixed AND fix_complexity is simple or moderate AND fix_approach is provided AND dryRun is not true",
187
- },
188
- // skip → notify (duplicate was +1'd, no implementation needed or too complex)
189
- {
190
- from: "skip",
191
- to: "notify",
192
- when: "is_duplicate is true AND (fix_complexity is complex OR no fix_approach OR the issue already has a PR in progress OR dryRun is true), OR severity is low",
196
+ when: "all novel findings have fix_complexity complex, OR no clear fix_approach",
193
197
  },
198
+ // skip → notify (nothing to implement — all duplicates +1'd or low priority)
199
+ { from: "skip", to: "notify" },
194
200
  // implement → create_pr (always after successful implementation)
195
201
  { from: "implement", to: "create_pr" },
196
202
  // create_pr → notify (always)
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@sweny-ai/core",
3
- "version": "0.1.8",
3
+ "version": "0.1.10",
4
4
  "type": "module",
5
5
  "bin": {
6
6
  "sweny": "./dist/cli/main.js"