@sweny-ai/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/__tests__/claude.test.d.ts +1 -0
- package/dist/__tests__/claude.test.js +328 -0
- package/dist/__tests__/executor.test.d.ts +1 -0
- package/dist/__tests__/executor.test.js +296 -0
- package/dist/__tests__/integration/datadog.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/datadog.integration.test.js +23 -0
- package/dist/__tests__/integration/e2e-workflow.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/e2e-workflow.integration.test.js +75 -0
- package/dist/__tests__/integration/github.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/github.integration.test.js +37 -0
- package/dist/__tests__/integration/harness.d.ts +24 -0
- package/dist/__tests__/integration/harness.js +34 -0
- package/dist/__tests__/integration/linear.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/linear.integration.test.js +15 -0
- package/dist/__tests__/integration/sentry.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/sentry.integration.test.js +20 -0
- package/dist/__tests__/integration/slack.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/slack.integration.test.js +22 -0
- package/dist/__tests__/schema.test.d.ts +1 -0
- package/dist/__tests__/schema.test.js +239 -0
- package/dist/__tests__/skills-index.test.d.ts +1 -0
- package/dist/__tests__/skills-index.test.js +122 -0
- package/dist/__tests__/skills.test.d.ts +1 -0
- package/dist/__tests__/skills.test.js +296 -0
- package/dist/__tests__/studio.test.d.ts +1 -0
- package/dist/__tests__/studio.test.js +172 -0
- package/dist/__tests__/testing.test.d.ts +1 -0
- package/dist/__tests__/testing.test.js +224 -0
- package/dist/browser.d.ts +17 -0
- package/dist/browser.js +22 -0
- package/dist/claude.d.ts +48 -0
- package/dist/claude.js +293 -0
- package/dist/cli/check.d.ts +11 -0
- package/dist/cli/check.js +237 -0
- package/dist/cli/config-file.d.ts +12 -0
- package/dist/cli/config-file.js +208 -0
- package/dist/cli/config.d.ts +77 -0
- package/dist/cli/config.js +565 -0
- package/dist/cli/main.d.ts +10 -0
- package/dist/cli/main.js +744 -0
- package/dist/cli/output.d.ts +26 -0
- package/dist/cli/output.js +357 -0
- package/dist/cli/renderer.d.ts +33 -0
- package/dist/cli/renderer.js +423 -0
- package/dist/cli/renderer.test.d.ts +1 -0
- package/dist/cli/renderer.test.js +302 -0
- package/dist/cli/setup.d.ts +11 -0
- package/dist/cli/setup.js +310 -0
- package/dist/executor.d.ts +29 -0
- package/dist/executor.js +173 -0
- package/dist/executor.test.d.ts +1 -0
- package/dist/executor.test.js +314 -0
- package/dist/index.d.ts +37 -0
- package/dist/index.js +36 -0
- package/dist/mcp.d.ts +11 -0
- package/dist/mcp.js +183 -0
- package/dist/mcp.test.d.ts +1 -0
- package/dist/mcp.test.js +334 -0
- package/dist/schema.d.ts +318 -0
- package/dist/schema.js +207 -0
- package/dist/skills/betterstack.d.ts +7 -0
- package/dist/skills/betterstack.js +114 -0
- package/dist/skills/datadog.d.ts +7 -0
- package/dist/skills/datadog.js +107 -0
- package/dist/skills/github.d.ts +8 -0
- package/dist/skills/github.js +155 -0
- package/dist/skills/index.d.ts +68 -0
- package/dist/skills/index.js +134 -0
- package/dist/skills/linear.d.ts +7 -0
- package/dist/skills/linear.js +89 -0
- package/dist/skills/notification.d.ts +11 -0
- package/dist/skills/notification.js +142 -0
- package/dist/skills/sentry.d.ts +7 -0
- package/dist/skills/sentry.js +105 -0
- package/dist/skills/slack.d.ts +8 -0
- package/dist/skills/slack.js +115 -0
- package/dist/studio.d.ts +124 -0
- package/dist/studio.js +174 -0
- package/dist/testing.d.ts +88 -0
- package/dist/testing.js +253 -0
- package/dist/types.d.ts +144 -0
- package/dist/types.js +11 -0
- package/dist/workflow-builder.d.ts +45 -0
- package/dist/workflow-builder.js +120 -0
- package/dist/workflow-builder.test.d.ts +1 -0
- package/dist/workflow-builder.test.js +117 -0
- package/dist/workflows/implement.d.ts +11 -0
- package/dist/workflows/implement.js +83 -0
- package/dist/workflows/index.d.ts +2 -0
- package/dist/workflows/index.js +2 -0
- package/dist/workflows/triage.d.ts +18 -0
- package/dist/workflows/triage.js +108 -0
- package/package.json +83 -0
|
@@ -0,0 +1,302 @@
|
|
|
1
|
+
import { describe, it, expect, beforeEach } from "vitest";
|
|
2
|
+
import { DagRenderer, stripAnsi } from "./renderer.js";
|
|
3
|
+
// Fixture: minimal three-node linear workflow (gather → investigate → report)
// exercised by most of the DagRenderer cases below. Node insertion order and
// edge order are significant — the renderer lays boxes out in this order.
const testWorkflow = {
    id: "test",
    name: "Test Workflow",
    description: "A simple test workflow",
    nodes: {
        gather: { name: "Gather Context", instruction: "Gather", skills: [] },
        investigate: { name: "Investigate", instruction: "Investigate", skills: [] },
        report: { name: "Report", instruction: "Report", skills: [] },
    },
    edges: [
        { from: "gather", to: "investigate" },
        { from: "investigate", to: "report" },
    ],
    entry: "gather",
};
|
|
18
|
+
describe("DagRenderer", () => {
    let renderer;

    // Event factories shared across cases — each test feeds the renderer plain
    // event objects shaped like the executor's observer events.
    const enterEvent = (node, instruction) => ({ type: "node:enter", node, instruction });
    const exitEvent = (node, status, data = {}) => ({
        type: "node:exit",
        node,
        result: { status, data, toolCalls: [] },
    });
    const toolEvent = (node, tool) => ({ type: "tool:call", node, tool, input: {} });

    beforeEach(() => {
        renderer = new DagRenderer(testWorkflow, { animate: false });
    });

    it("starts all nodes in pending state", () => {
        for (const id of ["gather", "investigate", "report"]) {
            expect(renderer.getNodeState(id)).toBe("pending");
        }
    });

    it("tracks node state: pending → running on node:enter", () => {
        renderer.update(enterEvent("gather", "Gather"));
        expect(renderer.getNodeState("gather")).toBe("running");
        expect(renderer.getNodeState("investigate")).toBe("pending");
    });

    it("tracks node state: running → completed on node:exit success", () => {
        renderer.update(enterEvent("gather", "Gather"));
        renderer.update(exitEvent("gather", "success"));
        expect(renderer.getNodeState("gather")).toBe("completed");
    });

    it("tracks node state: running → failed on node:exit failed", () => {
        renderer.update(enterEvent("gather", "Gather"));
        renderer.update(exitEvent("gather", "failed"));
        expect(renderer.getNodeState("gather")).toBe("failed");
    });

    it("tracks failed nodes", () => {
        renderer.update(enterEvent("investigate", "Investigate"));
        renderer.update(exitEvent("investigate", "failed", { error: "timeout" }));
        expect(renderer.getNodeState("investigate")).toBe("failed");
    });

    it("counts tool calls per node", () => {
        expect(renderer.getToolCallCount("gather")).toBe(0);
        renderer.update(enterEvent("gather", "Gather"));
        renderer.update(toolEvent("gather", "search"));
        renderer.update(toolEvent("gather", "fetch"));
        expect(renderer.getToolCallCount("gather")).toBe(2);
        expect(renderer.getToolCallCount("investigate")).toBe(0);
    });

    it("counts tool calls across multiple nodes independently", () => {
        renderer.update(enterEvent("gather", "Gather"));
        renderer.update(toolEvent("gather", "search"));
        renderer.update(exitEvent("gather", "success"));
        renderer.update(enterEvent("investigate", "Investigate"));
        renderer.update(toolEvent("investigate", "read"));
        renderer.update(toolEvent("investigate", "write"));
        renderer.update(toolEvent("investigate", "run"));
        expect(renderer.getToolCallCount("gather")).toBe(1);
        expect(renderer.getToolCallCount("investigate")).toBe(3);
    });

    it("renders to string without crashing", () => {
        const output = renderer.renderToString();
        expect(typeof output).toBe("string");
        expect(output.length).toBeGreaterThan(0);
    });

    it("output contains all node names", () => {
        const output = renderer.renderToString();
        for (const name of ["Gather Context", "Investigate", "Report"]) {
            expect(output).toContain(name);
        }
    });

    it("topological order starts from entry node", () => {
        const output = renderer.renderToString();
        const gatherIdx = output.indexOf("Gather Context");
        const investigateIdx = output.indexOf("Investigate");
        const reportIdx = output.indexOf("Report");
        expect(gatherIdx).toBeLessThan(investigateIdx);
        expect(investigateIdx).toBeLessThan(reportIdx);
    });

    it("shows running status icon for active node", () => {
        renderer.update(enterEvent("gather", "Gather"));
        // ◉ is the running icon
        expect(renderer.renderToString()).toContain("◉");
    });

    it("shows completed status icon for finished node", () => {
        renderer.update(enterEvent("gather", "Gather"));
        renderer.update(exitEvent("gather", "success"));
        // ● is the completed icon
        expect(renderer.renderToString()).toContain("●");
    });

    it("shows failed status icon for failed node", () => {
        renderer.update(enterEvent("gather", "Gather"));
        renderer.update(exitEvent("gather", "failed"));
        // ✕ is the failed icon
        expect(renderer.renderToString()).toContain("✕");
    });

    it("returns pending state for unknown node id", () => {
        expect(renderer.getNodeState("nonexistent")).toBe("pending");
    });

    it("returns 0 tool calls for unknown node id", () => {
        expect(renderer.getToolCallCount("nonexistent")).toBe(0);
    });

    it("includes a legend in the output", () => {
        const output = renderer.renderToString();
        // Legend should mention at least pending and completed
        expect(output).toContain("○");
        expect(output).toContain("●");
    });

    it("handles workflow:start event without crashing", () => {
        const event = { type: "workflow:start", workflow: "test" };
        expect(() => renderer.update(event)).not.toThrow();
    });

    it("handles workflow:end event without crashing", () => {
        const event = {
            type: "workflow:end",
            results: {
                gather: { status: "success", data: {}, toolCalls: [] },
            },
        };
        expect(() => renderer.update(event)).not.toThrow();
    });

    it("handles route event without crashing", () => {
        const event = { type: "route", from: "gather", to: "investigate", reason: "next" };
        expect(() => renderer.update(event)).not.toThrow();
    });

    describe("uniform box widths", () => {
        // Node names vary in length so that box-width normalization is exercised.
        const mixedWidthWorkflow = {
            id: "mixed",
            name: "Mixed Width Workflow",
            description: "Nodes with varying name lengths",
            nodes: {
                a: { name: "A", instruction: "Short", skills: [] },
                b: { name: "Gather Context and Analyze", instruction: "Long name", skills: [] },
                c: { name: "Report", instruction: "Medium", skills: [] },
            },
            edges: [
                { from: "a", to: "b" },
                { from: "b", to: "c" },
            ],
            entry: "a",
        };

        // Render the fixture once with colors stripped so box-drawing
        // characters can be matched positionally.
        const renderMixed = () =>
            stripAnsi(new DagRenderer(mixedWidthWorkflow, { animate: false }).renderToString());

        it("all top-border lines have the same width", () => {
            const output = renderMixed();
            const topLines = output.split("\n").filter((l) => l.includes("┌") && l.includes("┐"));
            expect(topLines.length).toBe(3);
            const widths = topLines.map((l) => l.trim().length);
            expect(new Set(widths).size).toBe(1); // all same width
        });

        it("output contains ▼ arrowhead on non-first boxes", () => {
            expect(renderMixed()).toContain("▼");
        });

        it("▼ is positioned between box borders", () => {
            const arrowLines = renderMixed().split("\n").filter((l) => l.includes("▼"));
            for (const line of arrowLines) {
                // ▼ should be inside a top border: ┌───▼───┐
                expect(line).toMatch(/┌─*▼─*┐/);
            }
        });

        it("bottom of non-terminal boxes has centered ┬", () => {
            const bottomLinesWithT = renderMixed().split("\n").filter((l) => l.includes("┬"));
            // A and B are non-terminal, so 2 lines with ┬
            expect(bottomLinesWithT.length).toBe(2);
            for (const line of bottomLinesWithT) {
                expect(line).toMatch(/└─*┬─*┘/);
            }
        });

        it("vertical connector uses centered │", () => {
            const lines = renderMixed().split("\n");
            // Find lines that are purely vertical connectors (between boxes)
            const connectorLines = lines.filter((l) => l.trim() === "│");
            expect(connectorLines.length).toBeGreaterThan(0);
            // The │ should be centered relative to the box width
            const topLine = lines.find((l) => l.includes("┌") && l.includes("┐"));
            const tIdx = topLine.indexOf("┌");
            const boxWidth = topLine.lastIndexOf("┐") - tIdx + 1;
            const expectedCol = tIdx + Math.floor(boxWidth / 2);
            for (const cl of connectorLines) {
                expect(cl.indexOf("│")).toBe(expectedCol);
            }
        });
    });

    describe("branching layout", () => {
        // investigate fans out to two children; create_ticket continues to notify.
        const branchingWorkflow = {
            id: "triage",
            name: "Triage Test",
            description: "A branching workflow",
            nodes: {
                gather: { name: "Gather Context", instruction: "Gather", skills: [] },
                investigate: { name: "Investigate", instruction: "Investigate", skills: [] },
                create_ticket: { name: "Create Ticket", instruction: "Create", skills: [] },
                skip: { name: "Skip", instruction: "Skip", skills: [] },
                notify: { name: "Notify", instruction: "Notify", skills: [] },
            },
            edges: [
                { from: "gather", to: "investigate" },
                { from: "investigate", to: "create_ticket" },
                { from: "investigate", to: "skip" },
                { from: "create_ticket", to: "notify" },
            ],
            entry: "gather",
        };

        it("output contains both branch children", () => {
            const r = new DagRenderer(branchingWorkflow, { animate: false });
            const output = stripAnsi(r.renderToString());
            expect(output).toContain("Create Ticket");
            expect(output).toContain("Skip");
        });

        it("output contains ┴ fork character", () => {
            const r = new DagRenderer(branchingWorkflow, { animate: false });
            const output = stripAnsi(r.renderToString());
            expect(output).toContain("┴");
        });

        it("state tracking works through branches", () => {
            const r = new DagRenderer(branchingWorkflow, { animate: false });
            r.update(enterEvent("gather", "Gather"));
            r.update(exitEvent("gather", "success"));
            r.update(enterEvent("investigate", "Investigate"));
            r.update(exitEvent("investigate", "success"));
            r.update(enterEvent("create_ticket", "Create"));
            expect(r.getNodeState("gather")).toBe("completed");
            expect(r.getNodeState("investigate")).toBe("completed");
            expect(r.getNodeState("create_ticket")).toBe("running");
            expect(r.getNodeState("skip")).toBe("pending");
            expect(r.getNodeState("notify")).toBe("pending");
        });

        it("topological order: Gather before Investigate before Notify", () => {
            const r = new DagRenderer(branchingWorkflow, { animate: false });
            const output = stripAnsi(r.renderToString());
            const gatherIdx = output.indexOf("Gather Context");
            const investigateIdx = output.indexOf("Investigate");
            const notifyIdx = output.indexOf("Notify");
            expect(gatherIdx).toBeLessThan(investigateIdx);
            expect(investigateIdx).toBeLessThan(notifyIdx);
        });

        it("falls back to sequential for 3+ children", () => {
            const tripleWorkflow = {
                id: "triple",
                name: "Triple Branch",
                description: "Three-way branch",
                nodes: {
                    start: { name: "Start", instruction: "Start", skills: [] },
                    a: { name: "Branch A", instruction: "A", skills: [] },
                    b: { name: "Branch B", instruction: "B", skills: [] },
                    c: { name: "Branch C", instruction: "C", skills: [] },
                },
                edges: [
                    { from: "start", to: "a" },
                    { from: "start", to: "b" },
                    { from: "start", to: "c" },
                ],
                entry: "start",
            };
            const r = new DagRenderer(tripleWorkflow, { animate: false });
            const output = stripAnsi(r.renderToString());
            // Should NOT have ┴ fork — sequential fallback
            expect(output).not.toContain("┴");
            // But all branches are present
            expect(output).toContain("Branch A");
            expect(output).toContain("Branch B");
            expect(output).toContain("Branch C");
        });
    });
});
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
/**
 * sweny setup <provider>
 *
 * Creates the standard SWEny label set in the configured issue tracker / source control
 * provider, then prints a config snippet with the resolved IDs ready to paste into
 * .sweny.yml or GitHub Actions secrets.
 */
import type { Command } from "commander";
/**
 * Create (or reuse) the SWEny label set in a Linear team, then print a config
 * snippet with the resolved label IDs. `apiKey` is sent as the Authorization
 * header to Linear's GraphQL API.
 */
export declare function setupLinear(apiKey: string, teamId: string): Promise<void>;
/**
 * Create (or reuse) the SWEny label set in a GitHub repository (`owner/repo`).
 * GitHub labels are referenced by name, so no IDs are printed.
 */
export declare function setupGithub(token: string, repo: string): Promise<void>;
/** Attach the `sweny setup <provider>` subcommand tree to a commander program. */
export declare function registerSetupCommand(program: Command): void;
|
|
@@ -0,0 +1,310 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* sweny setup <provider>
|
|
3
|
+
*
|
|
4
|
+
* Creates the standard SWEny label set in the configured issue tracker / source control
|
|
5
|
+
* provider, then prints a config snippet with the resolved IDs ready to paste into
|
|
6
|
+
* .sweny.yml or GitHub Actions secrets.
|
|
7
|
+
*/
|
|
8
|
+
import chalk from "chalk";
|
|
9
|
+
const LINEAR_API_URL = "https://api.linear.app/graphql";
const GITHUB_API_URL = "https://api.github.com";
// Lifecycle "signal" labels — created standalone (no parent) and used to flag
// issues/PRs that need human attention. Colors are hex without the leading '#'.
const SIGNAL_LABELS = [
    {
        name: "agent-needs-input",
        color: "CA8A04",
        description: "Agent hit a decision point — needs human clarification before it can proceed",
    },
    {
        name: "agent-error",
        color: "B91C1C",
        description: "Unexpected technical failure during agent execution — needs human investigation",
    },
    {
        name: "human-only",
        color: "6B7280",
        description: "Guard rail — automation must not touch this issue or PR",
    },
    {
        name: "needs-review",
        color: "0EA5E9",
        description: "PR opened by the agent and waiting for human review",
    },
];
// Work-type labels — on Linear these are created as children of AGENT_PARENT;
// on GitHub (no label nesting) they are created flat alongside everything else.
const WORK_TYPE_LABELS = [
    {
        name: "triage",
        color: "EA580C",
        description: "Production log analysis and bug detection by the agent",
        isWorkType: true,
    },
    {
        name: "feature",
        color: "2563EB",
        description: "Feature implementation by the agent",
        isWorkType: true,
    },
    {
        name: "optimization",
        color: "059669",
        description: "Performance or code optimization by the agent",
        isWorkType: true,
    },
    {
        name: "research",
        color: "D97706",
        description: "Spike, investigation, or report by the agent — exploratory work without a direct code change",
        isWorkType: true,
    },
    {
        name: "support",
        color: "0891B2",
        description: "Work initiated from a support request by the agent",
        isWorkType: true,
    },
    {
        name: "spec",
        color: "BE185D",
        description: "Spec generation — agent converted non-technical input into a structured spec",
        isWorkType: true,
    },
    {
        name: "task",
        color: "78716C",
        description: "Open-ended prompt or generic work by the agent that does not fit another category",
        isWorkType: true,
    },
];
// Parent/group label marking all autonomous agent work.
const AGENT_PARENT = {
    name: "agent",
    color: "7C3AED",
    description: "Parent group — marks all autonomous agent work",
};
// Generic bug label — many workspaces/repos already have one; setup reuses it.
const BUG_LABEL = {
    name: "bug",
    color: "DC2626",
    description: "A confirmed bug",
};
|
|
87
|
+
/**
 * POST a GraphQL document to the Linear API.
 *
 * Throws on a non-2xx HTTP status or when the GraphQL response carries
 * errors; otherwise resolves with the response's `data` payload.
 */
async function linearRequest(apiKey, query, variables) {
    const response = await fetch(LINEAR_API_URL, {
        method: "POST",
        headers: { "Content-Type": "application/json", Authorization: apiKey },
        body: JSON.stringify({ query, variables }),
    });
    if (!response.ok) {
        throw new Error(`Linear API error: ${response.status} ${response.statusText}`);
    }
    const payload = await response.json();
    if (payload.errors?.length) {
        const messages = payload.errors.map((e) => e.message).join(", ");
        throw new Error(`Linear GraphQL: ${messages}`);
    }
    return payload.data;
}
|
|
100
|
+
/**
 * Fetch up to 250 labels of a Linear team.
 * Each node carries `{ id, name, parent: { id } | null }`.
 */
async function listLinearLabels(apiKey, teamId) {
    const query = `query($teamId: String!) {
    team(id: $teamId) {
      labels(first: 250) {
        nodes { id name parent { id } }
      }
    }
  }`;
    const result = await linearRequest(apiKey, query, { teamId });
    return result.team.labels.nodes;
}
|
|
110
|
+
/**
 * Create one Linear issue label (optionally nested under `parentId`).
 * Returns the created `{ id, name }`; throws if the mutation reports failure.
 */
async function createLinearLabel(apiKey, teamId, def, parentId) {
    const mutation = `mutation($input: IssueLabelCreateInput!) {
    issueLabelCreate(input: $input) {
      issueLabel { id name }
      success
    }
  }`;
    // Linear expects the color with a leading '#'; the defs store bare hex.
    const input = {
        teamId,
        name: def.name,
        color: `#${def.color}`,
        description: def.description,
    };
    if (parentId) {
        input.parentId = parentId;
    }
    const result = await linearRequest(apiKey, mutation, { input });
    if (!result.issueLabelCreate.success) {
        throw new Error(`Failed to create Linear label "${def.name}"`);
    }
    return result.issueLabelCreate.issueLabel;
}
|
|
129
|
+
/**
 * Create (or reuse) the SWEny label set in a Linear team, then print a
 * .sweny.yml / env-var config snippet with the resolved label IDs.
 *
 * Idempotent: labels whose (case-insensitive) name already exists in the team
 * are reused, not recreated.
 */
export async function setupLinear(apiKey, teamId) {
    console.log(chalk.dim("\n Fetching existing labels…\n"));
    const existing = await listLinearLabels(apiKey, teamId);
    // Case-insensitive name index so e.g. an existing "Bug" matches our "bug".
    const byName = new Map(existing.map((l) => [l.name.toLowerCase(), l]));
    const created = [];
    const skipped = [];
    // Helper: find or create
    async function ensure(def, parentId) {
        const found = byName.get(def.name.toLowerCase());
        if (found) {
            skipped.push(def.name);
            console.log(` ${chalk.dim("–")} ${chalk.dim(def.name)} ${chalk.dim("(already exists)")}`);
            return found;
        }
        const label = await createLinearLabel(apiKey, teamId, def, parentId);
        byName.set(def.name.toLowerCase(), label);
        created.push(def.name);
        console.log(` ${chalk.green("+")} ${chalk.bold(def.name)}`);
        return label;
    }
    // 1. agent group parent
    const agentLabel = await ensure(AGENT_PARENT);
    // 2. Work type labels (children of agent)
    const workTypeIds = {};
    for (const def of WORK_TYPE_LABELS) {
        const label = await ensure(def, agentLabel.id);
        workTypeIds[def.name] = label.id;
    }
    // 3. Signal labels (standalone)
    const signalIds = {};
    for (const def of SIGNAL_LABELS) {
        const label = await ensure(def);
        signalIds[def.name] = label.id;
    }
    // 4. Bug label (standalone, may already exist)
    const bugLabel = byName.get("bug") ?? (await ensure(BUG_LABEL));
    const bugId = bugLabel.id;
    // ── Print config snippet ─────────────────────────────────────────────────
    const triageId = workTypeIds["triage"] ?? byName.get("triage")?.id ?? "";
    console.log(`
${chalk.bold(" Done.")} ${created.length} created, ${skipped.length} already existed.

${chalk.bold(" Add these to your .sweny.yml:")}

${chalk.cyan(`   issue-tracker-provider: linear
   linear-team-id: ${teamId}
   linear-triage-label-id: ${triageId}
   linear-bug-label-id: ${bugId}
   issue-labels: ${agentLabel.id}`)}

${chalk.bold(" Or as environment variables / GitHub Actions secrets:")}

${chalk.cyan(`   LINEAR_TEAM_ID=${teamId}
   LINEAR_TRIAGE_LABEL_ID=${triageId}
   LINEAR_BUG_LABEL_ID=${bugId}
   SWENY_ISSUE_LABELS=${agentLabel.id}`)}

${chalk.dim(" Full label inventory:")}
${chalk.dim(`   agent               ${agentLabel.id}`)}
${Object.entries(workTypeIds)
        .map(([name, id]) => chalk.dim(`   ${name.padEnd(20)}${id}`))
        .join("\n")}
${Object.entries(signalIds)
        .map(([name, id]) => chalk.dim(`   ${name.padEnd(20)}${id}`))
        .join("\n")}
${bugId ? chalk.dim(`   bug                 ${bugId}`) : ""}
`);
}
|
|
197
|
+
/**
 * Minimal GitHub REST helper: fires one request against the v3 API and
 * returns the parsed JSON body.
 *
 * A 422 response is tolerated (POSTing a label that already exists returns
 * 422); any other non-2xx status throws.
 */
async function githubRequest(token, method, path, body) {
    const init = {
        method,
        headers: {
            Authorization: `Bearer ${token}`,
            Accept: "application/vnd.github+json",
            "X-GitHub-Api-Version": "2022-11-28",
            "Content-Type": "application/json",
        },
    };
    if (body) {
        init.body = JSON.stringify(body);
    }
    const res = await fetch(`${GITHUB_API_URL}${path}`, init);
    // 201 Created and 200 OK are both success; 422 means label already exists
    if (!res.ok && res.status !== 422) {
        throw new Error(`GitHub API error: ${res.status} ${res.statusText}`);
    }
    return (await res.json());
}
|
|
214
|
+
/**
 * List every label in a GitHub repo, following pagination.
 * A page shorter than 100 entries marks the final page.
 */
async function listAllGithubLabels(token, repo) {
    const collected = [];
    for (let page = 1; ; page++) {
        const batch = await githubRequest(token, "GET", `/repos/${repo}/labels?per_page=100&page=${page}`);
        collected.push(...batch);
        if (batch.length < 100) {
            return collected;
        }
    }
}
|
|
226
|
+
/**
 * Create (or reuse) the SWEny label set in a GitHub repository (`owner/repo`),
 * then print a .sweny.yml config snippet. GitHub has no label nesting, so the
 * parent, work-type, signal, and bug labels are all created flat, and labels
 * are referenced by name rather than ID.
 */
export async function setupGithub(token, repo) {
    console.log(chalk.dim("\n Fetching existing labels…\n"));
    const existing = await listAllGithubLabels(token, repo);
    // Case-insensitive name set so existing labels are skipped, not duplicated.
    const byName = new Set(existing.map((l) => l.name.toLowerCase()));
    const allLabels = [AGENT_PARENT, ...WORK_TYPE_LABELS, ...SIGNAL_LABELS, BUG_LABEL];
    const created = [];
    const skipped = [];
    for (const def of allLabels) {
        if (byName.has(def.name.toLowerCase())) {
            skipped.push(def.name);
            console.log(` ${chalk.dim("–")} ${chalk.dim(def.name)} ${chalk.dim("(already exists)")}`);
            continue;
        }
        await githubRequest(token, "POST", `/repos/${repo}/labels`, {
            name: def.name,
            color: def.color,
            description: def.description,
        });
        created.push(def.name);
        console.log(` ${chalk.green("+")} ${chalk.bold(def.name)}`);
    }
    console.log(`
${chalk.bold(" Done.")} ${created.length} created, ${skipped.length} already existed.

${chalk.bold(" Add these to your .sweny.yml:")}

${chalk.cyan(`   source-control-provider: github
   pr-labels: agent,triage,needs-review
   issue-labels: agent`)}

${chalk.dim(" GitHub labels are referenced by name — no UUIDs needed.")}
`);
}
|
|
259
|
+
// ── Command registration ───────────────────────────────────────────────────
|
|
260
|
+
/**
 * Register the `sweny setup <provider>` subcommand tree on a commander
 * program. Providers: `linear` (needs LINEAR_API_KEY + a team ID) and
 * `github` (needs GITHUB_TOKEN or BOT_TOKEN + a repo).
 *
 * Note: the options use `.option()` rather than `.requiredOption()` on
 * purpose — with `.requiredOption()` commander exits with its own generic
 * error before the action runs, so the friendlier "--team-id or
 * LINEAR_TEAM_ID" / "--repo or GITHUB_REPOSITORY" messages below would be
 * unreachable dead code.
 */
export function registerSetupCommand(program) {
    const setupCmd = program
        .command("setup")
        .description("Create the standard SWEny label set in your issue tracker or source control provider");
    setupCmd
        .command("linear")
        .description("Create SWEny labels in a Linear workspace")
        .option("--team-id <id>", "Linear team ID", process.env.LINEAR_TEAM_ID)
        .action(async (opts) => {
        const apiKey = process.env.LINEAR_API_KEY;
        if (!apiKey) {
            console.error(chalk.red("\n Missing: LINEAR_API_KEY environment variable\n"));
            process.exit(1);
        }
        if (!opts.teamId) {
            console.error(chalk.red("\n Missing: --team-id or LINEAR_TEAM_ID\n"));
            process.exit(1);
        }
        console.log(chalk.bold(`\n sweny setup linear (team: ${opts.teamId})\n`));
        try {
            await setupLinear(apiKey, opts.teamId);
        }
        catch (err) {
            console.error(chalk.red(`\n Error: ${err instanceof Error ? err.message : String(err)}\n`));
            process.exit(1);
        }
    });
    setupCmd
        .command("github")
        .description("Create SWEny labels in a GitHub repository")
        .option("--repo <owner/repo>", "GitHub repository (e.g. my-org/my-repo)", process.env.GITHUB_REPOSITORY)
        .action(async (opts) => {
        // Either token works for the REST label endpoints.
        const token = process.env.GITHUB_TOKEN || process.env.BOT_TOKEN;
        if (!token) {
            // Mention both accepted variables — the check above accepts either.
            console.error(chalk.red("\n Missing: GITHUB_TOKEN (or BOT_TOKEN) environment variable\n"));
            process.exit(1);
        }
        if (!opts.repo) {
            console.error(chalk.red("\n Missing: --repo or GITHUB_REPOSITORY\n"));
            process.exit(1);
        }
        console.log(chalk.bold(`\n sweny setup github (repo: ${opts.repo})\n`));
        try {
            await setupGithub(token, opts.repo);
        }
        catch (err) {
            console.error(chalk.red(`\n Error: ${err instanceof Error ? err.message : String(err)}\n`));
            process.exit(1);
        }
    });
}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* DAG Workflow Executor
|
|
3
|
+
*
|
|
4
|
+
* Walks a workflow graph node-by-node. At each node, Claude gets
|
|
5
|
+
* the node's instruction + available skill tools + context from
|
|
6
|
+
* prior nodes. Claude does the work, then the executor resolves
|
|
7
|
+
* which edge to follow next.
|
|
8
|
+
*
|
|
9
|
+
* This replaces ~8k lines of engine + recipe step code.
|
|
10
|
+
*/
|
|
11
|
+
import type { Workflow, Skill, Claude, Observer, NodeResult, Logger } from "./types.js";
/** Dependencies and configuration handed to {@link execute}. */
export interface ExecuteOptions {
    /** Registered skills (id → Skill) */
    skills: Map<string, Skill>;
    /** Config values — env vars + explicit overrides */
    config?: Record<string, string>;
    /** Claude client */
    claude: Claude;
    /** Event observer for streaming/logging */
    observer?: Observer;
    /** Logger */
    logger?: Logger;
}
/**
 * Execute a workflow from entry to completion.
 *
 * Returns a map of node ID → result for every node that ran.
 */
export declare function execute(workflow: Workflow, input: unknown, options: ExecuteOptions): Promise<Map<string, NodeResult>>;
|