@sweny-ai/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/__tests__/claude.test.d.ts +1 -0
- package/dist/__tests__/claude.test.js +328 -0
- package/dist/__tests__/executor.test.d.ts +1 -0
- package/dist/__tests__/executor.test.js +296 -0
- package/dist/__tests__/integration/datadog.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/datadog.integration.test.js +23 -0
- package/dist/__tests__/integration/e2e-workflow.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/e2e-workflow.integration.test.js +75 -0
- package/dist/__tests__/integration/github.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/github.integration.test.js +37 -0
- package/dist/__tests__/integration/harness.d.ts +24 -0
- package/dist/__tests__/integration/harness.js +34 -0
- package/dist/__tests__/integration/linear.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/linear.integration.test.js +15 -0
- package/dist/__tests__/integration/sentry.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/sentry.integration.test.js +20 -0
- package/dist/__tests__/integration/slack.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/slack.integration.test.js +22 -0
- package/dist/__tests__/schema.test.d.ts +1 -0
- package/dist/__tests__/schema.test.js +239 -0
- package/dist/__tests__/skills-index.test.d.ts +1 -0
- package/dist/__tests__/skills-index.test.js +122 -0
- package/dist/__tests__/skills.test.d.ts +1 -0
- package/dist/__tests__/skills.test.js +296 -0
- package/dist/__tests__/studio.test.d.ts +1 -0
- package/dist/__tests__/studio.test.js +172 -0
- package/dist/__tests__/testing.test.d.ts +1 -0
- package/dist/__tests__/testing.test.js +224 -0
- package/dist/browser.d.ts +17 -0
- package/dist/browser.js +22 -0
- package/dist/claude.d.ts +48 -0
- package/dist/claude.js +293 -0
- package/dist/cli/check.d.ts +11 -0
- package/dist/cli/check.js +237 -0
- package/dist/cli/config-file.d.ts +12 -0
- package/dist/cli/config-file.js +208 -0
- package/dist/cli/config.d.ts +77 -0
- package/dist/cli/config.js +565 -0
- package/dist/cli/main.d.ts +10 -0
- package/dist/cli/main.js +744 -0
- package/dist/cli/output.d.ts +26 -0
- package/dist/cli/output.js +357 -0
- package/dist/cli/renderer.d.ts +33 -0
- package/dist/cli/renderer.js +423 -0
- package/dist/cli/renderer.test.d.ts +1 -0
- package/dist/cli/renderer.test.js +302 -0
- package/dist/cli/setup.d.ts +11 -0
- package/dist/cli/setup.js +310 -0
- package/dist/executor.d.ts +29 -0
- package/dist/executor.js +173 -0
- package/dist/executor.test.d.ts +1 -0
- package/dist/executor.test.js +314 -0
- package/dist/index.d.ts +37 -0
- package/dist/index.js +36 -0
- package/dist/mcp.d.ts +11 -0
- package/dist/mcp.js +183 -0
- package/dist/mcp.test.d.ts +1 -0
- package/dist/mcp.test.js +334 -0
- package/dist/schema.d.ts +318 -0
- package/dist/schema.js +207 -0
- package/dist/skills/betterstack.d.ts +7 -0
- package/dist/skills/betterstack.js +114 -0
- package/dist/skills/datadog.d.ts +7 -0
- package/dist/skills/datadog.js +107 -0
- package/dist/skills/github.d.ts +8 -0
- package/dist/skills/github.js +155 -0
- package/dist/skills/index.d.ts +68 -0
- package/dist/skills/index.js +134 -0
- package/dist/skills/linear.d.ts +7 -0
- package/dist/skills/linear.js +89 -0
- package/dist/skills/notification.d.ts +11 -0
- package/dist/skills/notification.js +142 -0
- package/dist/skills/sentry.d.ts +7 -0
- package/dist/skills/sentry.js +105 -0
- package/dist/skills/slack.d.ts +8 -0
- package/dist/skills/slack.js +115 -0
- package/dist/studio.d.ts +124 -0
- package/dist/studio.js +174 -0
- package/dist/testing.d.ts +88 -0
- package/dist/testing.js +253 -0
- package/dist/types.d.ts +144 -0
- package/dist/types.js +11 -0
- package/dist/workflow-builder.d.ts +45 -0
- package/dist/workflow-builder.js +120 -0
- package/dist/workflow-builder.test.d.ts +1 -0
- package/dist/workflow-builder.test.js +117 -0
- package/dist/workflows/implement.d.ts +11 -0
- package/dist/workflows/implement.js +83 -0
- package/dist/workflows/index.d.ts +2 -0
- package/dist/workflows/index.js +2 -0
- package/dist/workflows/triage.d.ts +18 -0
- package/dist/workflows/triage.js +108 -0
- package/package.json +83 -0
package/dist/executor.js
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* DAG Workflow Executor
|
|
3
|
+
*
|
|
4
|
+
* Walks a workflow graph node-by-node. At each node, Claude gets
|
|
5
|
+
* the node's instruction + available skill tools + context from
|
|
6
|
+
* prior nodes. Claude does the work, then the executor resolves
|
|
7
|
+
* which edge to follow next.
|
|
8
|
+
*
|
|
9
|
+
* This replaces ~8k lines of engine + recipe step code.
|
|
10
|
+
*/
|
|
11
|
+
import { consoleLogger } from "./types.js";
|
|
12
|
+
/**
 * Execute a workflow from its entry node until no outgoing edge remains.
 *
 * Each visited node gets: the node's instruction, the tools of its skills,
 * and a context object containing the workflow input plus every prior
 * node's data. Returns a Map of node ID → result for every node that ran.
 */
export async function execute(workflow, input, options) {
    const { skills, claude, observer } = options;
    const config = resolveConfig(skills, options.config);
    const logger = options.logger ?? consoleLogger;
    const results = new Map();
    validate(workflow, skills);
    safeObserve(observer, { type: "workflow:start", workflow: workflow.id }, logger);
    for (let currentId = workflow.entry; currentId;) {
        const node = workflow.nodes[currentId];
        if (!node) {
            throw new Error(`Unknown node: "${currentId}"`);
        }
        const nodeId = currentId; // stable capture for the tool-handler closures below
        safeObserve(observer, { type: "node:enter", node: nodeId, instruction: node.instruction }, logger);
        logger.info(`→ ${node.name}`, { node: nodeId });
        // Context = workflow input + data from every node executed so far.
        const context = { input };
        for (const [id, res] of results) {
            context[id] = res.data;
        }
        // Wrap each skill tool so calls/results are observable and the
        // handler receives the resolved config + logger.
        const trackedTools = resolveTools(node.skills, skills).map((tool) => ({
            ...tool,
            handler: async (toolInput) => {
                safeObserve(observer, { type: "tool:call", node: nodeId, tool: tool.name, input: toolInput }, logger);
                const output = await tool.handler(toolInput, { config, logger });
                safeObserve(observer, { type: "tool:result", node: nodeId, tool: tool.name, output }, logger);
                return output;
            },
        }));
        // Let Claude perform this node's work.
        const result = await claude.run({
            instruction: node.instruction,
            context,
            tools: trackedTools,
            outputSchema: node.output,
        });
        results.set(nodeId, result);
        safeObserve(observer, { type: "node:exit", node: nodeId, result }, logger);
        logger.info(` ✓ ${result.status}`, { node: nodeId, toolCalls: result.toolCalls.length });
        // Pick the next node; null terminates the loop.
        currentId = await resolveNext(workflow, nodeId, results, claude, observer);
    }
    safeObserve(observer, {
        type: "workflow:end",
        results: Object.fromEntries(results),
    }, logger);
    return results;
}
|
|
68
|
+
// ─── Internals ───────────────────────────────────────────────────
|
|
69
|
+
/**
 * Invoke the observer callback, swallowing any exception it throws.
 * Observer failures are logged as a warning and never abort the workflow.
 */
function safeObserve(observer, event, logger) {
    if (!observer) {
        return;
    }
    try {
        observer(event);
    }
    catch (err) {
        const log = logger ?? consoleLogger;
        log.warn(`Observer error (non-fatal): ${err.message}`);
    }
}
|
|
80
|
+
/** Collect the tools for a node's skill IDs, silently skipping unregistered skills. */
function resolveTools(skillIds, skills) {
    const tools = [];
    for (const id of skillIds) {
        const skill = skills.get(id);
        if (skill != null) {
            tools.push(...skill.tools);
        }
    }
    return tools;
}
|
|
86
|
+
/**
 * Resolve config values for every registered skill.
 *
 * Resolution order per field: explicit `overrides[key]` first, then the
 * field's environment variable (when one is declared). A value counts as
 * present unless it is `undefined` or the empty string — this lets callers
 * pass legitimate falsy overrides such as `false` or `0`, while an unset
 * or empty env var still counts as missing.
 *
 * Throws an Error listing every required field that could not be resolved.
 */
function resolveConfig(skills, overrides) {
    const config = {};
    const missing = [];
    for (const skill of skills.values()) {
        for (const [key, field] of Object.entries(skill.config)) {
            const value = overrides?.[key] ?? (field.env ? process.env[field.env] : undefined);
            // Previously `if (value)`: falsy-but-valid overrides (false, 0)
            // were silently dropped. Only undefined / "" mean "not provided".
            if (value !== undefined && value !== "") {
                config[key] = value;
            }
            else if (field.required) {
                missing.push(`${skill.id}.${key} (env: ${field.env ?? "none"})`);
            }
        }
    }
    if (missing.length > 0) {
        throw new Error(`Missing required config:\n ${missing.join("\n ")}`);
    }
    return config;
}
|
|
109
|
+
/**
 * Decide which node follows `current`.
 *
 * - 0 out-edges → terminal (null)
 * - exactly 1 unconditional out-edge → its target
 * - otherwise Claude picks among the conditional `when` descriptions; an
 *   unconditional edge, if present, is offered as the fallback choice.
 *
 * If Claude names a target that is not a real out-edge, fall back to the
 * default edge, or failing that the first out-edge.
 */
async function resolveNext(workflow, current, results, claude, observer) {
    const outEdges = workflow.edges.filter((edge) => edge.from === current);
    if (outEdges.length === 0) {
        return null;
    }
    const [first] = outEdges;
    // Single unconditional edge — no routing decision needed.
    if (outEdges.length === 1 && !first.when) {
        safeObserve(observer, { type: "route", from: current, to: first.to, reason: "only path" });
        return first.to;
    }
    const defaultEdge = outEdges.find((edge) => !edge.when);
    // Prior node results become the evaluation context.
    const context = {};
    for (const [id, res] of results) {
        context[id] = res.data;
    }
    // Each conditional edge becomes a choice; the default edge (if any)
    // is presented as the "none of the above" option.
    const choices = [];
    for (const edge of outEdges) {
        if (edge.when) {
            choices.push({ id: edge.to, description: edge.when });
        }
    }
    if (defaultEdge) {
        choices.push({ id: defaultEdge.to, description: "None of the above / default path" });
    }
    const chosen = await claude.evaluate({
        question: "Based on the results so far, which condition is true?",
        context,
        choices,
    });
    // Guard against Claude returning a target that is not an out-edge.
    const validTargets = new Set(outEdges.map((edge) => edge.to));
    const resolved = validTargets.has(chosen) ? chosen : (defaultEdge?.to ?? first.to);
    safeObserve(observer, {
        type: "route",
        from: current,
        to: resolved,
        reason: choices.find((c) => c.id === resolved)?.description ?? "default",
    });
    return resolved;
}
|
|
153
|
+
/**
 * Sanity-check a workflow definition before running it.
 *
 * Throws when the entry node is missing or any edge references an unknown
 * node. Unregistered skills only produce a warning — they might still be
 * registered after validation.
 */
function validate(workflow, skills) {
    if (!workflow.nodes[workflow.entry]) {
        throw new Error(`Entry node "${workflow.entry}" not found`);
    }
    for (const { from, to } of workflow.edges) {
        for (const endpoint of [from, to]) {
            if (!workflow.nodes[endpoint]) {
                throw new Error(`Edge references unknown node: "${endpoint}"`);
            }
        }
    }
    // Missing skills are non-fatal — warn only.
    const referenced = new Set();
    for (const node of Object.values(workflow.nodes)) {
        for (const id of node.skills) {
            referenced.add(id);
        }
    }
    for (const id of referenced) {
        if (!skills.has(id)) {
            consoleLogger.warn(`Workflow references unregistered skill: "${id}"`);
        }
    }
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
import { describe, it, expect, beforeEach } from "vitest";
|
|
2
|
+
import { mkdirSync, writeFileSync, readFileSync, existsSync } from "node:fs";
|
|
3
|
+
import { rmSync } from "node:fs";
|
|
4
|
+
import * as path from "node:path";
|
|
5
|
+
import { tmpdir } from "node:os";
|
|
6
|
+
import { execute } from "./executor.js";
|
|
7
|
+
import { MockClaude, createFileSkill } from "./testing.js";
|
|
8
|
+
import { createSkillMap } from "./skills/index.js";
|
|
9
|
+
import { validateWorkflow, parseWorkflow } from "./schema.js";
|
|
10
|
+
import { triageWorkflow } from "./workflows/triage.js";
|
|
11
|
+
import { implementWorkflow } from "./workflows/implement.js";
|
|
12
|
+
// ─── Test fixtures ───────────────────────────────────────────────
|
|
13
|
+
// All test artifacts live under a single OS temp root for easy cleanup.
const tmpBase = path.join(tmpdir(), "sweny-core-test");
/** Create a unique scratch directory for one test, removing any leftovers first. */
function freshDir(name) {
    const target = path.join(tmpBase, `${name}-${Date.now()}`);
    rmSync(target, { recursive: true, force: true });
    mkdirSync(target, { recursive: true });
    return target;
}
|
|
20
|
+
// Minimal 3-node DAG for unit testing
// Fixture: strictly linear chain (step-a → step-b → step-c) joined by
// unconditional edges, so the executor never needs Claude for routing.
const simpleWorkflow = {
    id: "test-simple",
    name: "Simple Test",
    description: "A→B→C linear workflow",
    entry: "step-a",
    nodes: {
        "step-a": {
            name: "Step A",
            instruction: "Read the input and produce context",
            skills: ["filesystem"],
        },
        "step-b": {
            name: "Step B",
            instruction: "Process the context from step A",
            skills: ["filesystem"],
        },
        "step-c": {
            name: "Step C",
            instruction: "Write the final result",
            skills: ["filesystem"],
        },
    },
    edges: [
        { from: "step-a", to: "step-b" },
        { from: "step-b", to: "step-c" },
    ],
};
|
|
48
|
+
// Branching DAG for conditional routing
// Fixture: one decision node ("check") with two conditional out-edges,
// forcing the executor to route via Claude's edge evaluation.
const branchingWorkflow = {
    id: "test-branching",
    name: "Branching Test",
    description: "A → B or C depending on condition",
    entry: "check",
    nodes: {
        check: {
            name: "Check Input",
            instruction: "Examine the input and determine severity",
            skills: ["filesystem"],
        },
        handle_high: {
            name: "Handle High Severity",
            instruction: "Handle a high-severity alert",
            skills: ["filesystem"],
        },
        handle_low: {
            name: "Handle Low Severity",
            instruction: "Handle a low-severity alert",
            skills: [],
        },
    },
    edges: [
        // Natural-language conditions: resolved by claude.evaluate at runtime.
        { from: "check", to: "handle_high", when: "severity is high or critical" },
        { from: "check", to: "handle_low", when: "severity is low or medium" },
    ],
};
|
|
76
|
+
// ─── Schema validation tests ─────────────────────────────────────
// Covers validateWorkflow (structural error codes) and parseWorkflow
// (Zod parsing of raw JSON), plus validation of the bundled workflows.
describe("schema validation", () => {
    it("validates a correct workflow", () => {
        const errors = validateWorkflow(simpleWorkflow);
        expect(errors).toEqual([]);
    });
    it("detects missing entry node", () => {
        const bad = { ...simpleWorkflow, entry: "nonexistent" };
        const errors = validateWorkflow(bad);
        expect(errors).toContainEqual(expect.objectContaining({ code: "MISSING_ENTRY" }));
    });
    it("detects unknown edge targets", () => {
        const bad = {
            ...simpleWorkflow,
            edges: [{ from: "step-a", to: "nonexistent" }],
        };
        const errors = validateWorkflow(bad);
        expect(errors).toContainEqual(expect.objectContaining({ code: "UNKNOWN_EDGE_TARGET" }));
    });
    it("detects unreachable nodes", () => {
        const disconnected = {
            ...simpleWorkflow,
            edges: [{ from: "step-a", to: "step-b" }],
            // step-c has no incoming edge
        };
        const errors = validateWorkflow(disconnected);
        expect(errors).toContainEqual(expect.objectContaining({ code: "UNREACHABLE_NODE", nodeId: "step-c" }));
    });
    it("detects self-loops", () => {
        const loopy = {
            ...simpleWorkflow,
            edges: [
                { from: "step-a", to: "step-a" },
                { from: "step-a", to: "step-b" },
                { from: "step-b", to: "step-c" },
            ],
        };
        const errors = validateWorkflow(loopy);
        expect(errors).toContainEqual(expect.objectContaining({ code: "SELF_LOOP", nodeId: "step-a" }));
    });
    it("detects unknown skill references", () => {
        // Known-skill set contains only "github"; fixture nodes use "filesystem".
        const errors = validateWorkflow(simpleWorkflow, new Set(["github"]));
        expect(errors).toContainEqual(expect.objectContaining({ code: "UNKNOWN_SKILL", nodeId: "step-a" }));
    });
    it("validates the triage workflow", () => {
        const errors = validateWorkflow(triageWorkflow);
        expect(errors).toEqual([]);
    });
    it("validates the implement workflow", () => {
        const errors = validateWorkflow(implementWorkflow);
        expect(errors).toEqual([]);
    });
    it("parses a raw JSON workflow via Zod", () => {
        // Round-trip through JSON to strip any non-serializable structure.
        const raw = JSON.parse(JSON.stringify(simpleWorkflow));
        const parsed = parseWorkflow(raw);
        expect(parsed.id).toBe("test-simple");
        expect(parsed.nodes["step-a"].instruction).toBe("Read the input and produce context");
    });
    it("rejects invalid workflow JSON", () => {
        expect(() => parseWorkflow({ id: "" })).toThrow();
        expect(() => parseWorkflow({ id: "x", name: "x", nodes: {}, edges: [] })).toThrow(); // missing entry
    });
});
|
|
139
|
+
// ─── Executor tests ──────────────────────────────────────────────
// Exercises execute() end-to-end against a real temp directory, using
// MockClaude for scripted node responses and routing decisions.
describe("executor", () => {
    let outputDir;
    beforeEach(() => {
        outputDir = freshDir("executor");
    });
    it("executes a linear workflow end-to-end", async () => {
        const fileSkill = createFileSkill(outputDir);
        const skills = createSkillMap([fileSkill]);
        // Write a fixture input file
        writeFileSync(path.join(outputDir, "input.json"), JSON.stringify({ alert: "test" }));
        // Script each node's tool calls and output data.
        const claude = new MockClaude({
            responses: {
                "step-a": {
                    toolCalls: [{ tool: "fs_read_json", input: { path: "input.json" } }],
                    data: { context: "loaded from file" },
                },
                "step-b": {
                    data: { analysis: "processed" },
                },
                "step-c": {
                    toolCalls: [
                        {
                            tool: "fs_write_json",
                            input: { path: "result.json", data: { status: "done" } },
                        },
                    ],
                    data: { result: "written" },
                },
            },
        });
        const events = [];
        const results = await execute(simpleWorkflow, { alert: "test" }, {
            skills,
            claude,
            observer: (e) => events.push(e),
            config: {},
        });
        // All 3 nodes executed
        expect(results.size).toBe(3);
        expect(results.get("step-a")?.status).toBe("success");
        expect(results.get("step-b")?.status).toBe("success");
        expect(results.get("step-c")?.status).toBe("success");
        // Node A read the input file
        expect(results.get("step-a")?.data.context).toBe("loaded from file");
        // Node C wrote the result file
        expect(existsSync(path.join(outputDir, "result.json"))).toBe(true);
        const written = JSON.parse(readFileSync(path.join(outputDir, "result.json"), "utf-8"));
        expect(written.status).toBe("done");
        // Events were emitted correctly
        expect(events[0]).toEqual({ type: "workflow:start", workflow: "test-simple" });
        const nodeEnters = events.filter((e) => e.type === "node:enter");
        expect(nodeEnters).toHaveLength(3);
        expect(events[events.length - 1].type).toBe("workflow:end");
    });
    it("executes conditional branches", async () => {
        const fileSkill = createFileSkill(outputDir);
        const skills = createSkillMap([fileSkill]);
        const claude = new MockClaude({
            responses: {
                check: { data: { severity: "high" } },
                handle_high: { data: { handled: true } },
            },
            routes: {
                check: "handle_high", // Route to high severity handler
            },
        });
        const results = await execute(branchingWorkflow, { alert: "cpu spike" }, {
            skills,
            claude,
            config: {},
        });
        // Only the chosen branch ran.
        expect(results.size).toBe(2);
        expect(results.has("check")).toBe(true);
        expect(results.has("handle_high")).toBe(true);
        expect(results.has("handle_low")).toBe(false);
    });
    it("routes to alternative branch", async () => {
        const fileSkill = createFileSkill(outputDir);
        const skills = createSkillMap([fileSkill]);
        const claude = new MockClaude({
            responses: {
                check: { data: { severity: "low" } },
                handle_low: { data: { skipped: true } },
            },
            routes: {
                check: "handle_low",
            },
        });
        const results = await execute(branchingWorkflow, { alert: "minor" }, {
            skills,
            claude,
            config: {},
        });
        expect(results.size).toBe(2);
        expect(results.has("handle_low")).toBe(true);
        expect(results.has("handle_high")).toBe(false);
    });
    it("emits tool:call and tool:result events", async () => {
        const fileSkill = createFileSkill(outputDir);
        writeFileSync(path.join(outputDir, "data.json"), '{"key": "value"}');
        const claude = new MockClaude({
            responses: {
                "step-a": {
                    toolCalls: [{ tool: "fs_read_json", input: { path: "data.json" } }],
                    data: { read: true },
                },
                "step-b": { data: {} },
                "step-c": { data: {} },
            },
        });
        const events = [];
        await execute(simpleWorkflow, {}, {
            skills: createSkillMap([fileSkill]),
            claude,
            observer: (e) => events.push(e),
            config: {},
        });
        // Exactly one tool call scripted → exactly one call/result event pair.
        const toolCalls = events.filter((e) => e.type === "tool:call");
        const toolResults = events.filter((e) => e.type === "tool:result");
        expect(toolCalls).toHaveLength(1);
        expect(toolResults).toHaveLength(1);
        expect(toolCalls[0]).toMatchObject({
            type: "tool:call",
            node: "step-a",
            tool: "fs_read_json",
        });
    });
    it("throws on missing entry node", async () => {
        const bad = { ...simpleWorkflow, entry: "nonexistent" };
        const claude = new MockClaude({ responses: {} });
        await expect(execute(bad, {}, { skills: createSkillMap([]), claude, config: {} })).rejects.toThrow("Entry node");
    });
    it("throws on invalid edge reference", async () => {
        const bad = {
            ...simpleWorkflow,
            edges: [
                { from: "step-a", to: "nonexistent" },
                { from: "step-a", to: "step-b" },
            ],
        };
        const claude = new MockClaude({ responses: {} });
        await expect(execute(bad, {}, { skills: createSkillMap([]), claude, config: {} })).rejects.toThrow("unknown node");
    });
});
|
|
284
|
+
// ─── File skill tests ────────────────────────────────────────────
// Drives the createFileSkill tool handlers directly (no executor),
// verifying JSON round-trips, markdown writes, and directory creation.
describe("file skill", () => {
    let outputDir;
    beforeEach(() => {
        outputDir = freshDir("fileskill");
    });
    it("reads and writes JSON", async () => {
        const skill = createFileSkill(outputDir);
        const write = skill.tools.find((t) => t.name === "fs_write_json");
        const read = skill.tools.find((t) => t.name === "fs_read_json");
        const ctx = { config: {}, logger: console };
        await write.handler({ path: "test.json", data: { hello: "world" } }, ctx);
        const result = await read.handler({ path: "test.json" }, ctx);
        expect(result).toEqual({ hello: "world" });
    });
    it("writes markdown", async () => {
        const skill = createFileSkill(outputDir);
        const write = skill.tools.find((t) => t.name === "fs_write_markdown");
        const ctx = { config: {}, logger: console };
        await write.handler({ path: "issues/ISSUE-1.md", content: "# Bug\n\nSomething broke" }, ctx);
        const content = readFileSync(path.join(outputDir, "issues/ISSUE-1.md"), "utf-8");
        expect(content).toContain("# Bug");
    });
    it("creates nested directories", async () => {
        const skill = createFileSkill(outputDir);
        const write = skill.tools.find((t) => t.name === "fs_write_json");
        const ctx = { config: {}, logger: console };
        // Intermediate directories should be created implicitly.
        await write.handler({ path: "deep/nested/file.json", data: { ok: true } }, ctx);
        expect(existsSync(path.join(outputDir, "deep/nested/file.json"))).toBe(true);
    });
});
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @sweny-ai/core — Skill library + DAG workflow orchestration
|
|
3
|
+
*
|
|
4
|
+
* Three concepts:
|
|
5
|
+
* Skill — a group of tools Claude can call (replaces "providers")
|
|
6
|
+
* Workflow — a DAG of nodes connected by edges (replaces "engine + recipes")
|
|
7
|
+
* execute() — walk the DAG, run Claude at each node
|
|
8
|
+
*
|
|
9
|
+
* @example
|
|
10
|
+
* ```ts
|
|
11
|
+
* import { execute, ClaudeClient, createSkillMap, github, sentry, slack } from '@sweny-ai/core'
|
|
12
|
+
* import { triageWorkflow } from '@sweny-ai/core/workflows'
|
|
13
|
+
*
|
|
14
|
+
* const skills = createSkillMap([github, sentry, slack])
|
|
15
|
+
* const claude = new ClaudeClient()
|
|
16
|
+
*
|
|
17
|
+
* const results = await execute(triageWorkflow, alertPayload, {
|
|
18
|
+
* skills,
|
|
19
|
+
* claude,
|
|
20
|
+
* observer: (event) => console.log(event),
|
|
21
|
+
* })
|
|
22
|
+
* ```
|
|
23
|
+
*/
|
|
24
|
+
export type { Skill, SkillCategory, Tool, ToolContext, ConfigField, JSONSchema, Workflow, Node, Edge, NodeResult, ToolCall, ExecutionEvent, Observer, Claude, Logger, } from "./types.js";
|
|
25
|
+
export { consoleLogger } from "./types.js";
|
|
26
|
+
export { execute } from "./executor.js";
|
|
27
|
+
export type { ExecuteOptions } from "./executor.js";
|
|
28
|
+
export { ClaudeClient } from "./claude.js";
|
|
29
|
+
export type { ClaudeClientOptions } from "./claude.js";
|
|
30
|
+
export { github, linear, slack, sentry, datadog, betterstack, notification, builtinSkills, createSkillMap, allSkills, isSkillConfigured, configuredSkills, validateWorkflowSkills, } from "./skills/index.js";
|
|
31
|
+
export type { SkillValidationResult } from "./skills/index.js";
|
|
32
|
+
export { workflowZ, nodeZ, edgeZ, skillZ, parseWorkflow, validateWorkflow, workflowJsonSchema } from "./schema.js";
|
|
33
|
+
export type { WorkflowError } from "./schema.js";
|
|
34
|
+
export { buildAutoMcpServers } from "./mcp.js";
|
|
35
|
+
export type { McpServerConfig, McpAutoConfig } from "./types.js";
|
|
36
|
+
export { buildWorkflow, refineWorkflow } from "./workflow-builder.js";
|
|
37
|
+
export type { BuildWorkflowOptions } from "./workflow-builder.js";
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @sweny-ai/core — Skill library + DAG workflow orchestration
|
|
3
|
+
*
|
|
4
|
+
* Three concepts:
|
|
5
|
+
* Skill — a group of tools Claude can call (replaces "providers")
|
|
6
|
+
* Workflow — a DAG of nodes connected by edges (replaces "engine + recipes")
|
|
7
|
+
* execute() — walk the DAG, run Claude at each node
|
|
8
|
+
*
|
|
9
|
+
* @example
|
|
10
|
+
* ```ts
|
|
11
|
+
* import { execute, ClaudeClient, createSkillMap, github, sentry, slack } from '@sweny-ai/core'
|
|
12
|
+
* import { triageWorkflow } from '@sweny-ai/core/workflows'
|
|
13
|
+
*
|
|
14
|
+
* const skills = createSkillMap([github, sentry, slack])
|
|
15
|
+
* const claude = new ClaudeClient()
|
|
16
|
+
*
|
|
17
|
+
* const results = await execute(triageWorkflow, alertPayload, {
|
|
18
|
+
* skills,
|
|
19
|
+
* claude,
|
|
20
|
+
* observer: (event) => console.log(event),
|
|
21
|
+
* })
|
|
22
|
+
* ```
|
|
23
|
+
*/
|
|
24
|
+
export { consoleLogger } from "./types.js";
|
|
25
|
+
// Executor
|
|
26
|
+
export { execute } from "./executor.js";
|
|
27
|
+
// Claude client
|
|
28
|
+
export { ClaudeClient } from "./claude.js";
|
|
29
|
+
// Skills
|
|
30
|
+
export { github, linear, slack, sentry, datadog, betterstack, notification, builtinSkills, createSkillMap, allSkills, isSkillConfigured, configuredSkills, validateWorkflowSkills, } from "./skills/index.js";
|
|
31
|
+
// Schema & validation
|
|
32
|
+
export { workflowZ, nodeZ, edgeZ, skillZ, parseWorkflow, validateWorkflow, workflowJsonSchema } from "./schema.js";
|
|
33
|
+
// MCP auto-injection
|
|
34
|
+
export { buildAutoMcpServers } from "./mcp.js";
|
|
35
|
+
// Workflow builder
|
|
36
|
+
export { buildWorkflow, refineWorkflow } from "./workflow-builder.js";
|
package/dist/mcp.d.ts
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
import type { McpAutoConfig, McpServerConfig } from "./types.js";
|
|
2
|
+
/**
|
|
3
|
+
* Auto-configure well-known MCP servers based on which providers
|
|
4
|
+
* and workspace tools the user has enabled.
|
|
5
|
+
*
|
|
6
|
+
* Category A: triggered by sourceControlProvider / issueTrackerProvider / observabilityProvider
|
|
7
|
+
* Category B: workspace tools — explicit opt-in via workspaceTools array
|
|
8
|
+
*
|
|
9
|
+
* User-supplied servers (userMcpServers) always win on key conflicts.
|
|
10
|
+
*/
|
|
11
|
+
export declare function buildAutoMcpServers(config: McpAutoConfig): Record<string, McpServerConfig>;
|