@sweny-ai/core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/__tests__/claude.test.d.ts +1 -0
- package/dist/__tests__/claude.test.js +328 -0
- package/dist/__tests__/executor.test.d.ts +1 -0
- package/dist/__tests__/executor.test.js +296 -0
- package/dist/__tests__/integration/datadog.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/datadog.integration.test.js +23 -0
- package/dist/__tests__/integration/e2e-workflow.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/e2e-workflow.integration.test.js +75 -0
- package/dist/__tests__/integration/github.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/github.integration.test.js +37 -0
- package/dist/__tests__/integration/harness.d.ts +24 -0
- package/dist/__tests__/integration/harness.js +34 -0
- package/dist/__tests__/integration/linear.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/linear.integration.test.js +15 -0
- package/dist/__tests__/integration/sentry.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/sentry.integration.test.js +20 -0
- package/dist/__tests__/integration/slack.integration.test.d.ts +1 -0
- package/dist/__tests__/integration/slack.integration.test.js +22 -0
- package/dist/__tests__/schema.test.d.ts +1 -0
- package/dist/__tests__/schema.test.js +239 -0
- package/dist/__tests__/skills-index.test.d.ts +1 -0
- package/dist/__tests__/skills-index.test.js +122 -0
- package/dist/__tests__/skills.test.d.ts +1 -0
- package/dist/__tests__/skills.test.js +296 -0
- package/dist/__tests__/studio.test.d.ts +1 -0
- package/dist/__tests__/studio.test.js +172 -0
- package/dist/__tests__/testing.test.d.ts +1 -0
- package/dist/__tests__/testing.test.js +224 -0
- package/dist/browser.d.ts +17 -0
- package/dist/browser.js +22 -0
- package/dist/claude.d.ts +48 -0
- package/dist/claude.js +293 -0
- package/dist/cli/check.d.ts +11 -0
- package/dist/cli/check.js +237 -0
- package/dist/cli/config-file.d.ts +12 -0
- package/dist/cli/config-file.js +208 -0
- package/dist/cli/config.d.ts +77 -0
- package/dist/cli/config.js +565 -0
- package/dist/cli/main.d.ts +10 -0
- package/dist/cli/main.js +744 -0
- package/dist/cli/output.d.ts +26 -0
- package/dist/cli/output.js +357 -0
- package/dist/cli/renderer.d.ts +33 -0
- package/dist/cli/renderer.js +423 -0
- package/dist/cli/renderer.test.d.ts +1 -0
- package/dist/cli/renderer.test.js +302 -0
- package/dist/cli/setup.d.ts +11 -0
- package/dist/cli/setup.js +310 -0
- package/dist/executor.d.ts +29 -0
- package/dist/executor.js +173 -0
- package/dist/executor.test.d.ts +1 -0
- package/dist/executor.test.js +314 -0
- package/dist/index.d.ts +37 -0
- package/dist/index.js +36 -0
- package/dist/mcp.d.ts +11 -0
- package/dist/mcp.js +183 -0
- package/dist/mcp.test.d.ts +1 -0
- package/dist/mcp.test.js +334 -0
- package/dist/schema.d.ts +318 -0
- package/dist/schema.js +207 -0
- package/dist/skills/betterstack.d.ts +7 -0
- package/dist/skills/betterstack.js +114 -0
- package/dist/skills/datadog.d.ts +7 -0
- package/dist/skills/datadog.js +107 -0
- package/dist/skills/github.d.ts +8 -0
- package/dist/skills/github.js +155 -0
- package/dist/skills/index.d.ts +68 -0
- package/dist/skills/index.js +134 -0
- package/dist/skills/linear.d.ts +7 -0
- package/dist/skills/linear.js +89 -0
- package/dist/skills/notification.d.ts +11 -0
- package/dist/skills/notification.js +142 -0
- package/dist/skills/sentry.d.ts +7 -0
- package/dist/skills/sentry.js +105 -0
- package/dist/skills/slack.d.ts +8 -0
- package/dist/skills/slack.js +115 -0
- package/dist/studio.d.ts +124 -0
- package/dist/studio.js +174 -0
- package/dist/testing.d.ts +88 -0
- package/dist/testing.js +253 -0
- package/dist/types.d.ts +144 -0
- package/dist/types.js +11 -0
- package/dist/workflow-builder.d.ts +45 -0
- package/dist/workflow-builder.js +120 -0
- package/dist/workflow-builder.test.d.ts +1 -0
- package/dist/workflow-builder.test.js +117 -0
- package/dist/workflows/implement.d.ts +11 -0
- package/dist/workflows/implement.js +83 -0
- package/dist/workflows/index.d.ts +2 -0
- package/dist/workflows/index.js +2 -0
- package/dist/workflows/triage.d.ts +18 -0
- package/dist/workflows/triage.js +108 -0
- package/package.json +83 -0
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
import { describe, it, expect } from "vitest";
import { available, logAvailability } from "./harness.js";
import { execute } from "../../executor.js";
import { ClaudeClient } from "../../claude.js";
import { createSkillMap } from "../../skills/index.js";

logAvailability();

// A minimal 2-node workflow that only uses Claude (no external tools).
// Nodes are defined separately so each step's contract is easy to scan.
const analyzeNode = {
  name: "Analyze Input",
  instruction: "Analyze the input data and determine if it represents a critical issue. Return JSON with fields: severity (high/medium/low) and summary (one sentence).",
  skills: [],
  output: {
    type: "object",
    properties: {
      severity: { type: "string", enum: ["high", "medium", "low"] },
      summary: { type: "string" },
    },
    required: ["severity", "summary"],
  },
};

const reportNode = {
  name: "Generate Report",
  instruction: "Based on the analysis, write a brief incident report. Return JSON with fields: title (string) and recommendation (string).",
  skills: [],
  output: {
    type: "object",
    properties: {
      title: { type: "string" },
      recommendation: { type: "string" },
    },
    required: ["title", "recommendation"],
  },
};

const analysisWorkflow = {
  id: "e2e-analysis",
  name: "E2E Analysis",
  description: "Two-node workflow using real Claude",
  entry: "analyze",
  nodes: {
    analyze: analyzeNode,
    report: reportNode,
  },
  edges: [{ from: "analyze", to: "report" }],
};

describe.runIf(available.claude)("e2e workflow with real Claude", () => {
  it("executes a 2-node analysis workflow", async () => {
    // Use Haiku for speed/cost in tests
    const claude = new ClaudeClient({
      model: "claude-haiku-4-5-20251001",
      maxTurns: 3,
    });

    const observed = [];
    const input = {
      alert: "CPU usage at 98% for 15 minutes on production server web-03",
      service: "api-gateway",
      timestamp: "2025-03-25T08:00:00Z",
    };
    const runOptions = {
      skills: createSkillMap([]),
      claude,
      observer: (event) => observed.push(event),
      config: {},
    };

    const nodeResults = await execute(analysisWorkflow, input, runOptions);

    // Both nodes executed
    expect(nodeResults.size).toBe(2);
    expect(nodeResults.get("analyze")?.status).toBe("success");
    expect(nodeResults.get("report")?.status).toBe("success");

    // Analysis should have severity
    expect(nodeResults.get("analyze").data).toBeDefined();
    // Report should have title
    expect(nodeResults.get("report").data).toBeDefined();

    // Events stream should be complete
    expect(observed[0].type).toBe("workflow:start");
    expect(observed[observed.length - 1].type).toBe("workflow:end");
    expect(observed.filter((event) => event.type === "node:enter")).toHaveLength(2);
  }, 60_000); // 60s timeout for real API calls
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
import { describe, it, expect } from "vitest";
import { available, logAvailability } from "./harness.js";
import { github } from "../../skills/github.js";

logAvailability();

// Fresh handler context per call, sourced from the environment.
const ctx = () => ({
  config: { GITHUB_TOKEN: process.env.GITHUB_TOKEN },
  logger: console,
});

/**
 * Look up a tool on the github skill by name.
 *
 * Fails with a descriptive error instead of the opaque
 * "Cannot read properties of undefined" TypeError that
 * `tools.find(...).handler(...)` produces when a tool is
 * renamed or removed from the skill.
 *
 * @param {string} name - exact tool name to look up
 * @returns the matching tool definition
 * @throws {Error} when the github skill exposes no tool with that name
 */
function getTool(name) {
  const tool = github.tools.find((t) => t.name === name);
  if (!tool) throw new Error(`github skill has no tool named "${name}"`);
  return tool;
}

describe.runIf(available.github)("github integration", () => {
  it("searches code in a public repo", async () => {
    const result = await getTool("github_search_code").handler({ query: "README", repo: "octocat/Hello-World" }, ctx());
    expect(result).toHaveProperty("items");
  }, 15_000);
  it("gets an issue from a public repo", async () => {
    const result = await getTool("github_get_issue").handler({ repo: "octocat/Hello-World", number: 1 }, ctx());
    expect(result).toHaveProperty("title");
    expect(result).toHaveProperty("state");
  }, 15_000);
  it("lists recent commits", async () => {
    const result = await getTool("github_list_recent_commits").handler({ repo: "octocat/Hello-World", branch: "master", per_page: 3 }, ctx());
    expect(Array.isArray(result)).toBe(true);
    expect(result.length).toBeGreaterThan(0);
  }, 15_000);
  it("searches issues", async () => {
    const result = await getTool("github_search_issues").handler({ query: "is:issue", repo: "octocat/Hello-World" }, ctx());
    expect(result).toHaveProperty("items");
  }, 15_000);
  it("gets a file from a public repo", async () => {
    const result = await getTool("github_get_file").handler({ repo: "octocat/Hello-World", path: "README" }, ctx());
    expect(result).toHaveProperty("content");
  }, 15_000);
});
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
/**
 * Integration Test Harness
 *
 * Detects which external services are available via environment variables.
 * Integration tests use `describe.runIf(available.xxx)` to skip cleanly
 * when credentials aren't present.
 *
 * Usage:
 *   npx vitest run --config vitest.integration.config.ts
 *
 * Or with env vars:
 *   GITHUB_TOKEN=ghp_... npx vitest run --config vitest.integration.config.ts
 */
export declare const available: {
    /** True when GITHUB_TOKEN is set */
    github: boolean;
    /** True when LINEAR_API_KEY is set */
    linear: boolean;
    /** True when both SENTRY_AUTH_TOKEN and SENTRY_ORG are set */
    sentry: boolean;
    /** True when both DD_API_KEY and DD_APP_KEY are set */
    datadog: boolean;
    /** True when SLACK_WEBHOOK_URL or SLACK_BOT_TOKEN is set */
    slack: boolean;
    /** True when DISCORD_WEBHOOK_URL is set */
    discord: boolean;
    /** True when ANTHROPIC_API_KEY is set */
    claude: boolean;
};
/** Print which services are available (for CI logs) */
export declare function logAvailability(): void;
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
/**
 * Integration Test Harness
 *
 * Detects which external services are available via environment variables.
 * Integration tests use `describe.runIf(available.xxx)` to skip cleanly
 * when credentials aren't present.
 *
 * Usage:
 *   npx vitest run --config vitest.integration.config.ts
 *
 * Or with env vars:
 *   GITHUB_TOKEN=ghp_... npx vitest run --config vitest.integration.config.ts
 */
export const available = {
  github: Boolean(process.env.GITHUB_TOKEN),
  linear: Boolean(process.env.LINEAR_API_KEY),
  sentry: Boolean(process.env.SENTRY_AUTH_TOKEN && process.env.SENTRY_ORG),
  datadog: Boolean(process.env.DD_API_KEY && process.env.DD_APP_KEY),
  slack: Boolean(process.env.SLACK_WEBHOOK_URL || process.env.SLACK_BOT_TOKEN),
  discord: Boolean(process.env.DISCORD_WEBHOOK_URL),
  claude: Boolean(process.env.ANTHROPIC_API_KEY),
};
/** Print which services are available (for CI logs) */
export function logAvailability() {
  // Partition service names into enabled/disabled in a single pass.
  const enabled = [];
  const disabled = [];
  for (const [service, isOn] of Object.entries(available)) {
    (isOn ? enabled : disabled).push(service);
  }
  console.log(`\n🔌 Integration test services:`);
  if (enabled.length > 0)
    console.log(`   ✅ Available: ${enabled.join(", ")}`);
  if (disabled.length > 0)
    console.log(`   ⏭️  Skipped: ${disabled.join(", ")}`);
  console.log();
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
import { describe, it, expect } from "vitest";
import { available } from "./harness.js";
import { linear } from "../../skills/linear.js";

// Build a fresh handler context from the environment for every call.
const buildContext = () => ({
  config: { LINEAR_API_KEY: process.env.LINEAR_API_KEY },
  logger: console,
});

describe.runIf(available.linear)("linear integration", () => {
  it("searches issues", async () => {
    const searchTool = linear.tools.find((candidate) => candidate.name === "linear_search_issues");
    const payload = await searchTool.handler({ query: "bug", limit: 3 }, buildContext());
    expect(payload).toHaveProperty("searchIssues");
    expect(payload.searchIssues).toHaveProperty("nodes");
  }, 15_000);
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
import { describe, it, expect } from "vitest";
import { available } from "./harness.js";
import { sentry } from "../../skills/sentry.js";

// Build a fresh handler context from the environment for every call.
const buildContext = () => ({
  config: {
    SENTRY_AUTH_TOKEN: process.env.SENTRY_AUTH_TOKEN,
    SENTRY_ORG: process.env.SENTRY_ORG,
    SENTRY_BASE_URL: process.env.SENTRY_BASE_URL ?? "",
  },
  logger: console,
});

describe.runIf(available.sentry)("sentry integration", () => {
  // These tests require at least one project in the org
  it("list_issues returns an array", async () => {
    const listTool = sentry.tools.find((candidate) => candidate.name === "sentry_list_issues");
    // Use the first project found, or skip if none
    const project = process.env.SENTRY_PROJECT ?? "javascript";
    const issues = await listTool.handler({ project }, buildContext());
    expect(Array.isArray(issues)).toBe(true);
  }, 15_000);
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { describe, it, expect } from "vitest";
import { available } from "./harness.js";
import { slack } from "../../skills/slack.js";

// Build a fresh handler context from the environment for every call.
const buildContext = () => ({
  config: {
    SLACK_WEBHOOK_URL: process.env.SLACK_WEBHOOK_URL ?? "",
    SLACK_BOT_TOKEN: process.env.SLACK_BOT_TOKEN ?? "",
  },
  logger: console,
});

// Only run Slack integration tests when explicitly opted in
// to avoid spamming channels during routine test runs
const slackEnabled = available.slack && process.env.SLACK_INTEGRATION_TEST === "1";

describe.runIf(slackEnabled)("slack integration", () => {
  it("sends a test message via webhook", async () => {
    // Webhook-only test: bail out quietly when only a bot token is configured.
    if (!process.env.SLACK_WEBHOOK_URL)
      return;
    const sendTool = slack.tools.find((candidate) => candidate.name === "slack_send_message");
    const text = `🧪 SWEny integration test — ${new Date().toISOString()}`;
    const result = await sendTool.handler({ text }, buildContext());
    expect(result).toHaveProperty("ok");
  }, 15_000);
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
import { describe, it, expect } from "vitest";
|
|
2
|
+
import { workflowZ, nodeZ, edgeZ, skillZ, toolZ, parseWorkflow, validateWorkflow, workflowJsonSchema, } from "../schema.js";
|
|
3
|
+
import { triageWorkflow } from "../workflows/triage.js";
|
|
4
|
+
import { implementWorkflow } from "../workflows/implement.js";
|
|
5
|
+
// ─── Test fixtures ───────────────────────────────────────────────
// Minimal linear workflow (a → b → c). Node "a" references the
// "github" skill so skill-validation tests have a known reference
// to match (or deliberately mismatch) against a skill set.
const validWorkflow = {
    id: "test-wf",
    name: "Test Workflow",
    description: "A→B→C",
    entry: "a",
    nodes: {
        a: { name: "A", instruction: "Do A", skills: ["github"] },
        b: { name: "B", instruction: "Do B", skills: [] },
        c: { name: "C", instruction: "Do C", skills: [] },
    },
    edges: [
        { from: "a", to: "b" },
        { from: "b", to: "c" },
    ],
};
|
|
21
|
+
// ─── Zod schema tests ────────────────────────────────────────────
describe("Zod schemas", () => {
  describe("nodeZ", () => {
    it("parses a valid node", () => {
      const parsed = nodeZ.parse({ name: "Step", instruction: "Do it", skills: ["github"] });
      expect(parsed.name).toBe("Step");
      expect(parsed.skills).toEqual(["github"]);
    });
    it("defaults skills to empty array", () => {
      const parsed = nodeZ.parse({ name: "Step", instruction: "Do it" });
      expect(parsed.skills).toEqual([]);
    });
    it("rejects empty name", () => {
      const attempt = () => nodeZ.parse({ name: "", instruction: "x" });
      expect(attempt).toThrow();
    });
    it("rejects empty instruction", () => {
      const attempt = () => nodeZ.parse({ name: "x", instruction: "" });
      expect(attempt).toThrow();
    });
    it("accepts optional output schema", () => {
      const parsed = nodeZ.parse({
        name: "S",
        instruction: "I",
        output: { type: "object", properties: { severity: { type: "string" } } },
      });
      expect(parsed.output).toBeDefined();
    });
  });
  describe("edgeZ", () => {
    it("parses a simple edge", () => {
      const parsed = edgeZ.parse({ from: "a", to: "b" });
      expect(parsed.from).toBe("a");
      expect(parsed.when).toBeUndefined();
    });
    it("parses an edge with condition", () => {
      const parsed = edgeZ.parse({ from: "a", to: "b", when: "severity is high" });
      expect(parsed.when).toBe("severity is high");
    });
    it("rejects empty from", () => {
      const attempt = () => edgeZ.parse({ from: "", to: "b" });
      expect(attempt).toThrow();
    });
    it("rejects empty to", () => {
      const attempt = () => edgeZ.parse({ from: "a", to: "" });
      expect(attempt).toThrow();
    });
  });
  describe("toolZ", () => {
    it("parses a valid tool", () => {
      const parsed = toolZ.parse({
        name: "my_tool",
        description: "Does things",
        input_schema: { type: "object", properties: {} },
      });
      expect(parsed.name).toBe("my_tool");
    });
    it("rejects empty name", () => {
      const attempt = () => toolZ.parse({ name: "", description: "x", input_schema: {} });
      expect(attempt).toThrow();
    });
  });
  describe("skillZ", () => {
    it("parses a valid skill", () => {
      const parsed = skillZ.parse({
        id: "test",
        name: "Test",
        description: "A test skill",
        category: "general",
        config: {
          TOKEN: { description: "API token", required: true, env: "TOKEN" },
        },
        tools: [{ name: "t", description: "d", input_schema: {} }],
      });
      expect(parsed.id).toBe("test");
      expect(parsed.tools).toHaveLength(1);
    });
  });
  describe("workflowZ", () => {
    it("parses a valid workflow", () => {
      // Round-trip through JSON so we parse plain data, not the fixture object.
      const raw = JSON.parse(JSON.stringify(validWorkflow));
      const parsed = workflowZ.parse(raw);
      expect(parsed.id).toBe("test-wf");
      expect(Object.keys(parsed.nodes)).toHaveLength(3);
    });
    it("defaults description to empty string", () => {
      const parsed = workflowZ.parse({
        id: "x",
        name: "X",
        entry: "a",
        nodes: { a: { name: "A", instruction: "Do A" } },
        edges: [],
      });
      expect(parsed.description).toBe("");
    });
    it("rejects missing id", () => {
      const attempt = () => workflowZ.parse({ name: "x", entry: "a", nodes: {}, edges: [] });
      expect(attempt).toThrow();
    });
    it("rejects missing entry", () => {
      const attempt = () => workflowZ.parse({ id: "x", name: "x", nodes: {}, edges: [] });
      expect(attempt).toThrow();
    });
    it("rejects empty id", () => {
      const attempt = () => workflowZ.parse({ id: "", name: "x", entry: "a", nodes: {}, edges: [] });
      expect(attempt).toThrow();
    });
  });
  describe("parseWorkflow", () => {
    it("parses a raw JSON workflow", () => {
      const raw = JSON.parse(JSON.stringify(validWorkflow));
      const parsed = parseWorkflow(raw);
      expect(parsed.id).toBe("test-wf");
      expect(parsed.nodes["a"].instruction).toBe("Do A");
    });
    it("rejects invalid input", () => {
      // Non-objects and structurally invalid objects must all throw.
      for (const badInput of [{ id: "" }, null, "not an object"]) {
        expect(() => parseWorkflow(badInput)).toThrow();
      }
    });
  });
});
|
|
136
|
+
// ─── Structural validation tests ─────────────────────────────────
describe("validateWorkflow", () => {
  // Assert that validating `workflow` reports an error matching `expected`.
  const expectIssue = (workflow, expected) =>
    expect(validateWorkflow(workflow)).toContainEqual(expect.objectContaining(expected));

  it("validates a correct workflow", () => {
    expect(validateWorkflow(validWorkflow)).toEqual([]);
  });
  it("detects missing entry node", () => {
    expectIssue({ ...validWorkflow, entry: "nonexistent" }, { code: "MISSING_ENTRY" });
  });
  it("detects unknown edge source", () => {
    expectIssue(
      { ...validWorkflow, edges: [{ from: "ghost", to: "b" }] },
      { code: "UNKNOWN_EDGE_SOURCE" }
    );
  });
  it("detects unknown edge target", () => {
    expectIssue(
      { ...validWorkflow, edges: [{ from: "a", to: "ghost" }] },
      { code: "UNKNOWN_EDGE_TARGET" }
    );
  });
  it("detects self-loops", () => {
    const loopy = {
      ...validWorkflow,
      edges: [
        { from: "a", to: "a" },
        { from: "a", to: "b" },
        { from: "b", to: "c" },
      ],
    };
    expectIssue(loopy, { code: "SELF_LOOP", nodeId: "a" });
  });
  it("detects unreachable nodes", () => {
    // c has no incoming edge from the reachable set
    expectIssue(
      { ...validWorkflow, edges: [{ from: "a", to: "b" }] },
      { code: "UNREACHABLE_NODE", nodeId: "c" }
    );
  });
  it("detects unknown skill references", () => {
    const issues = validateWorkflow(validWorkflow, new Set(["not-github"]));
    expect(issues).toContainEqual(expect.objectContaining({ code: "UNKNOWN_SKILL", nodeId: "a" }));
  });
  it("passes when known skills match", () => {
    expect(validateWorkflow(validWorkflow, new Set(["github"]))).toEqual([]);
  });
  it("validates triage workflow", () => {
    expect(validateWorkflow(triageWorkflow)).toEqual([]);
  });
  it("validates implement workflow", () => {
    expect(validateWorkflow(implementWorkflow)).toEqual([]);
  });
  it("handles empty nodes object", () => {
    const empty = {
      id: "empty",
      name: "Empty",
      description: "",
      entry: "x",
      nodes: {},
      edges: [],
    };
    expectIssue(empty, { code: "MISSING_ENTRY" });
  });
});
|
|
210
|
+
// ─── JSON Schema shape tests ─────────────────────────────────────
describe("workflowJsonSchema", () => {
  // Hoist the two nested sub-schemas the assertions repeatedly drill into.
  const edgeSchema = workflowJsonSchema.properties.edges.items;
  const nodeSchema = workflowJsonSchema.properties.nodes.additionalProperties;

  it("has the correct $id", () => {
    expect(workflowJsonSchema.$id).toBe("https://sweny.ai/schemas/workflow.json");
  });
  it("requires all top-level fields", () => {
    for (const field of ["id", "name", "nodes", "edges", "entry"]) {
      expect(workflowJsonSchema.required).toContain(field);
    }
  });
  it("has additionalProperties: false at top level", () => {
    expect(workflowJsonSchema.additionalProperties).toBe(false);
  });
  it("has additionalProperties: false on edge items", () => {
    expect(edgeSchema.additionalProperties).toBe(false);
  });
  it("has additionalProperties: false on node items", () => {
    expect(nodeSchema.additionalProperties).toBe(false);
  });
  it("edge items require from and to", () => {
    expect(edgeSchema.required).toContain("from");
    expect(edgeSchema.required).toContain("to");
  });
  it("node items require name and instruction", () => {
    expect(nodeSchema.required).toContain("name");
    expect(nodeSchema.required).toContain("instruction");
  });
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
import { describe, it, expect } from "vitest";
|
|
2
|
+
import { github, linear, slack, sentry, datadog, betterstack, notification, builtinSkills, createSkillMap, allSkills, isSkillConfigured, validateWorkflowSkills, } from "../skills/index.js";
|
|
3
|
+
describe("skills registry", () => {
  it("exports all builtin skills", () => {
    expect(builtinSkills.length).toBeGreaterThanOrEqual(7);
    const skillIds = builtinSkills.map((skill) => skill.id);
    for (const expected of ["github", "linear", "slack", "sentry", "datadog", "betterstack", "notification"]) {
      expect(skillIds).toContain(expected);
    }
  });
  it("individual skill exports match builtins", () => {
    for (const skill of [github, linear, slack, sentry, datadog, betterstack, notification]) {
      expect(builtinSkills).toContain(skill);
    }
  });
  it("createSkillMap builds a correct map", () => {
    const skillMap = createSkillMap([github, slack]);
    expect(skillMap.size).toBe(2);
    expect(skillMap.get("github")).toBe(github);
    expect(skillMap.get("slack")).toBe(slack);
  });
  it("createSkillMap throws on duplicate IDs", () => {
    const attempt = () => createSkillMap([github, github]);
    expect(attempt).toThrow("Duplicate skill ID");
  });
  it("allSkills returns all builtin skills as a map", () => {
    const skillMap = allSkills();
    expect(skillMap.size).toBe(builtinSkills.length);
    for (const { id } of builtinSkills) {
      expect(skillMap.has(id)).toBe(true);
    }
  });
  it("no tool name collisions across skills", () => {
    // Tool names must be globally unique so they can share one namespace.
    const seen = new Set();
    for (const { tools } of builtinSkills) {
      for (const { name } of tools) {
        expect(seen.has(name)).toBe(false);
        seen.add(name);
      }
    }
  });
  it("all tools have name, description, and input_schema", () => {
    for (const { tools } of builtinSkills) {
      for (const tool of tools) {
        expect(tool.name).toBeTruthy();
        expect(tool.description).toBeTruthy();
        expect(tool.input_schema).toBeDefined();
        expect(typeof tool.handler).toBe("function");
      }
    }
  });
  it("all skills have valid metadata and category", () => {
    const validCategories = ["git", "observability", "tasks", "notification", "general"];
    for (const skill of builtinSkills) {
      expect(skill.id).toBeTruthy();
      expect(skill.name).toBeTruthy();
      expect(skill.description).toBeTruthy();
      expect(skill.config).toBeDefined();
      expect(Array.isArray(skill.tools)).toBe(true);
      expect(validCategories).toContain(skill.category);
    }
  });
  it("isSkillConfigured checks env vars", () => {
    expect(isSkillConfigured(github, { GITHUB_TOKEN: "ghp_xxx" })).toBe(true);
    expect(isSkillConfigured(github, {})).toBe(false);
    // Slack has all-optional config — needs at least one to be usable
    expect(isSkillConfigured(slack, {})).toBe(false);
    expect(isSkillConfigured(slack, { SLACK_WEBHOOK_URL: "https://hooks.slack.com/..." })).toBe(true);
    expect(isSkillConfigured(slack, { SLACK_BOT_TOKEN: "xoxb-..." })).toBe(true);
  });
  it("validateWorkflowSkills detects missing providers", () => {
    const workflow = {
      nodes: {
        gather: { skills: ["github", "sentry", "betterstack"] },
        notify: { skills: ["slack"] },
      },
    };
    // Only github is available
    const availableSkills = createSkillMap([github]);
    const outcome = validateWorkflowSkills(workflow, availableSkills);
    expect(outcome.configured).toHaveLength(1);
    expect(outcome.configured[0].id).toBe("github");
    expect(outcome.missing.some((entry) => entry.id === "sentry")).toBe(true);
    expect(outcome.missing.some((entry) => entry.id === "betterstack")).toBe(true);
    // gather node has github (git) but no observability → error
    expect(outcome.errors.length).toBeGreaterThan(0);
    // notify node has no notification → warning (not error)
    expect(outcome.warnings.length).toBeGreaterThan(0);
  });
  it("validateWorkflowSkills passes when all categories covered", () => {
    const workflow = {
      nodes: {
        gather: { skills: ["github", "sentry"] },
      },
    };
    const availableSkills = createSkillMap([github, sentry]);
    const outcome = validateWorkflowSkills(workflow, availableSkills);
    expect(outcome.errors).toHaveLength(0);
    expect(outcome.warnings).toHaveLength(0);
    expect(outcome.configured).toHaveLength(2);
  });
  it("config fields have env vars matching canonical names", () => {
    // Datadog should use DD_* prefix
    for (const field of Object.values(datadog.config)) {
      if (field.env) {
        expect(field.env).toMatch(/^DD_/);
      }
    }
    // Sentry base URL should be SENTRY_BASE_URL not SENTRY_URL
    const sentryUrlField = Object.values(sentry.config).find((field) => field.env?.includes("SENTRY") && field.env?.includes("URL"));
    if (sentryUrlField) {
      expect(sentryUrlField.env).toBe("SENTRY_BASE_URL");
    }
  });
});
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export {};
|