@usezombie/zombiectl 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +76 -0
- package/bin/zombiectl.js +11 -0
- package/bun.lock +29 -0
- package/package.json +28 -0
- package/scripts/run-tests.mjs +38 -0
- package/src/cli.js +275 -0
- package/src/commands/admin.js +39 -0
- package/src/commands/agent.js +98 -0
- package/src/commands/agent_harness.js +43 -0
- package/src/commands/agent_improvement_report.js +42 -0
- package/src/commands/agent_profile.js +39 -0
- package/src/commands/agent_proposals.js +158 -0
- package/src/commands/agent_scores.js +44 -0
- package/src/commands/core-ops.js +108 -0
- package/src/commands/core.js +537 -0
- package/src/commands/harness.js +35 -0
- package/src/commands/harness_activate.js +53 -0
- package/src/commands/harness_active.js +32 -0
- package/src/commands/harness_compile.js +40 -0
- package/src/commands/harness_source.js +72 -0
- package/src/commands/run_preview.js +212 -0
- package/src/commands/run_preview_walk.js +1 -0
- package/src/commands/runs.js +35 -0
- package/src/commands/spec_init.js +287 -0
- package/src/commands/workspace_billing.js +26 -0
- package/src/constants/error-codes.js +1 -0
- package/src/lib/agent-loop.js +106 -0
- package/src/lib/analytics.js +114 -0
- package/src/lib/api-paths.js +2 -0
- package/src/lib/browser.js +96 -0
- package/src/lib/http.js +149 -0
- package/src/lib/sse-parser.js +50 -0
- package/src/lib/state.js +67 -0
- package/src/lib/tool-executors.js +110 -0
- package/src/lib/walk-dir.js +41 -0
- package/src/program/args.js +95 -0
- package/src/program/auth-guard.js +12 -0
- package/src/program/auth-token.js +44 -0
- package/src/program/banner.js +46 -0
- package/src/program/command-registry.js +17 -0
- package/src/program/http-client.js +38 -0
- package/src/program/io.js +83 -0
- package/src/program/routes.js +20 -0
- package/src/program/suggest.js +76 -0
- package/src/program/validate.js +24 -0
- package/src/ui-progress.js +59 -0
- package/src/ui-theme.js +62 -0
- package/test/admin_config.unit.test.js +25 -0
- package/test/agent-loop.unit.test.js +497 -0
- package/test/agent_harness.unit.test.js +52 -0
- package/test/agent_improvement_report.unit.test.js +74 -0
- package/test/agent_profile.unit.test.js +156 -0
- package/test/agent_proposals.unit.test.js +167 -0
- package/test/agent_scores.unit.test.js +220 -0
- package/test/analytics.unit.test.js +41 -0
- package/test/args.unit.test.js +69 -0
- package/test/auth-guard.test.js +33 -0
- package/test/auth-token.unit.test.js +112 -0
- package/test/banner.unit.test.js +442 -0
- package/test/browser.unit.test.js +16 -0
- package/test/cli-analytics.unit.test.js +296 -0
- package/test/did-you-mean.integration.test.js +76 -0
- package/test/doctor-json.test.js +81 -0
- package/test/error-codes.unit.test.js +7 -0
- package/test/harness-command.unit.test.js +180 -0
- package/test/harness-compile.test.js +81 -0
- package/test/harness-lifecycle.integration.test.js +339 -0
- package/test/harness-source-put.test.js +72 -0
- package/test/harness_activate.unit.test.js +48 -0
- package/test/harness_active.unit.test.js +53 -0
- package/test/harness_compile.unit.test.js +54 -0
- package/test/harness_source.unit.test.js +59 -0
- package/test/help.test.js +276 -0
- package/test/helpers-fs.js +32 -0
- package/test/helpers.js +31 -0
- package/test/io.unit.test.js +57 -0
- package/test/login.unit.test.js +115 -0
- package/test/logout.unit.test.js +65 -0
- package/test/parse.test.js +16 -0
- package/test/run-preview.edge.test.js +422 -0
- package/test/run-preview.integration.test.js +135 -0
- package/test/run-preview.security.test.js +246 -0
- package/test/run-preview.unit.test.js +131 -0
- package/test/run.unit.test.js +149 -0
- package/test/runs-cancel.unit.test.js +288 -0
- package/test/runs-list.unit.test.js +105 -0
- package/test/skill-secret.unit.test.js +94 -0
- package/test/spec-init.edge.test.js +232 -0
- package/test/spec-init.integration.test.js +128 -0
- package/test/spec-init.security.test.js +285 -0
- package/test/spec-init.unit.test.js +160 -0
- package/test/specs-sync.unit.test.js +164 -0
- package/test/sse-parser.unit.test.js +54 -0
- package/test/state.unit.test.js +34 -0
- package/test/streamfetch.unit.test.js +211 -0
- package/test/suggest.test.js +75 -0
- package/test/tool-executors.unit.test.js +165 -0
- package/test/validate.test.js +81 -0
- package/test/workspace-add.test.js +106 -0
- package/test/workspace.unit.test.js +230 -0
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
import fs from "node:fs/promises";
|
|
2
|
+
import path from "node:path";
|
|
3
|
+
import { queueCliAnalyticsEvent, setCliAnalyticsContext } from "../lib/analytics.js";
|
|
4
|
+
|
|
5
|
+
/**
 * `harness source put --file <path>` — upload a harness source markdown file
 * to the workspace via PUT /v1/workspaces/:id/harness/source.
 *
 * @param {object} ctx - CLI context (stdout, stderr, jsonMode).
 * @param {object} parsed - Parsed flags; reads options.file, options.name, options["agent-id"].
 * @param {string} workspaceId - Target workspace id (URL-encoded into the path).
 * @param {object} deps - Injected helpers (request, apiHeaders, ui, printers, fs hooks).
 * @returns {Promise<number>} Process exit code: 0 on success, 2 on usage/input errors.
 */
export async function commandHarnessSourcePut(ctx, parsed, workspaceId, deps) {
  const {
    request,
    apiHeaders,
    ui,
    printJson,
    printKeyValue = () => {},
    printSection = () => {},
    writeLine,
    readFile = fs.readFile,
    resolvePath = path.resolve,
  } = deps;

  const file = parsed.options.file;
  if (!file) {
    writeLine(ctx.stderr, ui.err("harness source put requires --file"));
    return 2;
  }

  // A missing/unreadable file is a usage error (exit 2) like the other guard
  // clauses — previously readFile rejected out of this function unhandled.
  let fileContent;
  try {
    fileContent = await readFile(resolvePath(file), "utf8");
  } catch (err) {
    writeLine(ctx.stderr, ui.err(`failed to read file: ${err.message}`));
    return 2;
  }

  const MAX_SIZE = 2 * 1024 * 1024; // 2 MiB upload cap
  const sizeBytes = Buffer.byteLength(fileContent, "utf8");
  if (sizeBytes > MAX_SIZE) {
    writeLine(ctx.stderr, ui.err(`file too large: ${sizeBytes} bytes (max 2MB)`));
    return 2;
  }

  if (!ctx.jsonMode) {
    writeLine(ctx.stdout, ui.info(`uploading ${path.basename(String(file))} (${sizeBytes} bytes)`));
  }

  // Default the harness name to the file's basename without extension.
  const inferredName = path.basename(String(file), path.extname(String(file)));
  const body = {
    agent_id: parsed.options["agent-id"] || null,
    name: parsed.options.name || inferredName || "Workspace Harness",
    source_markdown: fileContent,
  };

  const res = await request(ctx, `/v1/workspaces/${encodeURIComponent(workspaceId)}/harness/source`, {
    method: "PUT",
    headers: apiHeaders(ctx),
    body: JSON.stringify(body),
  });

  // Record analytics context and queue the upload event.
  setCliAnalyticsContext(ctx, {
    workspace_id: workspaceId,
    agent_id: body.agent_id,
    harness_name: body.name,
    harness_config_version_id: res.config_version_id,
    harness_source_bytes: sizeBytes,
  });
  queueCliAnalyticsEvent(ctx, "harness_source_uploaded", {
    workspace_id: workspaceId,
    harness_config_version_id: res.config_version_id,
  });

  if (ctx.jsonMode) printJson(ctx.stdout, res);
  else {
    printSection(ctx.stdout, "Harness source stored");
    printKeyValue(ctx.stdout, {
      workspace_id: workspaceId,
      config_version_id: res.config_version_id,
      name: body.name,
      size_bytes: sizeBytes,
    });
  }
  return 0;
}
|
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
import { readFileSync, existsSync } from "node:fs";
|
|
2
|
+
import { resolve, relative } from "node:path";
|
|
3
|
+
import { agentLoop } from "../lib/agent-loop.js";
|
|
4
|
+
|
|
5
|
+
// Confidence display config: icon (TTY) + bracket label (no-TTY) + ANSI code
const CONF_DISPLAY = {
  high: { ansi: "32", icon: "\u{25CF}", label: "[HIGH]" }, // green
  medium: { ansi: "33", icon: "\u{25C6}", label: "[MED] " }, // yellow
  low: { ansi: "2", icon: "\u{25CB}", label: "[LOW] " }, // dim
};

/**
 * Returns the confidence indicator string appropriate for the output stream.
 * TTY -> colored Unicode icon. Non-TTY / NO_COLOR -> plain text label.
 * Unknown confidence values fall back to the "low" display.
 */
export function confIndicator(confidence, stream) {
  const display = CONF_DISPLAY[confidence] ?? CONF_DISPLAY.low;
  const useColor = !process.env.NO_COLOR && Boolean(stream?.isTTY);
  return useColor ? `\u001b[${display.ansi}m${display.icon}\u001b[0m` : display.label;
}
|
|
22
|
+
|
|
23
|
+
/**
 * Strip ANSI escape sequences from a string.
 * Removes SGR color codes first, then any remaining C0/DEL control bytes
 * (tab, newline, and carriage return are preserved).
 */
export function sanitizeDisplay(str) {
  // eslint-disable-next-line no-control-regex
  const withoutSgr = str.replace(/\x1b\[[0-9;]*m/g, "");
  // eslint-disable-next-line no-control-regex
  return withoutSgr.replace(/[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]/g, "");
}
|
|
30
|
+
|
|
31
|
+
/**
 * Print the predicted impact list.
 */
const PREVIEW_TITLE = "Predicted file impact";

export function printPreview(stdout, matches, { writeLine, ui }) {
  if (!matches.length) {
    writeLine(stdout, ui.info("no file references detected in spec"));
    return;
  }

  // Header with an underline the same width as the title.
  writeLine(stdout, ui.head(PREVIEW_TITLE));
  writeLine(stdout, ui.dim("\u2500".repeat(PREVIEW_TITLE.length)));
  writeLine(stdout);

  matches.forEach(({ file, confidence }) => {
    const indicator = confIndicator(confidence, stdout);
    writeLine(stdout, ` ${indicator} ${sanitizeDisplay(file)}`);
  });
  writeLine(stdout);
  writeLine(stdout, ui.dim(` ${matches.length} file(s) in blast radius`));
}
|
|
53
|
+
|
|
54
|
+
/**
 * Parse agent text output into structured matches.
 * Expected format: "● src/file.go high — reason" or similar.
 * Lines that do not carry a high/medium/low confidence word are ignored.
 */
function parseAgentMatches(text) {
  const lineRe = /^[●◆○\s]*(\S+)\s+(high|medium|low)/gm;
  return [...text.matchAll(lineRe)].map(([, file, confidence]) => ({ file, confidence }));
}
|
|
67
|
+
|
|
68
|
+
/**
 * Run preview using agent relay: agent reads spec + explores repo.
 * Returns { matches } or null on error.
 */
export async function runPreview(specFile, repoPath, ctx, deps) {
  const { ui, writeLine } = deps;

  if (!existsSync(specFile)) {
    writeLine(ctx.stderr, ui.err(`spec file not found: ${specFile}`));
    return null;
  }

  let specText;
  try {
    specText = readFileSync(specFile, "utf8");
  } catch (err) {
    writeLine(ctx.stderr, ui.err(`failed to read spec file: ${err.message}`));
    return null;
  }

  // Workspace available -> agent-backed preview; otherwise the local
  // heuristic fallback (legacy, kept for offline use).
  const absRepoPath = resolve(repoPath);
  return ctx.workspaceId
    ? agentPreview(specText, absRepoPath, ctx, deps)
    : localPreview(specText, absRepoPath, ctx, deps);
}
|
|
98
|
+
|
|
99
|
+
/**
 * Agent-backed preview: sends the spec to the workspace preview endpoint and
 * lets the agent explore the repo via tool calls before reporting impact.
 *
 * @param {string} markdown - Spec markdown, already read from disk by runPreview.
 * @param {string} repoPath - Absolute repo root handed to the agent loop.
 * @param {object} ctx - CLI context (workspaceId, jsonMode, stdout, stderr).
 * @param {object} deps - Injected helpers; destructures { writeLine, ui } and
 *   forwards the whole object to printPreview.
 * @returns {Promise<{matches: Array}|null>} Parsed matches, or null when the
 *   agent produced no final text.
 */
async function agentPreview(markdown, repoPath, ctx, deps) {
  const { writeLine, ui } = deps;
  const endpoint = `/v1/workspaces/${ctx.workspaceId}/spec/preview`;

  // Human-mode progress line; suppressed under --json so stdout stays machine-parseable.
  if (!ctx.jsonMode) {
    writeLine(ctx.stdout, "");
    writeLine(ctx.stdout, ui.dim(" \u{1F9DF} analyzing your repo against spec..."));
  }

  const result = await agentLoop(
    endpoint,
    `Which files will this spec touch?\n\n${markdown}`,
    repoPath,
    ctx,
    {
      // Echo each tool call (read_file / list_dir / glob) as a dim progress line.
      onToolCall: (tc) => {
        if (!ctx.jsonMode) {
          const label = tc.name === "read_file" ? `read ${tc.input.path}`
            : tc.name === "list_dir" ? `listed ${tc.input.path || "./"}`
            : `glob ${tc.input.pattern}`;
          writeLine(ctx.stdout, ui.dim(` \u{2192} ${label}`));
        }
      },
      onError: (msg) => {
        writeLine(ctx.stderr, ui.err(msg));
      },
    },
  );

  // No final text from the agent means there is nothing to parse — treat as failure.
  if (!result.text) {
    writeLine(ctx.stderr, ui.err("agent returned no content"));
    return null;
  }

  // Extract "<icon> path confidence" lines from the agent's free-text answer.
  const matches = parseAgentMatches(result.text);
  if (!ctx.jsonMode) {
    writeLine(ctx.stdout, "");
    printPreview(ctx.stdout, matches, deps);
    // Footer: wall time, number of tool calls, token usage (when reported).
    const secs = (result.wallMs / 1000).toFixed(1);
    const tokens = result.usage?.total_tokens ?? "?";
    writeLine(ctx.stdout, ui.dim(` ${secs}s | ${result.toolCalls} reads | ${tokens} tokens`));
  }

  return { matches };
}
|
|
144
|
+
|
|
145
|
+
/**
 * Local heuristic preview — kept for offline/no-auth scenarios.
 * Uses regex to extract file refs from spec and match against repo tree.
 */
async function localPreview(markdown, repoPath, ctx, deps) {
  // Lazy import so the walk helper is only loaded on this fallback path.
  const { walkDirForPreview } = await import("./run_preview_walk.js");

  const refs = extractSpecRefs(markdown);
  const relFiles = walkDirForPreview(repoPath).map(
    (abs) => relative(repoPath, abs).replace(/\\/g, "/"),
  );
  const matches = matchRefsToFiles(refs, relFiles);
  printPreview(ctx.stdout, matches, deps);
  return { matches };
}
|
|
159
|
+
|
|
160
|
+
// ── Legacy heuristic functions (used by localPreview fallback) ──────────────
|
|
161
|
+
|
|
162
|
+
/**
 * Extract candidate file references from spec markdown: quoted paths,
 * paths under well-known source directories, and bare file names with a
 * recognized source extension. Order of first appearance is preserved.
 */
export function extractSpecRefs(markdown) {
  const REF_PATTERNS = [
    // quoted/backticked paths containing a "/" or "."
    /["`']([a-zA-Z0-9_./-]{3,}[/.][a-zA-Z0-9_/-]+)["`']/g,
    // paths rooted at common source directories
    /\b((?:src|tests?|lib|pkg|cmd|internal|docs|app|web|api|workers?|scripts?)\/[a-zA-Z0-9_./-]+)/g,
    // bare file names with a known source-file extension
    /\b([a-zA-Z0-9_-]+\.(?:go|rs|ts|tsx|js|mjs|py|zig|rb|java|kt|c|cpp|cs|swift|ex|exs|sh|yaml|yml|toml|json|md))\b/g,
  ];

  const refs = new Set();
  for (const re of REF_PATTERNS) {
    for (const m of markdown.matchAll(re)) refs.add(m[1]);
  }
  return [...refs];
}
|
|
172
|
+
|
|
173
|
+
/**
 * Score how well a repo file path matches a spec reference.
 * Returns "high" | "medium" | "low" | null (no match).
 */
function scoreMatch(filePath, ref) {
  const normFile = filePath.replace(/\\/g, "/");
  const normRef = ref.replace(/\\/g, "/");

  // Exact path or path suffix: strongest signal.
  if (normFile === normRef || normFile.endsWith(normRef)) return "high";

  const refParts = normRef.split("/").filter(Boolean);

  // Multi-segment ref appearing anywhere in the path.
  if (refParts.length >= 2 && normFile.includes(refParts.join("/"))) return "medium";

  // Single-segment ref: compare against the basename / directory names.
  if (refParts.length === 1) {
    const [refName] = refParts;
    const fileParts = normFile.split("/").filter(Boolean);
    const baseName = fileParts[fileParts.length - 1];
    if (baseName === refName) return "medium";
    if (baseName.startsWith(refName) || normFile.includes(`/${refName}/`)) return "low";
  }

  return normFile.includes(normRef) ? "low" : null;
}

const CONFIDENCE_ORDER = { high: 0, medium: 1, low: 2 };

/**
 * Match spec references against repo files, keeping the best confidence per
 * file, sorted by confidence then path.
 */
export function matchRefsToFiles(refs, repoFiles) {
  const bestByFile = new Map();
  for (const file of repoFiles) {
    for (const ref of refs) {
      const conf = scoreMatch(file, ref);
      if (!conf) continue;
      const prev = bestByFile.get(file);
      if (prev === undefined || CONFIDENCE_ORDER[conf] < CONFIDENCE_ORDER[prev]) {
        bestByFile.set(file, conf);
      }
    }
  }
  return [...bestByFile.entries()]
    .map(([file, confidence]) => ({ file, confidence }))
    .sort(
      (a, b) =>
        CONFIDENCE_ORDER[a.confidence] - CONFIDENCE_ORDER[b.confidence] ||
        a.file.localeCompare(b.file),
    );
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Re-export walkDir under a preview-specific alias; loaded lazily via
// dynamic import by localPreview in run_preview.js.
export { walkDir as walkDirForPreview } from "../lib/walk-dir.js";
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
// M17_001 §3: zombiectl runs cancel <run_id>

/**
 * Dispatch `runs` subcommands. Currently only `cancel`, which POSTs to
 * /v1/runs/:id:cancel. Returns (a promise of) a process exit code.
 */
function commandRuns(ctx, args, deps) {
  const { parseFlags, printJson, request, apiHeaders, ui, writeLine } = deps;

  const cancel = async (subArgs) => {
    const { positionals } = parseFlags(subArgs);
    const runId = positionals[0];
    if (!runId) {
      writeLine(ctx.stderr, ui.err("usage: zombiectl runs cancel <run_id>"));
      return 2;
    }

    const result = await request(ctx, `/v1/runs/${encodeURIComponent(runId)}:cancel`, {
      method: "POST",
      headers: apiHeaders(ctx),
    });

    if (ctx.jsonMode) {
      printJson(ctx.stdout, result);
    } else {
      writeLine(ctx.stdout, ui.ok(`Run ${runId} cancel requested`));
    }
    return 0;
  };

  const [action, ...rest] = args;
  if (action === "cancel") return cancel(rest);

  writeLine(ctx.stderr, ui.err(`unknown runs subcommand: ${action}`));
  writeLine(ctx.stderr, ui.err("available: cancel"));
  return Promise.resolve(2);
}

export { commandRuns };
|
|
@@ -0,0 +1,287 @@
|
|
|
1
|
+
import { readFileSync, existsSync, statSync, writeFileSync, mkdirSync } from "node:fs";
|
|
2
|
+
import { join, dirname, resolve } from "node:path";
|
|
3
|
+
import { walkDir } from "../lib/walk-dir.js";
|
|
4
|
+
import { agentLoop } from "../lib/agent-loop.js";
|
|
5
|
+
|
|
6
|
+
/**
 * Parse Makefile and return list of target names.
 *
 * Reads `<repoPath>/Makefile` (best-effort: returns [] when the file is
 * missing or unreadable) and collects every rule name of the form
 * `name: ...` at the start of a line.
 */
export function parseMakeTargets(repoPath) {
  const makefile = join(repoPath, "Makefile");
  if (!existsSync(makefile)) return [];

  let content;
  try {
    content = readFileSync(makefile, "utf8");
  } catch {
    return []; // unreadable Makefile is treated the same as no Makefile
  }

  // A target line is `name:` optionally followed by prerequisites. The
  // `(?!=)` lookahead rejects `NAME := value` variable assignments, which the
  // previous pattern wrongly collected as targets. Names must start with a
  // letter, which already excludes special targets like `.PHONY`.
  const targetRe = /^([a-zA-Z][a-zA-Z0-9_.-]*)\s*:(?!=)/;
  const targets = new Set();
  for (const line of content.split("\n")) {
    const m = line.match(targetRe);
    if (m) targets.add(m[1]);
  }
  return [...targets];
}
|
|
27
|
+
|
|
28
|
+
/**
 * Detect test patterns from file paths.
 * Returns human-readable pattern labels in first-seen order.
 */
export function detectTestPatterns(files) {
  const checks = [
    [/(^|\/)tests?\//, "tests/ directory"],
    [/\.(test|spec)\.[a-z]+$/, "*.test.* / *.spec.*"],
    [/_test\.[a-z]+$/, "*_test.*"],
    [/\.test\.[a-z]+$/, "*.test.*"],
  ];

  const patterns = new Set();
  for (const f of files) {
    const normalized = f.replace(/\\/g, "/");
    for (const [re, label] of checks) {
      if (re.test(normalized)) patterns.add(label);
    }
  }
  return [...patterns];
}
|
|
42
|
+
|
|
43
|
+
/**
 * Detect project structure indicators.
 * Checks a fixed list of conventional top-level directories and returns the
 * ones that exist, each suffixed with "/".
 */
export function detectProjectStructure(repoPath) {
  const KNOWN_DIRS = ["src", "tests", "test", "docs", "lib", "pkg", "cmd", "internal"];
  return KNOWN_DIRS.filter((dir) => existsSync(join(repoPath, dir))).map((dir) => `${dir}/`);
}
|
|
53
|
+
|
|
54
|
+
/**
 * Scan a repo and return detected context.
 * Combines the Makefile-target, test-pattern, and directory-structure
 * detectors plus the walked file count.
 */
export function scanRepo(repoPath) {
  const files = walkDir(repoPath);
  const makeTargets = parseMakeTargets(repoPath);
  const testPatterns = detectTestPatterns(files);
  const projectStructure = detectProjectStructure(repoPath);
  return { makeTargets, testPatterns, projectStructure, fileCount: files.length };
}
|
|
66
|
+
|
|
67
|
+
/**
 * commandSpecInit: implement `zombiectl spec init [--path DIR] [--output PATH] [--describe TEXT]`
 *
 * With --describe: uses agent relay to generate a spec from intent (requires auth).
 * Without --describe: falls back to local template generation (no auth required).
 *
 * @returns {Promise<number>} Process exit code (2 on bad --path).
 */
export async function commandSpecInit(args, ctx, deps) {
  const { parseFlags, writeLine, ui } = deps;
  const parsed = parseFlags(args);
  const repoPath = resolve(parsed.options.path || ".");
  const outputPath = parsed.options.output || "docs/spec/new-feature.md";
  const describe = parsed.options.describe;

  // Guard clauses: the scan target must exist and be a directory.
  if (!existsSync(repoPath)) {
    writeLine(ctx.stderr, ui.err(`path not found: ${repoPath}`));
    return 2;
  }
  if (!statSync(repoPath).isDirectory()) {
    writeLine(ctx.stderr, ui.err(`path is not a directory: ${repoPath}`));
    return 2;
  }

  // Agent-backed generation needs both an intent and an authed workspace;
  // anything else takes the offline template path.
  return describe && ctx.workspaceId
    ? agentSpecInit(describe, repoPath, outputPath, ctx, deps)
    : localSpecInit(repoPath, outputPath, ctx, deps);
}
|
|
97
|
+
|
|
98
|
+
async function agentSpecInit(describe, repoPath, outputPath, ctx, deps) {
|
|
99
|
+
const { writeLine, ui, printJson } = deps;
|
|
100
|
+
const endpoint = `/v1/workspaces/${ctx.workspaceId}/spec/template`;
|
|
101
|
+
|
|
102
|
+
if (!ctx.jsonMode) {
|
|
103
|
+
writeLine(ctx.stdout, "");
|
|
104
|
+
writeLine(ctx.stdout, ui.dim(" \u{1F9DF} analyzing your repo..."));
|
|
105
|
+
}
|
|
106
|
+
|
|
107
|
+
const result = await agentLoop(endpoint, `Generate a spec template for: ${describe}`, repoPath, ctx, {
|
|
108
|
+
onToolCall: (tc) => {
|
|
109
|
+
if (!ctx.jsonMode) {
|
|
110
|
+
const label = tc.name === "read_file" ? `read ${tc.input.path}`
|
|
111
|
+
: tc.name === "list_dir" ? `listed ${tc.input.path || "./"}`
|
|
112
|
+
: `glob ${tc.input.pattern}`;
|
|
113
|
+
writeLine(ctx.stdout, ui.dim(` \u{2192} ${label}`));
|
|
114
|
+
}
|
|
115
|
+
},
|
|
116
|
+
onError: (msg) => {
|
|
117
|
+
writeLine(ctx.stderr, ui.err(msg));
|
|
118
|
+
},
|
|
119
|
+
});
|
|
120
|
+
|
|
121
|
+
if (!result.text) {
|
|
122
|
+
writeLine(ctx.stderr, ui.err("agent returned no content"));
|
|
123
|
+
return 1;
|
|
124
|
+
}
|
|
125
|
+
|
|
126
|
+
if (!ctx.jsonMode) {
|
|
127
|
+
writeLine(ctx.stdout, "");
|
|
128
|
+
writeLine(ctx.stdout, ui.dim(" \u{1F9DF} drafting spec..."));
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
const outDir = dirname(outputPath);
|
|
132
|
+
try {
|
|
133
|
+
mkdirSync(outDir, { recursive: true });
|
|
134
|
+
writeFileSync(outputPath, result.text, "utf8");
|
|
135
|
+
} catch (err) {
|
|
136
|
+
writeLine(ctx.stderr, ui.err(`failed to write spec: ${err.message}`));
|
|
137
|
+
return 1;
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
if (ctx.jsonMode) {
|
|
141
|
+
printJson(ctx.stdout, {
|
|
142
|
+
output: outputPath,
|
|
143
|
+
tool_calls: result.toolCalls,
|
|
144
|
+
wall_ms: result.wallMs,
|
|
145
|
+
usage: result.usage,
|
|
146
|
+
});
|
|
147
|
+
} else {
|
|
148
|
+
writeLine(ctx.stdout, "");
|
|
149
|
+
writeLine(ctx.stdout, ui.ok(`\u{2713} wrote ${outputPath}`));
|
|
150
|
+
const secs = (result.wallMs / 1000).toFixed(1);
|
|
151
|
+
const tokens = result.usage?.total_tokens ?? "?";
|
|
152
|
+
writeLine(ctx.stdout, ui.dim(` ${secs}s | ${result.toolCalls} reads | ${tokens} tokens`));
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
return 0;
|
|
156
|
+
}
|
|
157
|
+
|
|
158
|
+
function localSpecInit(repoPath, outputPath, ctx, deps) {
|
|
159
|
+
const { writeLine, ui, printJson } = deps;
|
|
160
|
+
const scan = scanRepo(repoPath);
|
|
161
|
+
const template = generateTemplate(scan);
|
|
162
|
+
|
|
163
|
+
const outDir = dirname(outputPath);
|
|
164
|
+
try {
|
|
165
|
+
mkdirSync(outDir, { recursive: true });
|
|
166
|
+
writeFileSync(outputPath, template, "utf8");
|
|
167
|
+
} catch (err) {
|
|
168
|
+
writeLine(ctx.stderr, ui.err(`failed to write template: ${err.message}`));
|
|
169
|
+
return 1;
|
|
170
|
+
}
|
|
171
|
+
|
|
172
|
+
if (ctx.jsonMode) {
|
|
173
|
+
printJson(ctx.stdout, {
|
|
174
|
+
output: outputPath,
|
|
175
|
+
detected: {
|
|
176
|
+
make_targets: scan.makeTargets,
|
|
177
|
+
test_patterns: scan.testPatterns,
|
|
178
|
+
project_structure: scan.projectStructure,
|
|
179
|
+
file_count: scan.fileCount,
|
|
180
|
+
},
|
|
181
|
+
});
|
|
182
|
+
} else {
|
|
183
|
+
writeLine(ctx.stdout, ui.ok(`template written \u{2192} ${outputPath}`));
|
|
184
|
+
writeLine(ctx.stdout);
|
|
185
|
+
const rows = [];
|
|
186
|
+
if (scan.makeTargets.length > 0) rows.push(["make targets", scan.makeTargets.join(", ")]);
|
|
187
|
+
if (scan.testPatterns.length > 0) rows.push(["test patterns", scan.testPatterns.join(", ")]);
|
|
188
|
+
if (scan.projectStructure.length > 0) rows.push(["structure", scan.projectStructure.join(" ")]);
|
|
189
|
+
if (rows.length > 0) {
|
|
190
|
+
const w = Math.max(...rows.map(([k]) => k.length));
|
|
191
|
+
const sep = ui.dim(" \u{00B7} ");
|
|
192
|
+
for (const [k, v] of rows) {
|
|
193
|
+
writeLine(ctx.stdout, ` ${ui.dim(k.padEnd(w))}${sep}${v}`);
|
|
194
|
+
}
|
|
195
|
+
writeLine(ctx.stdout);
|
|
196
|
+
}
|
|
197
|
+
writeLine(ctx.stdout, ui.dim(`${scan.fileCount} file(s) scanned`));
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
return 0;
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
function currentDateStr() {
|
|
204
|
+
const d = new Date();
|
|
205
|
+
const months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"];
|
|
206
|
+
return `${months[d.getMonth()]} ${String(d.getDate()).padStart(2,"0")}, ${d.getFullYear()}`;
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
/**
 * Generate a spec template markdown string from scan results.
 *
 * @param {{makeTargets: string[], testPatterns: string[], projectStructure: string[]}} scan
 *   Output of scanRepo(); the fileCount field is not used here.
 * @returns {string} Complete spec markdown with {placeholder} slots for the author.
 */
export function generateTemplate(scan) {
  const { makeTargets, testPatterns, projectStructure } = scan;

  // Only well-known verification targets become gates; other make targets are ignored.
  const GATE_TARGETS = new Set(["lint","test","build","check","fmt","format","verify","qa","qa-smoke"]);
  const detectedGates = makeTargets.filter((t) => GATE_TARGETS.has(t));

  // Each non-empty list renders as markdown bullets; empty lists get an
  // italic placeholder line instead.
  const gatesBlock = detectedGates.length > 0
    ? detectedGates.map((t) => `- \`make ${t}\``).join("\n")
    : "- _(no Makefile gates detected)_";

  const structureBlock = projectStructure.length > 0
    ? projectStructure.map((d) => `- \`${d}\``).join("\n")
    : "- _(empty or minimal repo)_";

  return `# M{N}_001: {Feature Title}

**Prototype:** v1.0.0
**Milestone:** M{N}
**Workstream:** 001
**Date:** ${currentDateStr()}
**Status:** PENDING
**Priority:** P1 \u{2014} {one-line description of what this workstream delivers}
**Batch:** B1
**Depends on:** \u{2014}

---

## 1.0 Implementation

**Status:** PENDING

Implement the feature below.

**Detected project structure:**
${structureBlock}

**Dimensions:**
- 1.1 PENDING {First implementation step}
- 1.2 PENDING {Second implementation step}
- 1.3 PENDING {Write or update tests}
- 1.4 PENDING {Handle edge cases}

---

## 2.0 Verification

**Status:** PENDING

**Detected gates:**
${gatesBlock}

**Test patterns detected:** ${testPatterns.length > 0 ? testPatterns.join(", ") : "none"}

**Dimensions:**
- 2.1 PENDING All detected gates pass
- 2.2 PENDING New tests cover the feature path
- 2.3 PENDING No regressions in existing tests

---

## 3.0 Acceptance Criteria

**Status:** PENDING

- [ ] 3.1 {Primary success criterion}
- [ ] 3.2 {Secondary success criterion}
- [ ] 3.3 All detected gates pass with no new failures

---

## 4.0 Out of Scope

- {Item not in scope}
- {Another out of scope item}
`;
}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
/**
 * `workspace upgrade-scale` — attach a subscription and upgrade the workspace
 * plan via POST /v1/workspaces/:id/billing/scale.
 *
 * @returns {Promise<number>} Exit code: 0 on success, 2 when --subscription-id is missing.
 */
export async function commandWorkspaceUpgradeScale(ctx, parsed, workspaceId, deps) {
  const { request, apiHeaders, ui, printJson, writeLine } = deps;
  const subscriptionId = parsed.options["subscription-id"] || parsed.positionals[1];

  if (!subscriptionId) {
    writeLine(ctx.stderr, ui.err("workspace upgrade-scale requires --subscription-id"));
    return 2;
  }

  const endpoint = `/v1/workspaces/${encodeURIComponent(workspaceId)}/billing/scale`;
  const res = await request(ctx, endpoint, {
    method: "POST",
    headers: apiHeaders(ctx),
    body: JSON.stringify({ subscription_id: subscriptionId }),
  });

  if (ctx.jsonMode) {
    printJson(ctx.stdout, res);
    return 0;
  }

  writeLine(ctx.stdout, ui.ok(`workspace upgraded to ${res.plan_tier}`));
  writeLine(ctx.stdout, `workspace_id: ${workspaceId}`);
  writeLine(ctx.stdout, `plan_tier: ${res.plan_tier}`);
  writeLine(ctx.stdout, `billing_status: ${res.billing_status}`);
  // subscription_id is echoed only when the API returned one.
  if (res.subscription_id != null) writeLine(ctx.stdout, `subscription_id: ${res.subscription_id}`);
  return 0;
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
// Stable CLI error code for exhausted workspace billing credit.
export const ERR_BILLING_CREDIT_EXHAUSTED = "UZ-BILLING-005";
|