@agile-vibe-coding/avc 0.1.1 → 0.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cli/agent-loader.js +21 -0
- package/cli/agents/agent-selector.md +152 -0
- package/cli/agents/architecture-recommender.md +418 -0
- package/cli/agents/code-implementer.md +117 -0
- package/cli/agents/code-validator.md +80 -0
- package/cli/agents/context-reviewer-epic.md +101 -0
- package/cli/agents/context-reviewer-story.md +92 -0
- package/cli/agents/context-writer-epic.md +145 -0
- package/cli/agents/context-writer-story.md +111 -0
- package/cli/agents/database-deep-dive.md +470 -0
- package/cli/agents/database-recommender.md +634 -0
- package/cli/agents/doc-distributor.md +176 -0
- package/cli/agents/doc-writer-epic.md +42 -0
- package/cli/agents/doc-writer-story.md +43 -0
- package/cli/agents/documentation-updater.md +203 -0
- package/cli/agents/duplicate-detector.md +110 -0
- package/cli/agents/epic-story-decomposer.md +559 -0
- package/cli/agents/feature-context-generator.md +91 -0
- package/cli/agents/gap-checker-epic.md +52 -0
- package/cli/agents/impact-checker-story.md +51 -0
- package/cli/agents/migration-guide-generator.md +305 -0
- package/cli/agents/mission-scope-generator.md +143 -0
- package/cli/agents/mission-scope-validator.md +146 -0
- package/cli/agents/project-context-extractor.md +122 -0
- package/cli/agents/project-documentation-creator.json +226 -0
- package/cli/agents/project-documentation-creator.md +595 -0
- package/cli/agents/question-prefiller.md +269 -0
- package/cli/agents/refiner-epic.md +39 -0
- package/cli/agents/refiner-story.md +42 -0
- package/cli/agents/scaffolding-generator.md +99 -0
- package/cli/agents/seed-validator.md +71 -0
- package/cli/agents/story-doc-enricher.md +133 -0
- package/cli/agents/story-scope-reviewer.md +147 -0
- package/cli/agents/story-splitter.md +83 -0
- package/cli/agents/suggestion-business-analyst.md +88 -0
- package/cli/agents/suggestion-deployment-architect.md +263 -0
- package/cli/agents/suggestion-product-manager.md +129 -0
- package/cli/agents/suggestion-security-specialist.md +156 -0
- package/cli/agents/suggestion-technical-architect.md +269 -0
- package/cli/agents/suggestion-ux-researcher.md +93 -0
- package/cli/agents/task-subtask-decomposer.md +188 -0
- package/cli/agents/validator-documentation.json +183 -0
- package/cli/agents/validator-documentation.md +455 -0
- package/cli/agents/validator-selector.md +211 -0
- package/cli/ansi-colors.js +21 -0
- package/cli/api-reference-tool.js +368 -0
- package/cli/build-docs.js +29 -8
- package/cli/ceremony-history.js +369 -0
- package/cli/checks/catalog.json +76 -0
- package/cli/checks/code/quality.json +26 -0
- package/cli/checks/code/testing.json +14 -0
- package/cli/checks/code/traceability.json +26 -0
- package/cli/checks/cross-refs/epic.json +171 -0
- package/cli/checks/cross-refs/story.json +149 -0
- package/cli/checks/epic/api.json +114 -0
- package/cli/checks/epic/backend.json +126 -0
- package/cli/checks/epic/cloud.json +126 -0
- package/cli/checks/epic/data.json +102 -0
- package/cli/checks/epic/database.json +114 -0
- package/cli/checks/epic/developer.json +182 -0
- package/cli/checks/epic/devops.json +174 -0
- package/cli/checks/epic/frontend.json +162 -0
- package/cli/checks/epic/mobile.json +102 -0
- package/cli/checks/epic/qa.json +90 -0
- package/cli/checks/epic/security.json +184 -0
- package/cli/checks/epic/solution-architect.json +192 -0
- package/cli/checks/epic/test-architect.json +90 -0
- package/cli/checks/epic/ui.json +102 -0
- package/cli/checks/epic/ux.json +90 -0
- package/cli/checks/fixes/epic-fix-template.md +10 -0
- package/cli/checks/fixes/story-fix-template.md +10 -0
- package/cli/checks/story/api.json +186 -0
- package/cli/checks/story/backend.json +102 -0
- package/cli/checks/story/cloud.json +102 -0
- package/cli/checks/story/data.json +210 -0
- package/cli/checks/story/database.json +102 -0
- package/cli/checks/story/developer.json +168 -0
- package/cli/checks/story/devops.json +102 -0
- package/cli/checks/story/frontend.json +174 -0
- package/cli/checks/story/mobile.json +102 -0
- package/cli/checks/story/qa.json +210 -0
- package/cli/checks/story/security.json +198 -0
- package/cli/checks/story/solution-architect.json +230 -0
- package/cli/checks/story/test-architect.json +210 -0
- package/cli/checks/story/ui.json +102 -0
- package/cli/checks/story/ux.json +102 -0
- package/cli/coding-order.js +401 -0
- package/cli/command-logger.js +49 -12
- package/cli/components/static-output.js +63 -0
- package/cli/console-output-manager.js +94 -0
- package/cli/dependency-checker.js +72 -0
- package/cli/docs-sync.js +306 -0
- package/cli/epic-story-validator.js +659 -0
- package/cli/evaluation-prompts.js +1008 -0
- package/cli/execution-context.js +195 -0
- package/cli/generate-summary-table.js +340 -0
- package/cli/init-model-config.js +704 -0
- package/cli/init.js +1737 -278
- package/cli/kanban-server-manager.js +227 -0
- package/cli/llm-claude.js +150 -1
- package/cli/llm-gemini.js +109 -0
- package/cli/llm-local.js +493 -0
- package/cli/llm-mock.js +233 -0
- package/cli/llm-openai.js +454 -0
- package/cli/llm-provider.js +379 -3
- package/cli/llm-token-limits.js +211 -0
- package/cli/llm-verifier.js +662 -0
- package/cli/llm-xiaomi.js +143 -0
- package/cli/message-constants.js +49 -0
- package/cli/message-manager.js +334 -0
- package/cli/message-types.js +96 -0
- package/cli/messaging-api.js +291 -0
- package/cli/micro-check-fixer.js +335 -0
- package/cli/micro-check-runner.js +449 -0
- package/cli/micro-check-scorer.js +148 -0
- package/cli/micro-check-validator.js +538 -0
- package/cli/model-pricing.js +192 -0
- package/cli/model-query-engine.js +468 -0
- package/cli/model-recommendation-analyzer.js +495 -0
- package/cli/model-selector.js +270 -0
- package/cli/output-buffer.js +107 -0
- package/cli/process-manager.js +73 -2
- package/cli/prompt-logger.js +57 -0
- package/cli/repl-ink.js +4625 -1094
- package/cli/repl-old.js +3 -4
- package/cli/seed-processor.js +962 -0
- package/cli/sprint-planning-processor.js +4162 -0
- package/cli/template-processor.js +2149 -105
- package/cli/templates/project.md +25 -8
- package/cli/templates/vitepress-config.mts.template +5 -4
- package/cli/token-tracker.js +547 -0
- package/cli/tools/generate-story-validators.js +317 -0
- package/cli/tools/generate-validators.js +669 -0
- package/cli/update-checker.js +19 -17
- package/cli/update-notifier.js +4 -4
- package/cli/validation-router.js +667 -0
- package/cli/verification-tracker.js +563 -0
- package/cli/worktree-runner.js +654 -0
- package/kanban/README.md +386 -0
- package/kanban/client/README.md +205 -0
- package/kanban/client/components.json +20 -0
- package/kanban/client/dist/assets/index-D_KC5EQT.css +1 -0
- package/kanban/client/dist/assets/index-DjY5zqW7.js +351 -0
- package/kanban/client/dist/index.html +16 -0
- package/kanban/client/dist/vite.svg +1 -0
- package/kanban/client/index.html +15 -0
- package/kanban/client/package-lock.json +9442 -0
- package/kanban/client/package.json +44 -0
- package/kanban/client/postcss.config.js +6 -0
- package/kanban/client/public/vite.svg +1 -0
- package/kanban/client/src/App.jsx +651 -0
- package/kanban/client/src/components/ProjectFileEditorPopup.jsx +117 -0
- package/kanban/client/src/components/ceremony/AskArchPopup.jsx +420 -0
- package/kanban/client/src/components/ceremony/AskModelPopup.jsx +629 -0
- package/kanban/client/src/components/ceremony/CeremonyWorkflowModal.jsx +1133 -0
- package/kanban/client/src/components/ceremony/EpicStorySelectionModal.jsx +254 -0
- package/kanban/client/src/components/ceremony/ProviderSwitcherButton.jsx +290 -0
- package/kanban/client/src/components/ceremony/SponsorCallModal.jsx +686 -0
- package/kanban/client/src/components/ceremony/SprintPlanningModal.jsx +838 -0
- package/kanban/client/src/components/ceremony/steps/ArchitectureStep.jsx +150 -0
- package/kanban/client/src/components/ceremony/steps/CompleteStep.jsx +136 -0
- package/kanban/client/src/components/ceremony/steps/DatabaseStep.jsx +202 -0
- package/kanban/client/src/components/ceremony/steps/DeploymentStep.jsx +123 -0
- package/kanban/client/src/components/ceremony/steps/MissionStep.jsx +106 -0
- package/kanban/client/src/components/ceremony/steps/ReviewAnswersStep.jsx +329 -0
- package/kanban/client/src/components/ceremony/steps/RunningStep.jsx +249 -0
- package/kanban/client/src/components/kanban/CardDetailModal.jsx +646 -0
- package/kanban/client/src/components/kanban/EpicSection.jsx +146 -0
- package/kanban/client/src/components/kanban/FilterToolbar.jsx +222 -0
- package/kanban/client/src/components/kanban/GroupingSelector.jsx +63 -0
- package/kanban/client/src/components/kanban/KanbanBoard.jsx +211 -0
- package/kanban/client/src/components/kanban/KanbanCard.jsx +147 -0
- package/kanban/client/src/components/kanban/KanbanColumn.jsx +90 -0
- package/kanban/client/src/components/kanban/RefineWorkItemPopup.jsx +784 -0
- package/kanban/client/src/components/kanban/RunButton.jsx +162 -0
- package/kanban/client/src/components/kanban/SeedButton.jsx +176 -0
- package/kanban/client/src/components/layout/LoadingScreen.jsx +82 -0
- package/kanban/client/src/components/process/ProcessMonitorBar.jsx +80 -0
- package/kanban/client/src/components/settings/AgentEditorPopup.jsx +171 -0
- package/kanban/client/src/components/settings/AgentsTab.jsx +381 -0
- package/kanban/client/src/components/settings/ApiKeysTab.jsx +142 -0
- package/kanban/client/src/components/settings/CeremonyModelsTab.jsx +105 -0
- package/kanban/client/src/components/settings/CheckEditorPopup.jsx +507 -0
- package/kanban/client/src/components/settings/CostThresholdsTab.jsx +95 -0
- package/kanban/client/src/components/settings/ModelPricingTab.jsx +269 -0
- package/kanban/client/src/components/settings/OpenAIAuthSection.jsx +412 -0
- package/kanban/client/src/components/settings/ServersTab.jsx +121 -0
- package/kanban/client/src/components/settings/SettingsModal.jsx +84 -0
- package/kanban/client/src/components/stats/CostModal.jsx +384 -0
- package/kanban/client/src/components/ui/badge.jsx +27 -0
- package/kanban/client/src/components/ui/dialog.jsx +121 -0
- package/kanban/client/src/components/ui/tabs.jsx +85 -0
- package/kanban/client/src/hooks/__tests__/useGrouping.test.js +232 -0
- package/kanban/client/src/hooks/useGrouping.js +177 -0
- package/kanban/client/src/hooks/useWebSocket.js +120 -0
- package/kanban/client/src/lib/__tests__/api.test.js +196 -0
- package/kanban/client/src/lib/__tests__/status-grouping.test.js +94 -0
- package/kanban/client/src/lib/api.js +515 -0
- package/kanban/client/src/lib/status-grouping.js +154 -0
- package/kanban/client/src/lib/utils.js +11 -0
- package/kanban/client/src/main.jsx +10 -0
- package/kanban/client/src/store/__tests__/kanbanStore.test.js +164 -0
- package/kanban/client/src/store/ceremonyStore.js +172 -0
- package/kanban/client/src/store/filterStore.js +201 -0
- package/kanban/client/src/store/kanbanStore.js +123 -0
- package/kanban/client/src/store/processStore.js +65 -0
- package/kanban/client/src/store/sprintPlanningStore.js +33 -0
- package/kanban/client/src/styles/globals.css +59 -0
- package/kanban/client/tailwind.config.js +77 -0
- package/kanban/client/vite.config.js +28 -0
- package/kanban/client/vitest.config.js +28 -0
- package/kanban/dev-start.sh +47 -0
- package/kanban/package.json +12 -0
- package/kanban/server/index.js +537 -0
- package/kanban/server/routes/ceremony.js +454 -0
- package/kanban/server/routes/costs.js +163 -0
- package/kanban/server/routes/openai-oauth.js +366 -0
- package/kanban/server/routes/processes.js +50 -0
- package/kanban/server/routes/settings.js +736 -0
- package/kanban/server/routes/websocket.js +281 -0
- package/kanban/server/routes/work-items.js +487 -0
- package/kanban/server/services/CeremonyService.js +1441 -0
- package/kanban/server/services/FileSystemScanner.js +95 -0
- package/kanban/server/services/FileWatcher.js +144 -0
- package/kanban/server/services/HierarchyBuilder.js +196 -0
- package/kanban/server/services/ProcessRegistry.js +122 -0
- package/kanban/server/services/TaskRunnerService.js +261 -0
- package/kanban/server/services/WorkItemReader.js +123 -0
- package/kanban/server/services/WorkItemRefineService.js +510 -0
- package/kanban/server/start.js +49 -0
- package/kanban/server/utils/kanban-logger.js +132 -0
- package/kanban/server/utils/markdown.js +91 -0
- package/kanban/server/utils/status-grouping.js +107 -0
- package/kanban/server/workers/run-task-worker.js +121 -0
- package/kanban/server/workers/seed-worker.js +94 -0
- package/kanban/server/workers/sponsor-call-worker.js +92 -0
- package/kanban/server/workers/sprint-planning-worker.js +212 -0
- package/package.json +19 -7
- package/cli/agents/documentation.md +0 -302
|
@@ -0,0 +1,227 @@
|
|
|
1
|
+
import { exec, execSync } from 'child_process';
|
|
2
|
+
import { promisify } from 'util';
|
|
3
|
+
import path from 'path';
|
|
4
|
+
import fs from 'fs';
|
|
5
|
+
import net from 'net';
|
|
6
|
+
import http from 'http';
|
|
7
|
+
|
|
8
|
+
const execAsync = promisify(exec);
|
|
9
|
+
|
|
10
|
+
/**
|
|
11
|
+
* Kanban Server Manager
|
|
12
|
+
* Manages lifecycle of the AVC Kanban Board server
|
|
13
|
+
*/
|
|
14
|
+
export class KanbanServerManager {
|
|
15
|
+
constructor(projectRoot = process.cwd()) {
|
|
16
|
+
this.projectRoot = projectRoot;
|
|
17
|
+
this.avcDir = path.join(projectRoot, '.avc');
|
|
18
|
+
this.avcProjectPath = path.join(this.avcDir, 'project');
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
/**
|
|
22
|
+
* Check if .avc/project directory exists
|
|
23
|
+
*/
|
|
24
|
+
hasWorkItems() {
|
|
25
|
+
return fs.existsSync(this.avcProjectPath);
|
|
26
|
+
}
|
|
27
|
+
|
|
28
|
+
/**
|
|
29
|
+
* Get kanban server port from avc.json config
|
|
30
|
+
* Returns default port 4174 if not configured
|
|
31
|
+
*/
|
|
32
|
+
getPort() {
|
|
33
|
+
const configPath = path.join(this.avcDir, 'avc.json');
|
|
34
|
+
|
|
35
|
+
if (!fs.existsSync(configPath)) {
|
|
36
|
+
return 4174; // Default port
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
try {
|
|
40
|
+
const config = JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
41
|
+
return config.settings?.kanban?.port || 4174;
|
|
42
|
+
} catch (error) {
|
|
43
|
+
console.warn(`Could not read port from avc.json: ${error.message}`);
|
|
44
|
+
return 4174;
|
|
45
|
+
}
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
/**
|
|
49
|
+
* Check if a port is in use
|
|
50
|
+
* @param {number} port - Port number to check
|
|
51
|
+
* @returns {Promise<boolean>} - True if port is in use
|
|
52
|
+
*/
|
|
53
|
+
async isPortInUse(port) {
|
|
54
|
+
return new Promise((resolve) => {
|
|
55
|
+
const server = net.createServer();
|
|
56
|
+
|
|
57
|
+
server.once('error', (err) => {
|
|
58
|
+
if (err.code === 'EADDRINUSE') {
|
|
59
|
+
resolve(true); // Port is in use
|
|
60
|
+
} else {
|
|
61
|
+
resolve(false);
|
|
62
|
+
}
|
|
63
|
+
});
|
|
64
|
+
|
|
65
|
+
server.once('listening', () => {
|
|
66
|
+
server.close();
|
|
67
|
+
resolve(false); // Port is available
|
|
68
|
+
});
|
|
69
|
+
|
|
70
|
+
server.listen(port, '127.0.0.1');
|
|
71
|
+
});
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
/**
|
|
75
|
+
* Check if the server on this port is the AVC Kanban Board
|
|
76
|
+
* Makes HTTP request to /api/health and checks for AVC kanban response
|
|
77
|
+
* @param {number} port - Port number to check
|
|
78
|
+
* @returns {Promise<boolean>} - True if it's confirmed to be AVC kanban server
|
|
79
|
+
*/
|
|
80
|
+
async isKanbanServer(port) {
|
|
81
|
+
return new Promise((resolve) => {
|
|
82
|
+
const req = http.get(`http://localhost:${port}/api/health`, {
|
|
83
|
+
timeout: 2000,
|
|
84
|
+
}, (res) => {
|
|
85
|
+
let data = '';
|
|
86
|
+
|
|
87
|
+
res.on('data', (chunk) => {
|
|
88
|
+
data += chunk;
|
|
89
|
+
});
|
|
90
|
+
|
|
91
|
+
res.on('end', () => {
|
|
92
|
+
try {
|
|
93
|
+
const json = JSON.parse(data);
|
|
94
|
+
// Check if it's our AVC kanban server
|
|
95
|
+
const isKanban = json.projectRoot === this.projectRoot;
|
|
96
|
+
resolve(isKanban);
|
|
97
|
+
} catch {
|
|
98
|
+
resolve(false);
|
|
99
|
+
}
|
|
100
|
+
});
|
|
101
|
+
});
|
|
102
|
+
|
|
103
|
+
req.on('error', () => {
|
|
104
|
+
resolve(false); // Can't connect or verify
|
|
105
|
+
});
|
|
106
|
+
|
|
107
|
+
req.on('timeout', () => {
|
|
108
|
+
req.destroy();
|
|
109
|
+
resolve(false);
|
|
110
|
+
});
|
|
111
|
+
});
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
/**
|
|
115
|
+
* Find which process is using a port
|
|
116
|
+
* Works cross-platform (Linux, macOS, Windows)
|
|
117
|
+
* @param {number} port - Port number to check
|
|
118
|
+
* @returns {Promise<{pid: number, command: string} | null>} - Process info or null if not found
|
|
119
|
+
*/
|
|
120
|
+
async findProcessUsingPort(port) {
|
|
121
|
+
try {
|
|
122
|
+
let command;
|
|
123
|
+
let parseOutput;
|
|
124
|
+
|
|
125
|
+
if (process.platform === 'win32') {
|
|
126
|
+
// Windows: netstat -ano | findstr :PORT
|
|
127
|
+
command = `netstat -ano | findstr :${port}`;
|
|
128
|
+
parseOutput = (output) => {
|
|
129
|
+
const lines = output.split('\n');
|
|
130
|
+
for (const line of lines) {
|
|
131
|
+
if (
|
|
132
|
+
line.includes(`0.0.0.0:${port}`) ||
|
|
133
|
+
line.includes(`127.0.0.1:${port}`) ||
|
|
134
|
+
line.includes(`[::]:${port}`)
|
|
135
|
+
) {
|
|
136
|
+
const parts = line.trim().split(/\s+/);
|
|
137
|
+
const pid = parseInt(parts[parts.length - 1]);
|
|
138
|
+
if (pid && !isNaN(pid)) {
|
|
139
|
+
return { pid, command: 'Unknown' };
|
|
140
|
+
}
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
return null;
|
|
144
|
+
};
|
|
145
|
+
} else {
|
|
146
|
+
// Linux/macOS: lsof -i :PORT
|
|
147
|
+
command = `lsof -i :${port} -t -sTCP:LISTEN`;
|
|
148
|
+
parseOutput = (output) => {
|
|
149
|
+
const pid = parseInt(output.trim());
|
|
150
|
+
if (pid && !isNaN(pid)) {
|
|
151
|
+
// Try to get process name
|
|
152
|
+
try {
|
|
153
|
+
const psOutput = execSync(`ps -p ${pid} -o comm=`, { encoding: 'utf8' });
|
|
154
|
+
return { pid, command: psOutput.trim() };
|
|
155
|
+
} catch {
|
|
156
|
+
return { pid, command: 'Unknown' };
|
|
157
|
+
}
|
|
158
|
+
}
|
|
159
|
+
return null;
|
|
160
|
+
};
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
const { stdout } = await execAsync(command);
|
|
164
|
+
return parseOutput(stdout);
|
|
165
|
+
} catch (error) {
|
|
166
|
+
// Command failed (no process found) or permission error
|
|
167
|
+
return null;
|
|
168
|
+
}
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
/**
|
|
172
|
+
* Kill a process by PID
|
|
173
|
+
* @param {number} pid - Process ID to kill
|
|
174
|
+
* @returns {Promise<boolean>} - True if kill succeeded
|
|
175
|
+
*/
|
|
176
|
+
async killProcess(pid) {
|
|
177
|
+
try {
|
|
178
|
+
if (process.platform === 'win32') {
|
|
179
|
+
await execAsync(`taskkill /F /PID ${pid}`);
|
|
180
|
+
} else {
|
|
181
|
+
await execAsync(`kill -9 ${pid}`);
|
|
182
|
+
}
|
|
183
|
+
return true;
|
|
184
|
+
} catch (error) {
|
|
185
|
+
console.error(`Failed to kill process ${pid}:`, error.message);
|
|
186
|
+
return false;
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
/**
|
|
191
|
+
* Get the frontend dev server URL (Vite)
|
|
192
|
+
* @returns {string} Frontend URL
|
|
193
|
+
*/
|
|
194
|
+
getFrontendUrl() {
|
|
195
|
+
const config = this.getConfig();
|
|
196
|
+
const frontendPort = config.settings?.kanban?.frontendPort || 5173;
|
|
197
|
+
return `http://localhost:${frontendPort}`;
|
|
198
|
+
}
|
|
199
|
+
|
|
200
|
+
/**
|
|
201
|
+
* Get the backend API server URL
|
|
202
|
+
* @returns {string} Backend URL
|
|
203
|
+
*/
|
|
204
|
+
getBackendUrl() {
|
|
205
|
+
const port = this.getPort();
|
|
206
|
+
return `http://localhost:${port}`;
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
/**
|
|
210
|
+
* Get AVC configuration
|
|
211
|
+
* @returns {object} Configuration object
|
|
212
|
+
*/
|
|
213
|
+
getConfig() {
|
|
214
|
+
const configPath = path.join(this.avcDir, 'avc.json');
|
|
215
|
+
|
|
216
|
+
if (!fs.existsSync(configPath)) {
|
|
217
|
+
return { settings: {} };
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
try {
|
|
221
|
+
return JSON.parse(fs.readFileSync(configPath, 'utf8'));
|
|
222
|
+
} catch (error) {
|
|
223
|
+
console.warn(`Could not read avc.json: ${error.message}`);
|
|
224
|
+
return { settings: {} };
|
|
225
|
+
}
|
|
226
|
+
}
|
|
227
|
+
}
|
package/cli/llm-claude.js
CHANGED
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
import Anthropic from '@anthropic-ai/sdk';
|
|
2
|
+
import { jsonrepair } from 'jsonrepair';
|
|
2
3
|
import { LLMProvider } from './llm-provider.js';
|
|
4
|
+
import { getMaxTokensForModel } from './llm-token-limits.js';
|
|
3
5
|
|
|
4
6
|
export class ClaudeProvider extends LLMProvider {
  // Anthropic (Claude) implementation of the LLMProvider interface.
  constructor(model) { super('claude', model); }
|
|
@@ -7,7 +9,9 @@ export class ClaudeProvider extends LLMProvider {
|
|
|
7
9
|
_createClient() {
|
|
8
10
|
const apiKey = process.env.ANTHROPIC_API_KEY;
|
|
9
11
|
if (!apiKey) throw new Error('ANTHROPIC_API_KEY not set. Add it to your .env file.');
|
|
10
|
-
|
|
12
|
+
// 5-minute timeout per request; SDK retries disabled so our retryWithBackoff
|
|
13
|
+
// handles all retries with full logging visibility.
|
|
14
|
+
return new Anthropic({ apiKey, timeout: 5 * 60 * 1000, maxRetries: 0 });
|
|
11
15
|
}
|
|
12
16
|
|
|
13
17
|
async _callProvider(prompt, maxTokens, systemInstructions) {
|
|
@@ -22,6 +26,151 @@ export class ClaudeProvider extends LLMProvider {
|
|
|
22
26
|
}
|
|
23
27
|
|
|
24
28
|
const response = await this._client.messages.create(params);
|
|
29
|
+
this._trackTokens(response.usage);
|
|
25
30
|
return response.content[0].text;
|
|
26
31
|
}
|
|
32
|
+
|
|
33
|
+
/**
 * Generate a JSON response from Claude.
 *
 * @param {string} prompt - The user prompt.
 * @param {string|null} [agentInstructions] - Optional agent instructions; prepended
 *   to the prompt, or sent as a cache-marked system block when cachedContext is set.
 * @param {string|null} [cachedContext] - Optional stable context sent as a
 *   cache_control-marked content block so repeated calls hit Anthropic's prompt cache.
 * @returns {Promise<object|Array>} The parsed JSON from the model response.
 * @throws {Error} When the response cannot be parsed as JSON (even after repair).
 */
async generateJSON(prompt, agentInstructions = null, cachedContext = null) {
  if (!this._client) {
    this._client = this._createClient();
  }

  // Use model-specific maximum tokens
  const maxTokens = getMaxTokensForModel(this.model);

  const JSON_SYSTEM = 'You are a helpful assistant that always returns valid JSON. Your response must be a valid JSON object or array, nothing else.';

  let systemParam;
  let userContent;

  if (cachedContext) {
    // Structured content blocks: cache_control on agentInstructions (system) and
    // cachedContext (first user block) — both stay stable across multiple calls
    // in the same ceremony, hitting the 5-min cache on subsequent validators.
    systemParam = agentInstructions
      ? [
          { type: 'text', text: JSON_SYSTEM },
          { type: 'text', text: agentInstructions, cache_control: { type: 'ephemeral' } },
        ]
      : [{ type: 'text', text: JSON_SYSTEM }];

    userContent = [
      { type: 'text', text: cachedContext, cache_control: { type: 'ephemeral' } },
      { type: 'text', text: prompt },
    ];
  } else {
    systemParam = JSON_SYSTEM;
    userContent = agentInstructions ? `${agentInstructions}\n\n${prompt}` : prompt;
  }

  const requestParams = {
    model: this.model,
    max_tokens: maxTokens,
    system: systemParam,
    messages: [{ role: 'user', content: userContent }],
  };

  // fullPrompt is used for token-tracker logging only; note it does not
  // include cachedContext, so logged prompts can be shorter than what was sent.
  const fullPrompt = agentInstructions ? `${agentInstructions}\n\n${prompt}` : prompt;

  const _t0Json = Date.now();
  const response = await this._withRetry(
    () => this._client.messages.create(requestParams),
    'JSON generation (Claude)'
  );

  const content = response.content[0].text;
  this._trackTokens(response.usage, {
    prompt: fullPrompt,
    agentInstructions: agentInstructions ?? null,
    response: content,
    elapsed: Date.now() - _t0Json,
  });

  // Extract JSON from response (handle markdown code blocks and preamble text)
  let jsonStr = content.trim();

  // Strip markdown code fences if the response starts with one
  if (jsonStr.startsWith('```')) {
    jsonStr = jsonStr.replace(/^```(?:json)?\s*\n?/, '');
    jsonStr = jsonStr.replace(/\n?\s*```\s*$/, '');
    jsonStr = jsonStr.trim();
  }

  // If model added reasoning preamble before JSON, find the first { or [ and extract from there.
  // This handles Claude responses like "I'll analyze...\n\n```json\n{...}\n```" or "Here is the JSON:\n{...}"
  if (!jsonStr.startsWith('{') && !jsonStr.startsWith('[')) {
    const firstBrace = jsonStr.indexOf('{');
    const firstBracket = jsonStr.indexOf('[');
    const jsonStart = firstBrace === -1 ? firstBracket
      : firstBracket === -1 ? firstBrace
      : Math.min(firstBrace, firstBracket);
    if (jsonStart > 0) {
      // Also strip trailing markdown fences that may follow the JSON block
      jsonStr = jsonStr.slice(jsonStart).replace(/\n?\s*```\s*$/, '').trim();
    }
  }

  try {
    return JSON.parse(jsonStr);
  } catch (firstError) {
    // Only attempt repair when the content looks like JSON (starts with { or [)
    // — avoids silently accepting completely non-JSON responses
    if (jsonStr.startsWith('{') || jsonStr.startsWith('[')) {
      try {
        return JSON.parse(jsonrepair(jsonStr));
      } catch { /* fall through to throw */ }
    }
    throw new Error(`Failed to parse JSON response: ${firstError.message}\n\nResponse was:\n${content}`);
  }
}
|
|
126
|
+
|
|
127
|
+
/**
 * Generate a plain-text response from Claude.
 *
 * @param {string} prompt - The user prompt.
 * @param {string|null} [agentInstructions] - Optional agent instructions; prepended
 *   to the prompt, or sent as a cache-marked system block when cachedContext is set.
 * @param {string|null} [cachedContext] - Optional stable context sent as a
 *   cache_control-marked content block so repeated calls hit Anthropic's prompt cache.
 * @returns {Promise<string>} The model's text response.
 */
async generateText(prompt, agentInstructions = null, cachedContext = null) {
  if (!this._client) {
    this._client = this._createClient();
  }

  // Use model-specific maximum tokens
  const maxTokens = getMaxTokensForModel(this.model);

  let systemParam;
  let userContent;

  if (cachedContext) {
    // Unlike generateJSON there is no JSON system preamble here, so a system
    // parameter is only sent at all when agentInstructions is provided.
    systemParam = agentInstructions
      ? [
          { type: 'text', text: agentInstructions, cache_control: { type: 'ephemeral' } },
        ]
      : undefined;
    userContent = [
      { type: 'text', text: cachedContext, cache_control: { type: 'ephemeral' } },
      { type: 'text', text: prompt },
    ];
  } else {
    userContent = agentInstructions ? `${agentInstructions}\n\n${prompt}` : prompt;
  }

  const requestParams = {
    model: this.model,
    max_tokens: maxTokens,
    messages: [{ role: 'user', content: userContent }],
  };
  if (systemParam) requestParams.system = systemParam;

  // fullPrompt is used for token-tracker logging only; it does not include
  // cachedContext, so logged prompts can be shorter than what was sent.
  const fullPrompt = agentInstructions ? `${agentInstructions}\n\n${prompt}` : prompt;

  const _t0Text = Date.now();
  const response = await this._withRetry(
    () => this._client.messages.create(requestParams),
    'Text generation (Claude)'
  );

  const text = response.content[0].text;
  this._trackTokens(response.usage, {
    prompt: fullPrompt,
    agentInstructions: agentInstructions ?? null,
    response: text,
    elapsed: Date.now() - _t0Text,
  });
  return text;
}
|
|
27
176
|
}
|
package/cli/llm-gemini.js
CHANGED
|
@@ -1,5 +1,7 @@
|
|
|
1
1
|
import { GoogleGenAI } from '@google/genai';
|
|
2
|
+
import { jsonrepair } from 'jsonrepair';
|
|
2
3
|
import { LLMProvider } from './llm-provider.js';
|
|
4
|
+
import { getMaxTokensForModel } from './llm-token-limits.js';
|
|
3
5
|
|
|
4
6
|
export class GeminiProvider extends LLMProvider {
  // Google Gemini implementation of the LLMProvider interface.
  constructor(model = 'gemini-2.5-flash') { super('gemini', model); }
|
|
@@ -25,6 +27,113 @@ export class GeminiProvider extends LLMProvider {
|
|
|
25
27
|
if (!response.text) {
|
|
26
28
|
throw new Error('Gemini returned no text (possible safety filter block).');
|
|
27
29
|
}
|
|
30
|
+
this._trackTokens(response.usageMetadata);
|
|
28
31
|
return response.text;
|
|
29
32
|
}
|
|
33
|
+
|
|
34
|
+
async generateJSON(prompt, agentInstructions = null, cachedContext = null) {
|
|
35
|
+
if (!this._client) {
|
|
36
|
+
this._client = this._createClient();
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
const fullPrompt = agentInstructions ? `${agentInstructions}\n\n${prompt}` : prompt;
|
|
40
|
+
|
|
41
|
+
// Use model-specific maximum tokens
|
|
42
|
+
const maxTokens = getMaxTokensForModel(this.model);
|
|
43
|
+
|
|
44
|
+
const params = {
|
|
45
|
+
model: this.model,
|
|
46
|
+
contents: fullPrompt,
|
|
47
|
+
generationConfig: {
|
|
48
|
+
responseMimeType: 'application/json', // Gemini's native JSON mode
|
|
49
|
+
maxOutputTokens: maxTokens
|
|
50
|
+
}
|
|
51
|
+
};
|
|
52
|
+
|
|
53
|
+
// When cachedContext is provided (e.g. project rootContextMd), set it as the
|
|
54
|
+
// systemInstruction — Gemini's implicit caching targets system instructions and
|
|
55
|
+
// stable prefix content, giving a best-effort discount with no extra setup.
|
|
56
|
+
if (cachedContext) {
|
|
57
|
+
params.systemInstruction = cachedContext;
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
const _t0Json = Date.now();
|
|
61
|
+
const response = await this._withRetry(
|
|
62
|
+
() => this._client.models.generateContent(params),
|
|
63
|
+
'JSON generation (Gemini)'
|
|
64
|
+
);
|
|
65
|
+
if (!response.text) {
|
|
66
|
+
throw new Error('Gemini returned no text (possible safety filter block).');
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
const content = response.text;
|
|
70
|
+
this._trackTokens(response.usageMetadata, {
|
|
71
|
+
prompt: fullPrompt,
|
|
72
|
+
agentInstructions: agentInstructions ?? null,
|
|
73
|
+
response: content,
|
|
74
|
+
elapsed: Date.now() - _t0Json,
|
|
75
|
+
});
|
|
76
|
+
|
|
77
|
+
// Strip markdown code fences if present (more robust)
|
|
78
|
+
let jsonStr = content.trim();
|
|
79
|
+
if (jsonStr.startsWith('```')) {
|
|
80
|
+
// Remove opening fence (```json or ```)
|
|
81
|
+
jsonStr = jsonStr.replace(/^```(?:json)?\s*\n?/, '');
|
|
82
|
+
// Remove closing fence
|
|
83
|
+
jsonStr = jsonStr.replace(/\n?\s*```\s*$/, '');
|
|
84
|
+
jsonStr = jsonStr.trim();
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
try {
|
|
88
|
+
return JSON.parse(jsonStr);
|
|
89
|
+
} catch (firstError) {
|
|
90
|
+
if (jsonStr.startsWith('{') || jsonStr.startsWith('[')) {
|
|
91
|
+
try {
|
|
92
|
+
return JSON.parse(jsonrepair(jsonStr));
|
|
93
|
+
} catch { /* fall through to throw */ }
|
|
94
|
+
}
|
|
95
|
+
throw new Error(`Failed to parse JSON response: ${firstError.message}\n\nResponse was:\n${content}`);
|
|
96
|
+
}
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
async generateText(prompt, agentInstructions = null, cachedContext = null) {
|
|
100
|
+
if (!this._client) {
|
|
101
|
+
this._client = this._createClient();
|
|
102
|
+
}
|
|
103
|
+
|
|
104
|
+
const fullPrompt = agentInstructions ? `${agentInstructions}\n\n${prompt}` : prompt;
|
|
105
|
+
|
|
106
|
+
// Use model-specific maximum tokens
|
|
107
|
+
const maxTokens = getMaxTokensForModel(this.model);
|
|
108
|
+
|
|
109
|
+
const params = {
|
|
110
|
+
model: this.model,
|
|
111
|
+
contents: fullPrompt,
|
|
112
|
+
generationConfig: {
|
|
113
|
+
maxOutputTokens: maxTokens
|
|
114
|
+
}
|
|
115
|
+
};
|
|
116
|
+
|
|
117
|
+
if (cachedContext) {
|
|
118
|
+
params.systemInstruction = cachedContext;
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
const _t0Text = Date.now();
|
|
122
|
+
const response = await this._withRetry(
|
|
123
|
+
() => this._client.models.generateContent(params),
|
|
124
|
+
'Text generation (Gemini)'
|
|
125
|
+
);
|
|
126
|
+
if (!response.text) {
|
|
127
|
+
throw new Error('Gemini returned no text (possible safety filter block).');
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
const text = response.text;
|
|
131
|
+
this._trackTokens(response.usageMetadata, {
|
|
132
|
+
prompt: fullPrompt,
|
|
133
|
+
agentInstructions: agentInstructions ?? null,
|
|
134
|
+
response: text,
|
|
135
|
+
elapsed: Date.now() - _t0Text,
|
|
136
|
+
});
|
|
137
|
+
return text;
|
|
138
|
+
}
|
|
30
139
|
}
|