@xelth/eck-snapshot 2.2.0 → 4.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +119 -225
- package/index.js +14 -776
- package/package.json +25 -7
- package/setup.json +805 -0
- package/src/cli/cli.js +427 -0
- package/src/cli/commands/askGpt.js +29 -0
- package/src/cli/commands/autoDocs.js +150 -0
- package/src/cli/commands/consilium.js +86 -0
- package/src/cli/commands/createSnapshot.js +601 -0
- package/src/cli/commands/detectProfiles.js +98 -0
- package/src/cli/commands/detectProject.js +112 -0
- package/src/cli/commands/generateProfileGuide.js +91 -0
- package/src/cli/commands/pruneSnapshot.js +106 -0
- package/src/cli/commands/restoreSnapshot.js +173 -0
- package/src/cli/commands/setupGemini.js +149 -0
- package/src/cli/commands/setupGemini.test.js +115 -0
- package/src/cli/commands/trainTokens.js +38 -0
- package/src/config.js +81 -0
- package/src/services/authService.js +20 -0
- package/src/services/claudeCliService.js +621 -0
- package/src/services/claudeCliService.test.js +267 -0
- package/src/services/dispatcherService.js +33 -0
- package/src/services/gptService.js +302 -0
- package/src/services/gptService.test.js +120 -0
- package/src/templates/agent-prompt.template.md +29 -0
- package/src/templates/architect-prompt.template.md +50 -0
- package/src/templates/envScanRequest.md +4 -0
- package/src/templates/gitWorkflow.md +32 -0
- package/src/templates/multiAgent.md +164 -0
- package/src/templates/vectorMode.md +22 -0
- package/src/utils/aiHeader.js +303 -0
- package/src/utils/fileUtils.js +928 -0
- package/src/utils/projectDetector.js +704 -0
- package/src/utils/tokenEstimator.js +198 -0
- package/.ecksnapshot.config.js +0 -35
package/src/cli/cli.js
ADDED
|
@@ -0,0 +1,427 @@
|
|
|
1
|
+
import { Command } from 'commander';
|
|
2
|
+
import path from 'path';
|
|
3
|
+
import fs from 'fs/promises';
|
|
4
|
+
import { fileURLToPath } from 'url';
|
|
5
|
+
|
|
6
|
+
// Recreate CommonJS-style __filename/__dirname in this ES module; __dirname
// is used below to resolve prompt template paths relative to this file.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
|
|
8
|
+
|
|
9
|
+
import { createRepoSnapshot } from './commands/createSnapshot.js';
|
|
10
|
+
import { restoreSnapshot } from './commands/restoreSnapshot.js';
|
|
11
|
+
import { pruneSnapshot } from './commands/pruneSnapshot.js';
|
|
12
|
+
import { generateConsilium } from './commands/consilium.js';
|
|
13
|
+
import { detectProject, testFileParsing } from './commands/detectProject.js';
|
|
14
|
+
import { trainTokens, showTokenStats } from './commands/trainTokens.js';
|
|
15
|
+
import { askGpt } from './commands/askGpt.js';
|
|
16
|
+
import { ask as askGptService } from '../services/gptService.js';
|
|
17
|
+
import { executePrompt, executePromptWithSession } from '../services/claudeCliService.js';
|
|
18
|
+
import { detectProfiles } from './commands/detectProfiles.js';
|
|
19
|
+
import { generateProfileGuide } from './commands/generateProfileGuide.js';
|
|
20
|
+
import { setupGemini } from './commands/setupGemini.js';
|
|
21
|
+
import { generateAutoDocs } from './commands/autoDocs.js';
|
|
22
|
+
import inquirer from 'inquirer';
|
|
23
|
+
import ora from 'ora';
|
|
24
|
+
import { execa } from 'execa';
|
|
25
|
+
import chalk from 'chalk';
|
|
26
|
+
|
|
27
|
+
/**
 * Check agent code boundaries in a file.
 *
 * Scans the file for paired block-comment markers of the form
 * `AGENT_BOUNDARY:[owner] START` ... `AGENT_BOUNDARY:[owner] END` and reports
 * which agent owns each delimited region.
 *
 * @param {string} filePath - Path of the file to scan.
 * @param {string} agentId - ID of the agent asking for modification rights.
 * @returns {Promise<{file: string, hasBoundaries?: boolean, boundaries?: Array<{owner: string, startIndex: number, endIndex: number, content: string}>, error?: string, canModify: boolean}>}
 *   `canModify` is true when every boundary is owned by `agentId` or 'SHARED'.
 */
async function checkCodeBoundaries(filePath, agentId) {
  try {
    const content = await fs.readFile(filePath, 'utf-8');
    // Fix: the END marker must carry the SAME owner as its START marker (\1
    // backreference). Previously any END owner closed any START, so mismatched
    // pairs (START:[A] ... END:[B]) silently merged unrelated regions.
    const boundaryRegex = /\/\* AGENT_BOUNDARY:\[([^\]]+)\] START \*\/([\s\S]*?)\/\* AGENT_BOUNDARY:\[\1\] END \*\//g;

    const boundaries = [];
    let match;

    while ((match = boundaryRegex.exec(content)) !== null) {
      boundaries.push({
        owner: match[1],
        startIndex: match.index,
        endIndex: match.index + match[0].length,
        content: match[2]
      });
    }

    return {
      file: filePath,
      hasBoundaries: boundaries.length > 0,
      boundaries: boundaries,
      canModify: boundaries.every(b => b.owner === agentId || b.owner === 'SHARED')
    };
  } catch (error) {
    // Unreadable file is treated as a brand-new file: no boundaries, free to modify.
    return {
      file: filePath,
      error: error.message,
      canModify: true // If can't read, assume can modify (new file)
    };
  }
}
|
|
61
|
+
|
|
62
|
+
// Main run function that sets up the CLI
/**
 * Build the commander program, register every eck-snapshot sub-command
 * (snapshot, restore, prune, consilium, check-boundaries, ask-gpt, detect,
 * test-android, train-tokens, token-stats, profile-detect,
 * generate-profile-guide, ask-claude, ask-claude-session, generate-ai-prompt,
 * setup-gemini, docs-auto) and parse process.argv.
 * This is the single CLI entry point invoked by the package binary.
 *
 * NOTE(review): several user-facing strings below contain mojibake-corrupted
 * emoji inherited from the published package; they are preserved verbatim here.
 */
export function run() {
  const program = new Command();

  // Long-form usage guide shown before the auto-generated commander help.
  const helpGuide = `eck-snapshot (v4.0.0) - A lightweight, platform-independent CLI for creating project snapshots.

--- Getting Started: Environment Setup ---

This tool is designed to work with Large Language Models (LLMs). For the best results, you'll need:
1. An 'Architect' LLM (like Gemini, GPT-4, or Grok) to analyze snapshots.
2. A 'Coder' LLM (like Claude Code) to execute coding tasks.

--- Core Workflow: A Step-by-Step Guide ---

Step 1: Create a Full Project Snapshot
This is your primary command. It scans your project and packs all code into a single file.

> Usage:
$ eck-snapshot

-> This creates a file like 'myProject_snapshot_... .md' in the .eck/snapshots/ directory.
You can now pass this file to your Architect LLM for analysis.


Step 2: Handle Large Projects with Auto-Profiling
If your project is too big for the LLM's context window, \`profile-detect\` will automatically
slice it into logical parts (profiles) using AI.

> Usage:
$ eck-snapshot profile-detect

-> Output:
⨠ Detected Profiles:
---------------------------
- cli
- services
- core
- templates
- docs
- config


Step 3: Use Profiles to Create Focused Snapshots
Use the --profile option to create smaller snapshots of specific project areas.

> Example 1: Combine and exclude profiles
$ eck-snapshot --profile "core,services,cli,-docs,-config"

-> Creates a snapshot with code from the 'core', 'services', and 'cli' profiles,
while excluding anything from 'docs' and 'config'.

> Example 2: Use ad-hoc glob patterns
$ eck-snapshot --profile "src/**/*.js,-**/*.test.js"

-> Includes all .js files in the 'src' directory and its subdirectories,
but excludes any file ending in '.test.js'.
Note: Quotes are required for complex patterns.


Step 4: Intelligently Prune a Snapshot
If a snapshot is still too large, \`prune\` uses AI to shrink it to a target size,
keeping only the most important files.

> Usage:
$ eck-snapshot prune myProject_snapshot.md --target-size 500KB


Step 5 (Alternative): Truncate Files by Line Count
A faster, non-AI method to reduce size by keeping only the top N lines of each file.
Useful for a high-level overview.

> Usage:
$ eck-snapshot --max-lines-per-file 200

--- Auxiliary Commands ---

- restore: Restore a project from a snapshot file.
- generate-profile-guide: Creates a guide for manual profile creation. Use this if 'profile-detect' fails on very large projects, as it allows you to use an LLM with a larger context window (e.g., a web UI).
- detect: Show how eckSnapshot identifies your project type.
- ask-gpt / ask-claude: Directly query the configured AI coder agents.
- setup-gemini: Auto-configure integration with gemini-cli.
`;

  program
    .name('eck-snapshot')
    .description('A lightweight, platform-independent CLI for creating project snapshots.')
    .version('4.0.0')
    .addHelpText('before', helpGuide);

  // Main snapshot command (runs when no sub-command is given: isDefault)
  program
    .command('snapshot', { isDefault: true })
    .description('Create a multi-agent aware snapshot of a repository')
    .argument('[repoPath]', 'Path to the repository', process.cwd())
    .option('-o, --output <dir>', 'Output directory')
    .option('--no-tree', 'Exclude directory tree')
    .option('-v, --verbose', 'Show detailed processing')
    .option('--max-file-size <size>', 'Maximum file size', '10MB')
    .option('--max-total-size <size>', 'Maximum total size', '100MB')
    .option('--max-depth <number>', 'Maximum tree depth', (val) => parseInt(val), 10)
    .option('--config <path>', 'Configuration file path')
    .option('--include-hidden', 'Include hidden files')
    .option('--format <type>', 'Output format: md, json', 'md')
    .option('--no-ai-header', 'Skip AI instructions')
    .option('-d, --dir', 'Directory mode')
    .option('--enhanced', 'Use enhanced multi-agent headers (default: true)', true)
    .option('--profile <name>', 'Filter files using profiles and/or ad-hoc glob patterns.')
    .option('--agent', 'Generate a snapshot optimized for a command-line agent')
    .option('--with-ja', 'Generate a detailed snapshot for the Junior Architect agent')
    .option('--max-lines-per-file <number>', 'Truncate files to max N lines (e.g., 200 for compact snapshots)', (val) => parseInt(val))
    .action(createRepoSnapshot)
    .addHelpText('after', `
Profile Usage Guide:
Profiles allow you to curate focused snapshots by filtering files using glob patterns.
Define reusable profiles in .eck/profiles.json or use ad-hoc patterns directly.

Profile Structure (.eck/profiles.json):
{
  "backend": {
    "include": ["src/api/**", "src/services/**"],
    "exclude": ["**/*.test.js"]
  },
  "frontend": {
    "include": ["src/components/**", "src/pages/**"],
    "exclude": ["**/*.spec.js"]
  }
}

Examples:
--profile backend
Uses the 'backend' profile defined in .eck/profiles.json

--profile "backend,-**/tests/**"
Uses 'backend' profile, then excludes all test directories

--profile "src/**/*.js,-**/*.test.js"
Ad-hoc filtering: includes all JS files in src/, excludes test files

--profile "frontend,src/utils/**"
Combines 'frontend' profile with additional utility files

Glob Pattern Reference:
** Matches any number of directories
* Matches any characters within a directory level
{a,b} Matches either 'a' or 'b'
[0-9] Matches any digit
-pattern Prefix with '-' to exclude matching files

Creating Custom Profiles:
1. Run: eck-snapshot generate-profile-guide
2. Follow the generated guide in .eck/profile_generation_guide.md
3. Save your custom profiles to .eck/profiles.json

Alternatively, use AI detection:
eck-snapshot profile-detect (auto-generates profiles using AI)
`);

  // Restore command
  program
    .command('restore')
    .description('Restore files from a snapshot')
    .argument('<snapshot_file>', 'Snapshot file path')
    .argument('[target_directory]', 'Target directory', process.cwd())
    .option('-f, --force', 'Skip confirmation')
    .option('-v, --verbose', 'Show detailed progress')
    .option('--dry-run', 'Preview without writing')
    .option('--include <patterns...>', 'Include patterns')
    .option('--exclude <patterns...>', 'Exclude patterns')
    .option('--concurrency <number>', 'Concurrent operations', (val) => parseInt(val), 10)
    .action(restoreSnapshot);

  // Prune command
  program
    .command('prune')
    .description('Intelligently reduce snapshot size using AI file ranking')
    .argument('<snapshot_file>', 'Path to the snapshot file to prune')
    .option('--target-size <size>', 'Target size (e.g., 500KB, 1MB)', '500KB')
    .action(pruneSnapshot);

  // Consilium command
  program
    .command('consilium')
    .description('Generate a consilium request for complex decisions')
    .option('--type <type>', 'Decision type', 'technical_decision')
    .option('--title <title>', 'Decision title')
    .option('--description <desc>', 'Detailed description')
    .option('--complexity <num>', 'Complexity score (1-10)', (val) => parseInt(val), 7)
    .option('--constraints <list>', 'Comma-separated constraints')
    .option('--snapshot <file>', 'Include snapshot file')
    .option('--agent <id>', 'Requesting agent ID')
    .option('-o, --output <file>', 'Output file', 'consilium_request.json')
    .action(generateConsilium);

  // Check boundaries command
  program
    .command('check-boundaries')
    .description('Check agent boundaries in a file')
    .argument('<file>', 'File to check')
    .option('--agent <id>', 'Your agent ID')
    .action(async (file, options) => {
      const result = await checkCodeBoundaries(file, options.agent || 'UNKNOWN');
      console.log(JSON.stringify(result, null, 2));
    });

  program
    .command('ask-gpt')
    .description('Delegate tasks to OpenAI Codex agent with automatic authentication')
    .argument('<payload>', 'JSON payload string (e.g. \'{"objective": "Calculate 5+2"}\')')
    .option('-v, --verbose', 'Enable verbose logging and detailed execution output')
    .option('--model <name>', 'Model to use (default: gpt-5-codex)', 'gpt-5-codex')
    .option('--reasoning <level>', 'Reasoning level: low, medium, high (default: high)', 'high')
    .action((payloadArg, cmd) => askGpt(payloadArg, cmd))
    .addHelpText('after', `
Examples:
Ask a simple question:
eck-snapshot ask-gpt '{"objective": "What is 5+2?"}'

Request code changes with context:
eck-snapshot ask-gpt '{
"target_agent": "local_dev",
"task_id": "feature-123",
"payload": {
"objective": "Add error handling to login function",
"files_to_modify": [{"path": "src/auth.js", "action": "modify"}]
},
"post_execution_steps": {
"journal_entry": {
"type": "feat",
"scope": "auth",
"summary": "Add error handling"
}
}
}' --verbose

Prerequisites:
1. Install Codex CLI: npm install -g @openai/codex
2. Login: codex login (requires ChatGPT Plus/Pro subscription)
3. The command automatically loads .eck project context

Authentication:
- Uses your existing 'codex login' credentials
- Auto-retries on authentication errors
- Supports ChatGPT Plus/Pro subscriptions
`);

  // Project detection command
  program
    .command('detect')
    .description('Detect and display project type and configuration')
    .argument('[projectPath]', 'Path to the project', process.cwd())
    .option('-v, --verbose', 'Show detailed detection results')
    .action(detectProject);

  // Android parsing test command
  program
    .command('test-android')
    .description('Test Android file parsing capabilities')
    .argument('<filePath>', 'Path to Android source file (.kt or .java)')
    .option('--show-content', 'Show content preview of parsed segments')
    .action(testFileParsing);

  // Token training command
  program
    .command('train-tokens')
    .description('Train token estimation with actual results')
    .argument('<projectType>', 'Project type (android, nodejs, python, etc.)')
    .argument('<fileSizeBytes>', 'File size in bytes')
    .argument('<estimatedTokens>', 'Estimated token count')
    .argument('<actualTokens>', 'Actual token count from LLM')
    .action(trainTokens);

  // Token statistics command
  program
    .command('token-stats')
    .description('Show token estimation statistics and accuracy')
    .action(showTokenStats);

  // Profile detection command
  program
    .command('profile-detect')
    .description('Use AI to scan the directory tree and auto-generate local context profiles (saves to .eck/profiles.json)')
    .argument('[repoPath]', 'Path to the repository', process.cwd())
    .action(detectProfiles);

  program
    .command('generate-profile-guide')
    .description('Generate a markdown guide with a prompt and directory tree for manual profile creation')
    .argument('[repoPath]', 'Path to the repository', process.cwd())
    .option('--config <path>', 'Configuration file path')
    .action((repoPath, options) => generateProfileGuide(repoPath, options));

  // Ask Claude command. On Claude failure it falls back to the GPT service,
  // wrapping a plain-text prompt as {"objective": ...} when it is not JSON.
  program
    .command('ask-claude')
    .description('Execute a prompt using claude-code CLI and return JSON response')
    .argument('<prompt>', 'Prompt to send to Claude')
    .option('-c, --continue', 'Continue the most recent conversation')
    .action(async (prompt, options) => {
      try {
        const result = await executePrompt(prompt, options.continue);
        console.log(JSON.stringify(result, null, 2));
      } catch (error) {
        console.warn(`ā ļø Claude failed: ${error.message}`);
        console.log('š Failing over to GPT for task...');
        try {
          const payload = (typeof prompt === 'string' && prompt.startsWith('{')) ? prompt : JSON.stringify({ objective: prompt });
          const gptResult = await askGptService(payload, { verbose: false });
          console.log(JSON.stringify(gptResult, null, 2));
        } catch (gptError) {
          console.error('Failed to execute prompt with both Claude and GPT:', gptError.message);
          process.exit(1);
        }
      }
    });

  // Ask Claude with specific session
  program
    .command('ask-claude-session')
    .description('Execute a prompt using specific session ID')
    .argument('<sessionId>', 'Session ID to resume')
    .argument('<prompt>', 'Prompt to send to Claude')
    .action(async (sessionId, prompt) => {
      try {
        // Directly use the provided session ID
        const result = await executePromptWithSession(prompt, sessionId);
        console.log(JSON.stringify(result, null, 2));
      } catch (error) {
        console.error('Failed to execute prompt:', error.message);
        process.exit(1);
      }
    });

  // Prints a role-specific prompt template (resolved relative to this file).
  program
    .command('generate-ai-prompt')
    .description('Generate a specific AI prompt from a template.')
    .option('--role <role>', 'The role for which to generate a prompt', 'architect')
    .action(async (options) => {
      try {
        const templatePath = path.join(__dirname, '..', 'templates', `${options.role}-prompt.template.md`);
        const template = await fs.readFile(templatePath, 'utf-8');
        // In the future, we can inject dynamic data here from setup.json
        console.log(template);
      } catch (error) {
        console.error(`Failed to generate prompt for role '${options.role}':`, error.message);
        process.exit(1);
      }
    });

  // Setup Gemini command
  program
    .command('setup-gemini')
    .description('Generate claude.toml configuration for gemini-cli integration with dynamic paths')
    .option('-v, --verbose', 'Show detailed output and error information')
    .action(setupGemini);

  // Auto-docs command
  program
    .command('docs-auto')
    .description('Auto-generate documentation from gemini-extension.json files')
    .action(generateAutoDocs);

  program.parse(process.argv);
}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
import { ask } from '../../services/gptService.js';
|
|
2
|
+
|
|
3
|
+
/**
 * CLI handler for the `ask-gpt` command.
 * Validates that a JSON payload was supplied, forwards it to the GPT
 * service with the resolved model/reasoning settings, and prints the
 * result (or the error) to the console. Sets a non-zero exit code on
 * failure instead of throwing.
 *
 * @param {string} payload - JSON payload string.
 * @param {{ verbose?: boolean, model?: string, reasoning?: string }} options - CLI options.
 */
export async function askGpt(payload, options = {}) {
  // Guard clause: a payload argument is mandatory.
  if (!payload) {
    console.error('ask-gpt requires a JSON payload argument.');
    process.exitCode = 1;
    return;
  }

  const settings = {
    verbose: Boolean(options.verbose),
    model: options.model || 'gpt-5-codex',
    reasoning: options.reasoning || 'high',
  };

  try {
    const result = await ask(payload, settings);
    console.log(JSON.stringify(result, null, 2));
  } catch (error) {
    console.error(error.message);
    if (settings.verbose && error?.stack) {
      console.error(error.stack);
    }
    process.exitCode = 1;
  }
}
|
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
import fs from 'fs/promises';
|
|
2
|
+
import path from 'path';
|
|
3
|
+
import { fileURLToPath } from 'url';
|
|
4
|
+
|
|
5
|
+
// Recreate CommonJS-style __filename/__dirname in this ES module; __dirname
// is used below to resolve the project root and extension paths.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
|
|
7
|
+
|
|
8
|
+
/**
 * Auto-generate documentation from gemini-extension.json files.
 *
 * Scans packages/cli/src/commands/extensions (resolved relative to the
 * project root, three levels above this file), bootstrapping the directory
 * with a sample extension if it does not exist. Every *.json file found is
 * parsed, and the "Auto-Generated Gemini Extensions" section of
 * COMMANDS_REFERENCE.md is replaced with a markdown summary of each
 * extension's commands and tools. Exits the process with code 1 on
 * unrecoverable failure.
 *
 * Fix: the final status messages contained mojibake-corrupted emoji in the
 * published source (one literal was even split across a line break, which is
 * invalid JS); they are restored here to the most likely intended glyphs.
 */
export async function generateAutoDocs() {
  try {
    const projectRoot = path.resolve(__dirname, '../../../');
    const extensionsDir = path.join(projectRoot, 'packages/cli/src/commands/extensions');
    const referenceFile = path.join(projectRoot, 'COMMANDS_REFERENCE.md');

    // Check if extensions directory exists; if not, create it with a sample
    // extension so the generator has something to demonstrate.
    try {
      await fs.access(extensionsDir);
    } catch (error) {
      console.log(`Extensions directory not found at: ${extensionsDir}`);
      console.log('Creating example structure...');

      // Create the directory structure
      await fs.mkdir(extensionsDir, { recursive: true });

      // Create a sample gemini-extension.json file for demonstration
      const sampleExtension = {
        name: "sample-extension",
        description: "Sample Gemini extension for demonstration",
        commands: [
          {
            name: "sample-command",
            description: "A sample command for testing auto-docs",
            usage: "sample-command [options]",
            examples: ["sample-command --help"]
          }
        ],
        tools: [
          {
            name: "sample-tool",
            description: "A sample tool for testing auto-docs",
            usage: "Use this tool for sample operations"
          }
        ]
      };

      await fs.writeFile(
        path.join(extensionsDir, 'sample-extension.json'),
        JSON.stringify(sampleExtension, null, 2)
      );

      console.log('Created sample extension at:', path.join(extensionsDir, 'sample-extension.json'));
    }

    // Read all JSON files in the extensions directory
    const files = await fs.readdir(extensionsDir);
    const jsonFiles = files.filter(file => file.endsWith('.json'));

    if (jsonFiles.length === 0) {
      console.log('No JSON files found in extensions directory');
      return;
    }

    console.log(`Found ${jsonFiles.length} extension file(s): ${jsonFiles.join(', ')}`);

    // Parse each JSON file and extract command/tool information; a file that
    // fails to parse is skipped with a warning rather than aborting the run.
    const extensions = [];

    for (const file of jsonFiles) {
      try {
        const filePath = path.join(extensionsDir, file);
        const content = await fs.readFile(filePath, 'utf-8');
        const extension = JSON.parse(content);
        extensions.push({ filename: file, ...extension });
        console.log(`Parsed extension: ${extension.name || file}`);
      } catch (error) {
        console.warn(`Failed to parse ${file}:`, error.message);
      }
    }

    if (extensions.length === 0) {
      console.log('No valid extension files found');
      return;
    }

    // Generate markdown content for the auto-generated section.
    let markdownContent = '\n## Auto-Generated Gemini Extensions\n\n';
    markdownContent += '*This section is automatically generated. Run `npm run docs:auto` to update.*\n\n';

    for (const extension of extensions) {
      markdownContent += `### ${extension.name || extension.filename}\n\n`;

      if (extension.description) {
        markdownContent += `${extension.description}\n\n`;
      }

      // Add commands section
      if (extension.commands && extension.commands.length > 0) {
        markdownContent += '**Commands:**\n\n';
        for (const command of extension.commands) {
          markdownContent += `- **${command.name}**: ${command.description || 'No description'}\n`;
          if (command.usage) {
            markdownContent += `  - Usage: \`${command.usage}\`\n`;
          }
          if (command.examples && command.examples.length > 0) {
            markdownContent += `  - Examples: ${command.examples.map(ex => `\`${ex}\``).join(', ')}\n`;
          }
        }
        markdownContent += '\n';
      }

      // Add tools section
      if (extension.tools && extension.tools.length > 0) {
        markdownContent += '**Tools:**\n\n';
        for (const tool of extension.tools) {
          markdownContent += `- **${tool.name}**: ${tool.description || 'No description'}\n`;
          if (tool.usage) {
            markdownContent += `  - Usage: ${tool.usage}\n`;
          }
        }
        markdownContent += '\n';
      }
    }

    // Read the current COMMANDS_REFERENCE.md (create a fresh header if absent).
    let currentContent;
    try {
      currentContent = await fs.readFile(referenceFile, 'utf-8');
    } catch (error) {
      console.warn('COMMANDS_REFERENCE.md not found, creating new file');
      currentContent = '# Commands Reference\n\n';
    }

    // Remove existing auto-generated section if it exists, then append the
    // freshly generated one so repeated runs stay idempotent.
    const autoGenRegex = /\n## Auto-Generated Gemini Extensions[\s\S]*?(?=\n## |\n# |$)/;
    const updatedContent = currentContent.replace(autoGenRegex, '') + markdownContent;

    // Write the updated content back to the file
    await fs.writeFile(referenceFile, updatedContent);

    console.log('\n✅ Auto-documentation generated successfully!');
    console.log(`📄 Updated: ${referenceFile}`);
    console.log(`📦 Processed ${extensions.length} extension(s)`);

  } catch (error) {
    console.error('Failed to generate auto-docs:', error.message);
    process.exit(1);
  }
}
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
import fs from 'fs/promises';
|
|
2
|
+
|
|
3
|
+
/**
 * Generate a consilium request for complex decisions.
 *
 * Builds the full request envelope: a unique request id, the task details,
 * the instruction prompt sent to each expert, and the aggregation rules used
 * to combine their answers.
 *
 * @param {{type?: string, title?: string, description?: string, currentCode?: string, proposedSolution?: string, constraints?: string[], criteria?: string[]}} task - Decision task details.
 * @param {number} complexity - Complexity score (1-10).
 * @param {string} agentId - ID of the requesting agent.
 * @returns {Promise<object>} The consilium request object.
 */
async function generateConsiliumRequest(task, complexity, agentId) {
  const request = {
    consilium_request: {
      // Timestamp + random base-36 suffix gives a practically unique id.
      // Fix: substr() is deprecated; slice(2, 11) yields the identical
      // up-to-9-character suffix.
      request_id: `cons-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`,
      timestamp: new Date().toISOString(),
      requesting_agent: agentId,
      complexity_score: complexity,

      task: {
        type: task.type || "technical_decision",
        title: task.title,
        description: task.description,
        current_implementation: task.currentCode || "N/A",
        proposed_solution: task.proposedSolution || "To be determined",
        constraints: task.constraints || [],
        success_criteria: task.criteria || []
      },

      // Prompt delivered verbatim to each participating expert LLM.
      consilium_instructions: `
You are a technical expert participating in a consilium decision.

RESPOND WITH:
1. Your expert opinion on the best approach
2. Specific technical recommendations
3. Potential risks and mitigation strategies
4. Your confidence level (0-100%)

FORMAT YOUR RESPONSE AS JSON:
{
"expert": "[Your Model Name]",
"role": "[Your assigned role]",
"recommendation": {
"approach": "Detailed technical solution",
"implementation_steps": ["step1", "step2"],
"key_benefits": ["benefit1", "benefit2"],
"risks": ["risk1", "risk2"],
"mitigation": ["strategy1", "strategy2"]
},
"alternatives_considered": ["alt1", "alt2"],
"confidence": 85,
"critical_warnings": []
}
`,

      // Rules the orchestrator applies when merging expert responses.
      aggregation_rules: {
        minimum_confidence_required: 60,
        consensus_threshold: 0.66,
        veto_roles: ["security_auditor"],
        conflict_resolution: "weighted_average_with_discussion"
      }
    }
  };

  return request;
}
|
|
61
|
+
|
|
62
|
+
/**
 * CLI handler for the `consilium` command.
 *
 * Assembles a task from the CLI options (falling back to placeholders),
 * delegates to generateConsiliumRequest, and writes the resulting request
 * JSON to the output file (default: consilium_request.json).
 *
 * Fix: the console emoji were mojibake-corrupted in the published source
 * (one literal was split across a line break, which is invalid JS); they are
 * restored here to the most likely intended glyphs.
 *
 * @param {{type?: string, title?: string, description?: string, constraints?: string, snapshot?: string, complexity?: number, agent?: string, output?: string}} options - Parsed CLI options.
 */
export async function generateConsilium(options) {
  console.log('🧠 Generating Consilium Request...');

  const task = {
    type: options.type || 'technical_decision',
    title: options.title || 'Technical Decision Required',
    description: options.description || 'Please provide a description',
    constraints: options.constraints ? options.constraints.split(',') : [],
    currentCode: options.snapshot || null
  };

  const complexity = options.complexity || 7;
  const agentId = options.agent || 'AGENT_ORCHESTRATOR';

  const request = await generateConsiliumRequest(task, complexity, agentId);

  const outputFile = options.output || 'consilium_request.json';
  await fs.writeFile(outputFile, JSON.stringify(request, null, 2));

  console.log(`✅ Consilium request saved to: ${outputFile}`);
  console.log('\n📋 Next steps:');
  console.log('1. Send this request to multiple LLM experts');
  console.log('2. Collect their responses');
  console.log('3. Run: eck-snapshot process-consilium <responses.json>');
}
|