scai 0.1.103 → 0.1.105
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/CHANGELOG.md +12 -1
- package/dist/agent/agentManager.js +28 -0
- package/dist/agent/workflowManager.js +89 -0
- package/dist/commands/SwitchCmd.js +1 -1
- package/dist/config.js +1 -1
- package/dist/context.js +27 -2
- package/dist/index.js +39 -19
- package/dist/pipeline/modules/cleanGeneratedTestsModule.js +22 -0
- package/dist/pipeline/modules/commentModule.js +4 -1
- package/dist/pipeline/modules/generateTestsModule.js +39 -18
- package/dist/pipeline/modules/preserveCodeModule.js +5 -1
- package/dist/pipeline/registry/moduleRegistry.js +5 -1
- package/dist/utils/checkModel.js +30 -0
- package/dist/utils/contentUtils.js +49 -0
- package/dist/utils/repoKey.js +1 -1
- package/package.json +1 -1
- package/dist/agentManager.js +0 -47
- package/dist/utils/normalizePath.js +0 -23
package/dist/CHANGELOG.md
CHANGED
@@ -146,4 +146,15 @@ Type handling with the module pipeline
 
 • Fixed bug where entire block was returned as a single line for multi-line comments
 • Add multi-line comment handling with ~90% accuracy
-• Update CLI config file to use codellama:13b model and 4096 context length
+• Update CLI config file to use codellama:13b model and 4096 context length
+
+## 2025-08-30
+
+* Add new workflow management functionality to handle file writes.
+
+## 2025-08-31
+
+• Introduce Agent class with minimal implementation
+• Improve test-module's filepath handling and variable naming
+• Rename workflowManager.ts to agent/workflowManager.ts
+• Improved formatting of agent run summary output

package/dist/agent/agentManager.js
ADDED

@@ -0,0 +1,28 @@
+// src/agent/agentManager.ts
+import chalk from "chalk";
+import fs from "fs/promises";
+import { getModuleByName } from "../pipeline/registry/moduleRegistry.js";
+import { handleAgentRun } from "./workflowManager.js";
+// Minimal agent: just collects modules and delegates to handleAgentRun
+export class Agent {
+    constructor(goals) {
+        // Trim goal names to avoid whitespace issues
+        this.goals = goals.map(g => g.trim());
+    }
+    async execute(filepath) {
+        console.log(chalk.cyan(`🤖 Agent starting on: ${filepath}`));
+        // Map goal names → module objects
+        const modules = this.goals.map(goal => {
+            const mod = getModuleByName(goal);
+            if (!mod)
+                throw new Error(`❌ Unknown module: ${goal}`);
+            return mod;
+        });
+        console.log(chalk.green("📋 Modules to run:"), modules.map(m => m.name).join(" → "));
+        // Read file content
+        const content = await fs.readFile(filepath, "utf-8");
+        // Delegate everything to handleAgentRun (like CLI commands do)
+        await handleAgentRun(filepath, modules);
+        console.log(chalk.green("✅ Agent finished!"));
+    }
+}

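For orientation, a minimal sketch of how this new Agent class is driven. It mirrors what the agent CLI command added later in this diff does; the goal names and target path here are illustrative.

```js
// Sketch only: goal names must match keys in the module registry shown further down.
import { Agent } from './agent/agentManager.js';

const agent = new Agent(['comments', 'tests']); // illustrative goal list
await agent.execute('src/example.js');          // illustrative file path
```
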
package/dist/agent/workflowManager.js
ADDED

@@ -0,0 +1,89 @@
+// agentManager.ts
+import fs from 'fs/promises';
+import chalk from 'chalk';
+import { runModulePipeline } from '../pipeline/runModulePipeline.js';
+import { countTokens, splitCodeIntoChunks } from '../utils/splitCodeIntoChunk.js';
+import { normalizePath } from '../utils/contentUtils.js';
+export async function handleAgentRun(filepath, modules) {
+    try {
+        filepath = normalizePath(filepath);
+        let fileContent = await fs.readFile(filepath, 'utf-8');
+        // Immutable baseline for this file (stays until file changes)
+        const maxTokens = 1500;
+        const baseChunks = splitCodeIntoChunks(fileContent, maxTokens);
+        // Working chunks that flow through modules; stays index-aligned with baseChunks
+        let workingChunks = [...baseChunks];
+        for (const mod of modules) {
+            console.log(chalk.cyan(`\n⚙️ Running module: ${mod.name}`));
+            console.log(chalk.blue(`🧮 Tokens:`), chalk.yellow(countTokens(fileContent).toString()));
+            console.log(chalk.magenta(`📦 Chunks: ${workingChunks.length}`));
+            const processed = [];
+            let mode;
+            let newFilepath;
+            for (let i = 0; i < workingChunks.length; i++) {
+                const input = {
+                    originalContent: baseChunks[i], // immutable baseline for this file
+                    content: workingChunks[i], // current state for this slice
+                    filepath,
+                    chunkIndex: i,
+                    chunkCount: workingChunks.length,
+                };
+                const out = await runModulePipeline([mod], input);
+                if (!out.content?.trim()) {
+                    throw new Error(`⚠️ Empty result on chunk ${i + 1}`);
+                }
+                processed.push(out.content);
+                // Capture mode/path (should be consistent across chunks)
+                if (out.mode)
+                    mode = out.mode;
+                if (out.newFilepath)
+                    newFilepath = out.newFilepath;
+            }
+            const finalOutput = processed.join('\n\n');
+            // Apply output mode
+            switch (mode ?? 'overwrite') {
+                case 'overwrite':
+                    await fs.writeFile(filepath, finalOutput, 'utf-8');
+                    console.log(chalk.green(`✅ Overwritten: ${filepath}`));
+                    // keep baseChunks (baseline stays the same), keep alignment: do NOT re-chunk
+                    workingChunks = processed;
+                    fileContent = finalOutput;
+                    break;
+                case 'append':
+                    await fs.appendFile(filepath, finalOutput, 'utf-8');
+                    console.log(chalk.green(`✅ Appended: ${filepath}`));
+                    // appended file content diverges; keep alignment by using processed as new working
+                    workingChunks = processed;
+                    fileContent += finalOutput;
+                    break;
+                case 'newFile':
+                    if (!newFilepath)
+                        throw new Error(`newFile mode requires newFilepath`);
+                    await fs.writeFile(newFilepath, finalOutput, 'utf-8');
+                    console.log(chalk.green(`✅ New file: ${newFilepath}`));
+                    // File context changes → reset baseline and working to the new file
+                    filepath = newFilepath;
+                    fileContent = finalOutput;
+                    const reset = splitCodeIntoChunks(fileContent, maxTokens);
+                    // new baseline for the new file (e.g., generated tests before cleaning)
+                    for (let i = 0; i < reset.length; i++)
+                        ; // (no-op; just clarity)
+                    // Replace both arrays to keep them in sync for subsequent modules
+                    workingChunks = reset;
+                    // Important: also reset baseChunks to this new file’s content so the next module
+                    // (e.g., cleaner) sees the *generated tests* as its originalContent baseline.
+                    baseChunks.length = 0;
+                    baseChunks.push(...reset);
+                    break;
+                default:
+                    console.log(chalk.yellow(`⚠️ Unknown mode; skipping write`));
+                    // still move pipeline forward with processed
+                    workingChunks = processed;
+                    fileContent = finalOutput;
+            }
+        }
+    }
+    catch (err) {
+        console.error(chalk.red('❌ Error in agent run:'), err instanceof Error ? err.message : err);
+    }
+}

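handleAgentRun keys its write behavior off the shape each module returns ({ content, mode, newFilepath }). A hedged sketch of that contract, using an invented pass-through module:

```js
// Hypothetical module: only the returned shape matters to handleAgentRun.
const echoModule = {
    name: 'echoModule',
    async run(input) {
        return {
            originalContent: input.originalContent,
            content: input.content, // unchanged text flows to the next module
            filepath: input.filepath,
            mode: 'overwrite',      // or 'append', or 'newFile' plus newFilepath
        };
    },
};

// handleAgentRun('src/example.js', [echoModule]) would rewrite the file in place.
```
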
package/dist/commands/SwitchCmd.js
CHANGED

@@ -1,7 +1,7 @@
 // File: src/commands/switch.ts
 import readline from 'readline';
 import { Config, writeConfig } from '../config.js';
-import { getRepoKeyForPath } from '../utils/
+import { getRepoKeyForPath } from '../utils/contentUtils.js';
 import chalk from 'chalk';
 export function runSwitchCommand(inputPathOrKey) {
     const config = Config.getRaw();

package/dist/config.js
CHANGED
@@ -2,7 +2,7 @@ import fs from 'fs';
 import path from 'path';
 import { CONFIG_PATH, SCAI_HOME, SCAI_REPOS } from './constants.js';
 import { getDbForRepo } from './db/client.js';
-import { normalizePath } from './utils/
+import { normalizePath } from './utils/contentUtils.js';
 import chalk from 'chalk';
 import { getHashedRepoKey } from './utils/repoKey.js';
 const defaultConfig = {

package/dist/context.js
CHANGED
@@ -1,10 +1,12 @@
 // context.ts
-import { readConfig, writeConfig } from "./config.js";
-import { normalizePath } from "./utils/
+import { readConfig, writeConfig, Config } from "./config.js";
+import { normalizePath } from "./utils/contentUtils.js";
 import { getHashedRepoKey } from "./utils/repoKey.js";
 import { getDbForRepo, getDbPathForRepo } from "./db/client.js";
 import fs from "fs";
 import chalk from "chalk";
+import { generate } from "./lib/generate.js"; // 👈 use your existing generate wrapper
+import { startModelProcess } from "./utils/checkModel.js";
 export async function updateContext() {
     const cwd = normalizePath(process.cwd());
     const cfg = readConfig();
@@ -54,6 +56,17 @@ export async function updateContext() {
     else if (isNewRepo || activeRepoChanged) {
         console.log(chalk.green("✅ Database present"));
     }
+    // ✅ NEW: Ensure model is available
+    if (ok) {
+        const modelReady = await ensureModelReady();
+        if (modelReady) {
+            console.log(chalk.green("✅ Model ready"));
+        }
+        else {
+            console.log(chalk.red("❌ Model not available"));
+            ok = false;
+        }
+    }
     // Final context status
     if (ok) {
         console.log(chalk.bold.green("\n✅ Context OK\n"));
@@ -63,3 +76,15 @@ export async function updateContext() {
     }
     return ok;
 }
+async function ensureModelReady() {
+    try {
+        // simple "ping" prompt that costs almost nothing
+        const res = await generate({ content: "ping" }, Config.getModel());
+        return Boolean(res?.content);
+    }
+    catch {
+        console.log(chalk.yellow("⚡ Model not responding. Attempting to start..."));
+        await startModelProcess();
+        return false;
+    }
+}

package/dist/index.js
CHANGED
@@ -8,7 +8,6 @@ import { suggestCommitMessage } from "./commands/CommitSuggesterCmd.js";
 import { bootstrap } from './modelSetup.js';
 import { summarizeFile } from "./commands/SummaryCmd.js";
 import { handleStandaloneChangelogUpdate } from './commands/ChangeLogUpdateCmd.js';
-import { runModulePipelineFromCLI } from './commands/ModulePipelineCmd.js';
 import { runIndexCommand } from './commands/IndexCmd.js';
 import { resetDatabase } from './commands/ResetDbCmd.js';
 import { runFindCommand } from './commands/FindCmd.js';
@@ -26,13 +25,16 @@ import { runInteractiveSwitch } from "./commands/SwitchCmd.js";
 import { execSync } from "child_process";
 import { fileURLToPath } from "url";
 import { dirname, resolve } from "path";
-import { handleAgentRun } from './
+import { handleAgentRun } from './agent/workflowManager.js';
 import { addCommentsModule } from './pipeline/modules/commentModule.js';
 import { generateTestsModule } from './pipeline/modules/generateTestsModule.js';
 import { preserveCodeModule } from './pipeline/modules/preserveCodeModule.js';
 import { runInteractiveDelete } from './commands/DeleteIndex.js';
 import { resolveTargetsToFiles } from './utils/resolveTargetsToFiles.js';
 import { updateContext } from './context.js';
+import { cleanGeneratedTestsModule } from './pipeline/modules/cleanGeneratedTestsModule.js';
+import { Agent } from './agent/agentManager.js';
+import { builtInModules } from './pipeline/registry/moduleRegistry.js';
 // 🎛️ CLI Setup
 const cmd = new Command('scai')
     .version(version)
@@ -46,6 +48,34 @@ cmd
     await bootstrap();
     console.log('✅ Model initialization completed!');
 });
+// 🔧 Group: Agent-related commands
+const agent = cmd
+    .command('agent')
+    .description(`Run an agent workflow. Available tools:\n` +
+    Object.keys(builtInModules).map(m => ` - ${m}`).join('\n') +
+    `\n\nExample usage:\n` +
+    ` $ scai agent run summary cleanup tests\n` +
+    ` This will run the agent with the goals: summary → cleanup → tests\n`);
+// Run workflow subcommand
+const runCmd = agent
+    .command('run <goals...>')
+    .description('Run an agent workflow with a list of goals')
+    .option('-f, --file <filepath>', 'File to process', 'example.txt')
+    .action(async (cmdGoals, cmd) => {
+    await withContext(async () => {
+        const goals = cmdGoals;
+        const file = cmd.file;
+        console.log('Agent will execute:', goals.join(' → '));
+        console.log('On file:', file);
+        const agentInstance = new Agent(goals);
+        await agentInstance.execute(file);
+    });
+});
+// Inject modules list into the --help output
+runCmd.on('--help', () => {
+    console.log('\nAvailable tools:');
+    Object.keys(builtInModules).forEach(m => console.log(` - ${m}`));
+});
 // 🔧 Group: Git-related commands
 const git = cmd.command('git').description('Git utilities');
 git
@@ -149,12 +179,14 @@ gen
     });
 });
 gen
-    .command(
-    .description(
-    .
-    .action(async (file) => {
+    .command("test <targets...>")
+    .description("Generate tests for the given file(s) or folder(s)")
+    .action(async (targets, options) => {
     await withContext(async () => {
-
+        const files = await resolveTargetsToFiles(targets);
+        for (const file of files) {
+            await handleAgentRun(file, [generateTestsModule, cleanGeneratedTestsModule]);
+        }
     });
 });
 // ⚙️ Group: Configuration settings
@@ -298,18 +330,6 @@ cmd
     const fullQuery = questionParts?.join(' ');
     await runAskCommand(fullQuery);
 }));
-cmd
-    .command('pipe')
-    .description('Run a module pipeline on a given file')
-    .argument('<file>', 'Target file')
-    .option('-m, --modules <modules>', 'Comma-separated list of modules to run (e.g., comments,cleanup,summary)')
-    .action((file, options) => {
-    if (!options.modules) {
-        console.error('❌ You must specify modules with -m or --modules');
-        process.exit(1);
-    }
-    runModulePipelineFromCLI(file, options);
-});
 cmd.addHelpText('after', `
 🚨 Alpha Features:
 - The "index", "daemon", "stop-daemon", "reset-db" commands are considered alpha features.

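In plain terms, the reworked gen test subcommand resolves its targets to files and pushes each one through test generation followed by cleanup. Per resolved file it amounts to roughly this sketch (the path is illustrative):

```js
// Rough per-file equivalent of `scai gen test <targets...>` (path illustrative).
import { handleAgentRun } from './agent/workflowManager.js';
import { generateTestsModule } from './pipeline/modules/generateTestsModule.js';
import { cleanGeneratedTestsModule } from './pipeline/modules/cleanGeneratedTestsModule.js';

await handleAgentRun('src/example.js', [generateTestsModule, cleanGeneratedTestsModule]);
// generateTestsModule returns mode "newFile" (example.test.js); the cleaner then
// overwrites that new file with fence- and prose-free test code.
```
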
package/dist/pipeline/modules/cleanGeneratedTestsModule.js
ADDED

@@ -0,0 +1,22 @@
+import { normalizeText, stripMarkdownFences, isCodeLike } from "../../utils/contentUtils.js";
+export const cleanGeneratedTestsModule = {
+    name: "cleanGeneratedTestsModule",
+    description: "Removes markdown fences, prose, and explanations from generated test output, leaving only code",
+    async run(input) {
+        const { content, filepath } = input;
+        // normalize + strip markdown
+        const normalized = normalizeText(content);
+        const stripped = stripMarkdownFences(normalized);
+        // filter non-code lines
+        const lines = stripped.split("\n");
+        // filter non-code lines, but keep blank ones
+        const codeLines = lines.filter(line => line.trim() === "" || isCodeLike(line));
+        const cleanedCode = codeLines.join("\n");
+        return {
+            originalContent: content,
+            content: cleanedCode, // cleaned code for pipeline
+            filepath, // original file path
+            mode: "overwrite", // indicates overwrite existing file
+        };
+    }
+};

package/dist/pipeline/modules/commentModule.js
CHANGED

@@ -59,6 +59,9 @@ ${input.content}
 `.trim();
     const response = await generate({ content: prompt }, model);
     const contentToReturn = (response.content && response.content !== 'NO UPDATE') ? response.content : input.content;
-    return {
+    return {
+        content: contentToReturn,
+        mode: 'overwrite', // <-- declares that the original file should be overwritten
+    };
 },
 };

package/dist/pipeline/modules/generateTestsModule.js
CHANGED

@@ -1,4 +1,3 @@
-import fs from 'fs/promises';
 import path from 'path';
 import { generate } from '../../lib/generate.js';
 import { detectFileType } from '../../fileRules/detectFileType.js';
@@ -11,28 +10,50 @@ export const generateTestsModule = {
         throw new Error('Missing filepath in pipeline context');
     const model = Config.getModel();
     const lang = detectFileType(filepath);
+    const repoRoot = Config.getIndexDir();
+    // Compute relative import path (repo-relative, without extension)
+    const relativePath = path.relative(repoRoot, filepath);
+    const { dir, name, ext } = path.parse(relativePath);
+    const importPath = './' + path.join(dir, name).replace(/\\/g, '/');
+    // Where the test should be written (next to source file)
+    const absParsed = path.parse(filepath);
+    const testPath = path.join(absParsed.dir, `${absParsed.name}.test${ext}`);
     const prompt = `
-
+You are a senior ${lang.toUpperCase()} engineer. Generate a Jest test file for the module below.
 
-
-
-
-
-
-
+Requirements:
+- Use the 'jest' test framework.
+- Always include imports at the top:
+import { describe, it, expect } from '@jest/globals';
+import * as moduleUnderTest from '${importPath}';
+- Cover only one public method: the most relevant or central function.
+- Include one edge case for that method.
+- Preserve and consider existing code comments in the module.
+- Only output valid ${lang} code; do not include markdown fences or explanations.
+- Use this scaffold at minimum:
 
-
-
-
-
+import { describe, it, expect } from '@jest/globals';
+import * as moduleUnderTest from '${importPath}';
+
+describe('moduleUnderTest', () => {
+it('should ...', () => {
+// test implementation
+});
+});
+
+--- MODULE CODE ---
+${content}
+--- END MODULE CODE ---
+`.trim();
     const response = await generate({ content: prompt }, model);
     if (!response)
         throw new Error('⚠️ No test code returned from model');
-
-
-
-
-
+    return {
+        originalContent: content,
+        content: response.content, // the test code
+        filepath, // original file path
+        newFilepath: testPath,
+        mode: "newFile" // ensure it gets written as a new file
+    };
 }
 };

package/dist/pipeline/modules/preserveCodeModule.js
CHANGED

@@ -144,6 +144,10 @@ export const preserveCodeModule = {
         const colored = type === "code" ? chalk.green(line) : chalk.yellow(line);
         console.log(`${i + 1}: ${colored} ${chalk.gray(`[${type}]`)}`);
     });
-    return {
+    return {
+        content: fixedLines.join("\n"),
+        filepath,
+        mode: "overwrite"
+    };
 }
 };

package/dist/pipeline/registry/moduleRegistry.js
CHANGED

@@ -4,14 +4,18 @@ import { summaryModule } from '../modules/summaryModule.js';
 import { generateTestsModule } from '../modules/generateTestsModule.js';
 import { commitSuggesterModule } from '../modules/commitSuggesterModule.js';
 import { changelogModule } from '../modules/changeLogModule.js';
+import { cleanGeneratedTestsModule } from '../modules/cleanGeneratedTestsModule.js';
+import { preserveCodeModule } from '../modules/preserveCodeModule.js';
 // Add more as needed...
-const builtInModules = {
+export const builtInModules = {
     comments: addCommentsModule,
     cleanup: cleanupModule,
     summary: summaryModule,
     tests: generateTestsModule,
     suggest: commitSuggesterModule,
     changelog: changelogModule,
+    cleanTests: cleanGeneratedTestsModule,
+    cleanComments: preserveCodeModule
 };
 export function getModuleByName(name) {
     return builtInModules[name];

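Since builtInModules is now exported, callers can list or validate goal names before building an Agent. A small sketch of the lookup path the agent code takes, assuming the registry keys shown above:

```js
import { builtInModules, getModuleByName } from './pipeline/registry/moduleRegistry.js';

// Keys now include the new entries: cleanTests and cleanComments.
console.log(Object.keys(builtInModules).join(', '));

const mod = getModuleByName('cleanTests');
if (!mod)
    throw new Error('Unknown module: cleanTests');
```
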
package/dist/utils/checkModel.js
ADDED

@@ -0,0 +1,30 @@
+import { spawn } from "child_process";
+let modelProcess = null;
+async function isModelRunning() {
+    try {
+        const res = await fetch("http://localhost:11434/health"); // whatever endpoint your model exposes
+        return res.ok;
+    }
+    catch {
+        return false;
+    }
+}
+export async function startModelProcess() {
+    if (await isModelRunning()) {
+        console.log("✅ Model already running");
+        return;
+    }
+    console.log("🚀 Starting model process...");
+    modelProcess = spawn("ollama", ["serve"], {
+        stdio: "inherit",
+    });
+    // Poll until the model is ready
+    for (let i = 0; i < 30; i++) {
+        if (await isModelRunning()) {
+            console.log("✅ Model is now running");
+            return;
+        }
+        await new Promise((res) => setTimeout(res, 1000));
+    }
+    throw new Error("❌ Model failed to start in time");
+}

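context.js (earlier in this diff) pairs this helper with a cheap generate() ping; standalone usage would look roughly like this sketch:

```js
import { startModelProcess } from './utils/checkModel.js';

// Sketch: make sure the local model server is up before issuing prompts.
try {
    await startModelProcess(); // resolves once the health endpoint responds, throws after ~30s
} catch (err) {
    console.error('Model could not be started:', err instanceof Error ? err.message : err);
}
```
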
package/dist/utils/contentUtils.js
ADDED

@@ -0,0 +1,49 @@
+import os from 'os';
+import path from "path";
+/**
+ * Normalizes a path string for loose, fuzzy matching:
+ * - Lowercases
+ * - Removes slashes and backslashes
+ * - Removes whitespace
+ */
+export function normalizePathForLooseMatch(p) {
+    return p.toLowerCase().replace(/[\\/]/g, '').replace(/\s+/g, '');
+}
+// Helper to normalize and resolve paths to a consistent format (forward slashes)
+export function normalizePath(p) {
+    if (p.startsWith('~')) {
+        p = path.join(os.homedir(), p.slice(1));
+    }
+    return path.resolve(p).replace(/\\/g, '/');
+}
+export function getRepoKeyForPath(pathToMatch, config) {
+    const norm = normalizePath(pathToMatch);
+    return Object.entries(config.repos).find(([, val]) => normalizePath(val.indexDir) === norm)?.[0] || null;
+}
+export function normalizeText(txt) {
+    return txt.replace(/\r\n/g, "\n").replace(/\r/g, "\n");
+}
+export function stripMarkdownFences(txt) {
+    return txt
+        .replace(/```[\w-]*\s*/g, "") // ``` or ```java
+        .replace(/```/g, ""); // closing ```
+}
+// Very naive classifier: decide if a line is "code-like"
+export function isCodeLike(line) {
+    const trimmed = line.trim();
+    if (!trimmed)
+        return false;
+    // obvious markdown / prose markers
+    if (/^(This|Here is|Note)\b/.test(trimmed))
+        return false;
+    if (/^\d+\./.test(trimmed))
+        return false; // bullet list
+    if (/^[-*] /.test(trimmed))
+        return false; // list
+    // allow imports, class, functions, braces, annotations, etc.
+    if (/^(import|export|public|private|protected|class|function|@Test|@Before)/.test(trimmed))
+        return true;
+    if (/[;{}()=]/.test(trimmed))
+        return true;
+    return false;
+}

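The three cleaning helpers compose the way cleanGeneratedTestsModule uses them; a hedged example on an invented model reply:

```js
import { normalizeText, stripMarkdownFences, isCodeLike } from './utils/contentUtils.js';

// Hypothetical model reply: a prose lead-in plus code, with Windows line endings.
const raw = 'Here is the test:\r\nimport { describe, it, expect } from "@jest/globals";\r\ndescribe("x", () => {});\r\n';
const cleaned = stripMarkdownFences(normalizeText(raw)) // fences, if present, are stripped too
    .split('\n')
    .filter(line => line.trim() === '' || isCodeLike(line))
    .join('\n');
// cleaned drops the "Here is the test:" prose line and keeps the two code lines.
```
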
package/dist/utils/repoKey.js
CHANGED
@@ -1,6 +1,6 @@
 import crypto from 'crypto';
 import path from 'path';
-import { normalizePath } from './
+import { normalizePath } from './contentUtils.js';
 /**
  * Generate a stable unique key for a repo path.
  * Uses the basename plus a short hash of the full path.

package/package.json
CHANGED
package/dist/agentManager.js
DELETED
@@ -1,47 +0,0 @@
-import fs from 'fs/promises';
-import chalk from 'chalk';
-import { runModulePipeline } from './pipeline/runModulePipeline.js';
-import { normalizePath } from './utils/normalizePath.js';
-import { readConfig } from './config.js';
-import { countTokens, splitCodeIntoChunks } from './utils/splitCodeIntoChunk.js';
-export async function handleAgentRun(filepath, modules) {
-    try {
-        filepath = normalizePath(filepath);
-        const content = await fs.readFile(filepath, 'utf-8');
-        const totalTokens = countTokens(content);
-        console.log(chalk.blue(`🧮 Total tokens in file:`), chalk.yellow(totalTokens.toString()));
-        const config = readConfig();
-        const maxTokens = 1500;
-        const chunks = splitCodeIntoChunks(content, maxTokens);
-        console.log(chalk.magenta(`📦 Split into ${chunks.length} chunks`));
-        const processedChunks = [];
-        for (const [i, chunk] of chunks.entries()) {
-            const chunkTokens = countTokens(chunk);
-            if (i === 0) {
-                console.log(chalk.cyan(`🔍 Processing ${chunks.length} chunks of file:`), chalk.white(filepath));
-            }
-            console.log(chalk.gray(` - Chunk ${i + 1} tokens:`), chalk.yellow(chunkTokens.toString()));
-            const chunkInput = {
-                originalContent: chunk,
-                content: chunk,
-                filepath,
-                chunkIndex: i,
-                chunkCount: chunks.length,
-            };
-            const response = await runModulePipeline(modules, chunkInput);
-            if (!response.content.trim()) {
-                throw new Error(`⚠️ Model returned empty result on chunk ${i + 1}`);
-            }
-            processedChunks.push(response.content);
-            //console.log(chalk.green(`✅ Finished chunk ${i + 1}/${chunks.length}`));
-        }
-        // Join all chunk outputs into one string
-        const finalOutput = processedChunks.join('\n\n');
-        // Overwrite original file here:
-        await fs.writeFile(filepath, finalOutput, 'utf-8');
-        console.log(chalk.green(`✅ Original file overwritten: ${filepath}`));
-    }
-    catch (err) {
-        console.error(chalk.red('❌ Error in agent run:'), err instanceof Error ? err.message : err);
-    }
-}

package/dist/utils/normalizePath.js
DELETED

@@ -1,23 +0,0 @@
-// src/utils/normalizePath.ts
-import os from 'os';
-import path from "path";
-/**
- * Normalizes a path string for loose, fuzzy matching:
- * - Lowercases
- * - Removes slashes and backslashes
- * - Removes whitespace
- */
-export function normalizePathForLooseMatch(p) {
-    return p.toLowerCase().replace(/[\\/]/g, '').replace(/\s+/g, '');
-}
-// Helper to normalize and resolve paths to a consistent format (forward slashes)
-export function normalizePath(p) {
-    if (p.startsWith('~')) {
-        p = path.join(os.homedir(), p.slice(1));
-    }
-    return path.resolve(p).replace(/\\/g, '/');
-}
-export function getRepoKeyForPath(pathToMatch, config) {
-    const norm = normalizePath(pathToMatch);
-    return Object.entries(config.repos).find(([, val]) => normalizePath(val.indexDir) === norm)?.[0] || null;
-}