scai 0.1.102 → 0.1.104
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/CHANGELOG.md +5 -1
- package/dist/commands/SwitchCmd.js +1 -1
- package/dist/config.js +1 -1
- package/dist/context.js +27 -2
- package/dist/fileRules/detectFileType.js +45 -48
- package/dist/index.js +11 -7
- package/dist/pipeline/modules/cleanGeneratedTestsModule.js +21 -0
- package/dist/pipeline/modules/commentModule.js +8 -15
- package/dist/pipeline/modules/generateTestsModule.js +22 -16
- package/dist/pipeline/modules/preserveCodeModule.js +38 -20
- package/dist/pipeline/registry/moduleRegistry.js +4 -0
- package/dist/pipeline/runModulePipeline.js +5 -2
- package/dist/utils/checkModel.js +30 -0
- package/dist/utils/commentMap.js +2 -1
- package/dist/utils/contentUtils.js +49 -0
- package/dist/utils/repoKey.js +1 -1
- package/dist/utils/resolveTargetsToFiles.js +1 -1
- package/dist/workflowManager.js +89 -0
- package/package.json +1 -1
- package/dist/agentManager.js +0 -47
- package/dist/utils/normalizePath.js +0 -23
package/dist/CHANGELOG.md
CHANGED

@@ -146,4 +146,8 @@ Type handling with the module pipeline
 
 • Fixed bug where entire block was returned as a single line for multi-line comments
 • Add multi-line comment handling with ~90% accuracy
-• Update CLI config file to use codellama:13b model and 4096 context length
+• Update CLI config file to use codellama:13b model and 4096 context length
+
+## 2025-08-30
+
+* Add new workflow management functionality to handle file writes.

package/dist/commands/SwitchCmd.js
CHANGED

@@ -1,7 +1,7 @@
 // File: src/commands/switch.ts
 import readline from 'readline';
 import { Config, writeConfig } from '../config.js';
-import { getRepoKeyForPath } from '../utils/
+import { getRepoKeyForPath } from '../utils/contentUtils.js';
 import chalk from 'chalk';
 export function runSwitchCommand(inputPathOrKey) {
     const config = Config.getRaw();

package/dist/config.js
CHANGED

@@ -2,7 +2,7 @@ import fs from 'fs';
 import path from 'path';
 import { CONFIG_PATH, SCAI_HOME, SCAI_REPOS } from './constants.js';
 import { getDbForRepo } from './db/client.js';
-import { normalizePath } from './utils/
+import { normalizePath } from './utils/contentUtils.js';
 import chalk from 'chalk';
 import { getHashedRepoKey } from './utils/repoKey.js';
 const defaultConfig = {

package/dist/context.js
CHANGED

@@ -1,10 +1,12 @@
 // context.ts
-import { readConfig, writeConfig } from "./config.js";
-import { normalizePath } from "./utils/
+import { readConfig, writeConfig, Config } from "./config.js";
+import { normalizePath } from "./utils/contentUtils.js";
 import { getHashedRepoKey } from "./utils/repoKey.js";
 import { getDbForRepo, getDbPathForRepo } from "./db/client.js";
 import fs from "fs";
 import chalk from "chalk";
+import { generate } from "./lib/generate.js"; // 👈 use your existing generate wrapper
+import { startModelProcess } from "./utils/checkModel.js";
 export async function updateContext() {
     const cwd = normalizePath(process.cwd());
     const cfg = readConfig();

@@ -54,6 +56,17 @@ export async function updateContext() {
     else if (isNewRepo || activeRepoChanged) {
         console.log(chalk.green("✅ Database present"));
     }
+    // ✅ NEW: Ensure model is available
+    if (ok) {
+        const modelReady = await ensureModelReady();
+        if (modelReady) {
+            console.log(chalk.green("✅ Model ready"));
+        }
+        else {
+            console.log(chalk.red("❌ Model not available"));
+            ok = false;
+        }
+    }
     // Final context status
     if (ok) {
         console.log(chalk.bold.green("\n✅ Context OK\n"));

@@ -63,3 +76,15 @@ export async function updateContext() {
     }
     return ok;
 }
+async function ensureModelReady() {
+    try {
+        // simple "ping" prompt that costs almost nothing
+        const res = await generate({ content: "ping" }, Config.getModel());
+        return Boolean(res?.content);
+    }
+    catch {
+        console.log(chalk.yellow("⚡ Model not responding. Attempting to start..."));
+        await startModelProcess();
+        return false;
+    }
+}

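The new model check is transparent to callers of updateContext(); a minimal usage sketch, assuming the CLI keeps calling it the same way the existing commands do:

    import { updateContext } from './context.js';
    import chalk from 'chalk';

    // updateContext() now also pings the model with a cheap "ping" prompt and,
    // on failure, tries to start it via startModelProcess() before reporting false.
    const ok = await updateContext();
    if (!ok) {
        console.log(chalk.red('Context not ready (database or model unavailable)'));
    }
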
package/dist/fileRules/detectFileType.js
CHANGED

@@ -1,52 +1,49 @@
 import path from 'path';
-export function detectFileType(
+export function detectFileType(filepathOrExt) {
+    // If it's already an extension (starts with '.'), use it directly
+    const ext = filepathOrExt.startsWith(".")
+        ? filepathOrExt.toLowerCase()
+        : path.extname(filepathOrExt).toLowerCase();
     const map = {
-        '.docx': 'word',
-        '.pdf': 'pdf',
-        '.ppt': 'powerpoint',
-        '.pptx': 'powerpoint',
-        '.xls': 'excel',
-        '.xlsx': 'excel',
+        ".ts": "typescript",
+        ".tsx": "typescript",
+        ".js": "javascript",
+        ".jsx": "javascript",
+        ".java": "java",
+        ".py": "python",
+        ".rb": "ruby",
+        ".php": "php",
+        ".go": "go",
+        ".rs": "rust",
+        ".c": "c",
+        ".cpp": "cpp",
+        ".cs": "csharp",
+        ".swift": "swift",
+        ".kt": "kotlin",
+        ".scala": "scala",
+        ".md": "markdown",
+        ".html": "html",
+        ".htm": "html",
+        ".xml": "xml",
+        ".json": "json",
+        ".yaml": "yaml",
+        ".yml": "yaml",
+        ".ini": "config",
+        ".toml": "config",
+        ".env": "config",
+        ".sql": "sql",
+        ".csv": "csv",
+        ".tsv": "tsv",
+        ".txt": "text",
+        ".log": "log",
+        ".rst": "text",
+        ".doc": "word",
+        ".docx": "word",
+        ".pdf": "pdf",
+        ".ppt": "powerpoint",
+        ".pptx": "powerpoint",
+        ".xls": "excel",
+        ".xlsx": "excel",
     };
-    return map[ext] || ext.replace(
+    return map[ext] || ext.replace(".", "") || "unknown";
 }

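The rewritten detectFileType accepts either a file path or a bare extension and falls back to the stripped extension or "unknown". A small usage sketch (import path shown relative to dist/, call sites assumed):

    import { detectFileType } from './fileRules/detectFileType.js';

    detectFileType('src/utils/repoKey.ts'); // "typescript" (extension looked up in the map)
    detectFileType('.py');                  // "python" (a bare extension is used directly)
    detectFileType('notes.foo');            // "foo" (unknown extension, dot stripped)
    detectFileType('Makefile');             // "unknown" (no extension at all)
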
package/dist/index.js
CHANGED

@@ -26,13 +26,14 @@ import { runInteractiveSwitch } from "./commands/SwitchCmd.js";
 import { execSync } from "child_process";
 import { fileURLToPath } from "url";
 import { dirname, resolve } from "path";
-import { handleAgentRun } from './
+import { handleAgentRun } from './workflowManager.js';
 import { addCommentsModule } from './pipeline/modules/commentModule.js';
 import { generateTestsModule } from './pipeline/modules/generateTestsModule.js';
 import { preserveCodeModule } from './pipeline/modules/preserveCodeModule.js';
 import { runInteractiveDelete } from './commands/DeleteIndex.js';
 import { resolveTargetsToFiles } from './utils/resolveTargetsToFiles.js';
 import { updateContext } from './context.js';
+import { cleanGeneratedTestsModule } from './pipeline/modules/cleanGeneratedTestsModule.js';
 // 🎛️ CLI Setup
 const cmd = new Command('scai')
     .version(version)

@@ -125,7 +126,8 @@ gen
     .description("Write comments for the given file(s) or folder(s)")
     .action(async (targets) => {
     await withContext(async () => {
-
+        // Remove the file type filter to allow any file
+        const files = await resolveTargetsToFiles(targets);
         for (const file of files) {
             await handleAgentRun(file, [addCommentsModule, preserveCodeModule]);
         }

@@ -148,12 +150,14 @@ gen
     });
 });
 gen
-    .command(
-    .description(
-    .
-    .action(async (file) => {
+    .command("test <targets...>")
+    .description("Generate tests for the given file(s) or folder(s)")
+    .action(async (targets, options) => {
     await withContext(async () => {
-
+        const files = await resolveTargetsToFiles(targets);
+        for (const file of files) {
+            await handleAgentRun(file, [generateTestsModule, cleanGeneratedTestsModule]);
+        }
     });
 });
 // ⚙️ Group: Configuration settings

package/dist/pipeline/modules/cleanGeneratedTestsModule.js
ADDED

@@ -0,0 +1,21 @@
+import { normalizeText, stripMarkdownFences, isCodeLike } from "../../utils/contentUtils.js";
+export const cleanGeneratedTestsModule = {
+    name: "cleanGeneratedTestsModule",
+    description: "Removes markdown fences, prose, and explanations from generated test output, leaving only code",
+    async run(input) {
+        const { content, filepath } = input;
+        // normalize + strip markdown
+        const normalized = normalizeText(content);
+        const stripped = stripMarkdownFences(normalized);
+        // filter non-code lines
+        const lines = stripped.split("\n");
+        const codeLines = lines.filter(line => isCodeLike(line));
+        const cleanedCode = codeLines.join("\n");
+        return {
+            originalContent: content,
+            content: cleanedCode, // cleaned code for pipeline
+            filepath, // original file path
+            mode: "overwrite", // indicates overwrite existing file
+        };
+    }
+};

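A sketch of what the new cleaning module does to typical model output; the sample strings and path below are illustrative only:

    import { cleanGeneratedTestsModule } from './pipeline/modules/cleanGeneratedTestsModule.js';

    const raw = [
        'Here is a Jest test for your module:',
        '```javascript',
        "import { add } from './math.js';",
        "test('adds two numbers', () => { expect(add(1, 2)).toBe(3); });",
        '```',
        'Note: adjust the import path to match your project.',
    ].join('\n');

    const out = await cleanGeneratedTestsModule.run({ content: raw, filepath: 'src/math.test.js' });
    // out.content now contains only the two code lines; out.mode === "overwrite",
    // so the workflow manager writes it back over the generated test file.
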
package/dist/pipeline/modules/commentModule.js
CHANGED

@@ -42,18 +42,10 @@ You are a senior engineer reviewing a ${fileType} file.
 
 Please:
 
-
-
-
-
-2. Add clear, helpful inline comments to explain non-obvious logic inside functions.
-- Preserve all existing comments as-is.
-
-3. Use "${commentSyntax}" as the comment syntax appropriate for ${fileType}.
-
-4. Preserve all original formatting, whitespace, and code exactly.
-
-5. You may only be shown parts of a file. Add comments as instructed.
+- Add summary comments (2-3 lines) at relevant points for greater class insights
+- Add clear, helpful inline comments to explain non-obvious logic inside functions.
+- Use "${commentSyntax}" as the comment syntax appropriate for ${fileType}.
+- Preserve all original formatting, whitespace, and code exactly.
 
 Rules:
 - Return the full original chunk of code with added comments only.

@@ -62,13 +54,14 @@ Rules:
 - Inline comments should clarify complex or tricky parts only.
 - Do NOT add any “chunk” start/end markers, numbering, or metadata to the output.
 
-Code to comments is below this line:
-
 ${input.content}
 
 `.trim();
     const response = await generate({ content: prompt }, model);
     const contentToReturn = (response.content && response.content !== 'NO UPDATE') ? response.content : input.content;
-    return {
+    return {
+        content: contentToReturn,
+        mode: 'overwrite', // <-- declares that the original file should be overwritten
+    };
 },
 };

package/dist/pipeline/modules/generateTestsModule.js
CHANGED

@@ -1,35 +1,41 @@
-import fs from 'fs/promises';
 import path from 'path';
-import { Config } from '../../config.js';
 import { generate } from '../../lib/generate.js';
+import { detectFileType } from '../../fileRules/detectFileType.js';
+import { Config } from '../../config.js';
 export const generateTestsModule = {
     name: 'tests',
     description: 'Generate a Jest test file for the class/module',
     async run({ content, filepath }) {
-        const model = Config.getModel();
-        const lang = Config.getLanguage();
         if (!filepath)
             throw new Error('Missing filepath in pipeline context');
+        const model = Config.getModel();
+        const lang = detectFileType(filepath);
         const prompt = `
-You are a senior ${lang.toUpperCase()} engineer. Given the following class or module, generate a Jest test file.
+You are a senior ${lang.toUpperCase()} engineer. Given the following class or module, generate a Jest test file.
 
-Guidelines:
-- Use the 'jest' test framework
-- Cover public
-
-
+Guidelines:
+- Use the 'jest' test framework
+- Cover only one public method: the most relevant or central function
+- Include one edge case for that method
+- Preserve and consider existing code comments
+- Name the file <original>.test.${path.extname(filepath).slice(1)}
+- Only return valid ${lang} code
 
---- CODE START ---
-${content}
---- CODE END ---
+--- CODE START ---
+${content}
+--- CODE END ---
 `.trim();
         const response = await generate({ content: prompt }, model);
         if (!response)
             throw new Error('⚠️ No test code returned from model');
        const { dir, name } = path.parse(filepath);
        const testPath = path.join(dir, `${name}.test.ts`);
-
-
-
+        return {
+            originalContent: content,
+            content: response.content, // the test code
+            filepath, // original file path
+            newFilepath: testPath,
+            mode: "newFile" // where it *should* be written
+        };
     }
 };

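For reference, the shape a caller gets back from the updated module; the values below are illustrative, and the actual test code comes from the model:

    import { generateTestsModule } from './pipeline/modules/generateTestsModule.js';

    const source = 'export function add(a, b) { return a + b; }';
    const result = await generateTestsModule.run({ content: source, filepath: 'src/add.js' });
    // result.mode === "newFile" and result.newFilepath === "src/add.test.ts"
    // (testPath is still hard-coded to a .ts suffix, even though the prompt asks
    // the model to name the file <original>.test.<original extension>).
    // result.content holds the generated Jest test returned by the model.
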
package/dist/pipeline/modules/preserveCodeModule.js
CHANGED

@@ -1,4 +1,6 @@
 import chalk from "chalk";
+import { getCommentSyntax } from "../../utils/commentMap.js";
+import { detectFileType } from "../../fileRules/detectFileType.js";
 export const preserveCodeModule = {
     name: "preserveCodeModule",
     description: "Ensure code matches original exactly, preserving comments with clear before/after output",

@@ -6,10 +8,13 @@ export const preserveCodeModule = {
         const { originalContent, content, filepath } = input;
         if (!originalContent)
             throw new Error("Requires `originalContent`.");
-
-
-
-
+        // Determine language from filepath extension
+        console.log("Filepath: ", filepath);
+        const ext = "." + (filepath?.split(".").pop() || "ts");
+        console.log("Extension: ", ext);
+        const language = detectFileType(filepath ?? ext); // returns "javascript", "python", etc.
+        const syntax = getCommentSyntax(language);
+        console.log(`Using comment syntax for extension '${language}':`, syntax);
         // --- Normalize line endings ---
         const normalize = (txt) => txt.replace(/\r\n/g, "\n").replace(/\r/g, "\n");
         const origLines = normalize(originalContent).split("\n");

@@ -17,6 +22,7 @@ export const preserveCodeModule = {
         // --- Classify line ---
         let inBlockComment = false;
         let blockLines = [];
+        // --- Classify line ---
         // returns: "code" | comment string | null
         const classifyLine = (line) => {
             const trimmed = line.trimStart();

@@ -25,29 +31,38 @@ export const preserveCodeModule = {
                 if (trimmed.startsWith(s))
                     return line;
             }
-            // --- Multi-line comment ---
-
-
-
-
-            }
+            // --- Multi-line comment (optional) ---
+            const multiLineComments = syntax.multiLine ?? [];
+            if (!inBlockComment) {
+                // check if line starts a multi-line comment
+                for (const { start, end } of multiLineComments) {
                     if (trimmed.startsWith(start)) {
-                        inBlockComment = true;
                         blockLines = [line];
-
+                        if (trimmed.includes(end) && trimmed.indexOf(end) > trimmed.indexOf(start)) {
+                            // entire block on one line
+                            return blockLines.join("\n");
+                        }
+                        else {
+                            inBlockComment = true;
+                            return null; // wait for block to finish
+                        }
                     }
                 }
-
-
+            }
+            else {
+                // currently inside a multi-line block
+                blockLines.push(line);
+                for (const { end } of multiLineComments) {
                     if (trimmed.includes(end)) {
                         inBlockComment = false;
                         const fullBlock = blockLines.join("\n");
                         blockLines = [];
                         return fullBlock; // emit entire block
                     }
-                    return null; // still inside block
                 }
+                return null; // still inside block
             }
+            // --- default: code ---
             return "code";
         };
         // --- Helper: collect comment blocks into map ---

@@ -123,13 +138,16 @@ export const preserveCodeModule = {
         console.log(chalk.bold.blue("\n=== FIXED CONTENT ==="));
         fixedLines.forEach((line, i) => {
             const trimmed = line.trimStart();
-            const
-            syntax.multiLine.some(({ start }) => trimmed.startsWith(start))
-
-            : "code";
+            const isComment = syntax.singleLine.some(s => trimmed.startsWith(s)) ||
+                (syntax.multiLine ?? []).some(({ start }) => trimmed.startsWith(start));
+            const type = isComment ? "comment" : "code";
             const colored = type === "code" ? chalk.green(line) : chalk.yellow(line);
             console.log(`${i + 1}: ${colored} ${chalk.gray(`[${type}]`)}`);
         });
-        return {
+        return {
+            content: fixedLines.join("\n"),
+            filepath,
+            mode: "overwrite"
+        };
     }
 };

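A sketch of how the reworked module is invoked inside the pipeline; the inputs below are made up, but the field names match the destructuring in run():

    import { preserveCodeModule } from './pipeline/modules/preserveCodeModule.js';

    const original = 'function add(a, b) {\n    return a + b;\n}';
    const commented = '/* adds two numbers */\nfunction add(a, b) {\n    return a + b;\n}';

    const fixed = await preserveCodeModule.run({
        originalContent: original, // pristine chunk from the file on disk
        content: commented,        // model-edited chunk to reconcile
        filepath: 'src/add.js',    // hypothetical path; used to pick the comment syntax
    });
    // fixed.mode === "overwrite"; fixed.content is the reconciled text that keeps
    // the original code lines plus the recognised comment lines.
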
package/dist/pipeline/registry/moduleRegistry.js
CHANGED

@@ -4,6 +4,8 @@ import { summaryModule } from '../modules/summaryModule.js';
 import { generateTestsModule } from '../modules/generateTestsModule.js';
 import { commitSuggesterModule } from '../modules/commitSuggesterModule.js';
 import { changelogModule } from '../modules/changeLogModule.js';
+import { cleanGeneratedTestsModule } from '../modules/cleanGeneratedTestsModule.js';
+import { preserveCodeModule } from '../modules/preserveCodeModule.js';
 // Add more as needed...
 const builtInModules = {
     comments: addCommentsModule,

@@ -12,6 +14,8 @@ const builtInModules = {
     tests: generateTestsModule,
     suggest: commitSuggesterModule,
     changelog: changelogModule,
+    cleantTests: cleanGeneratedTestsModule,
+    cleanComments: preserveCodeModule
 };
 export function getModuleByName(name) {
     return builtInModules[name];

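The two new registry entries can be looked up by name like the existing ones (note that the test-cleaning key is spelled "cleantTests" in the code):

    import { getModuleByName } from './pipeline/registry/moduleRegistry.js';

    const cleaner = getModuleByName('cleantTests');     // cleanGeneratedTestsModule
    const preserver = getModuleByName('cleanComments'); // preserveCodeModule
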
package/dist/pipeline/runModulePipeline.js
CHANGED

@@ -4,15 +4,18 @@ export async function runModulePipeline(modules, input) {
     const isDebug = false;
     for (const mod of modules) {
         try {
+            if (isDebug)
+                console.log(current.filepath);
             const response = await mod.run(current);
             console.log(`⚙️ Running: ${mod.name}`);
             if (isDebug) {
                 console.log(chalk.yellow('➡️ Output:', response.content));
             }
-            // Safeguard
+            // Safeguard to preserve filepath + originalContent
             current = {
                 ...response,
-                originalContent: current.originalContent
+                originalContent: current.originalContent,
+                filepath: current.filepath,
             };
         }
         catch (error) {

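A minimal sketch of calling the pipeline runner directly with the fields it now carries between modules; the path and source are illustrative:

    import { runModulePipeline } from './pipeline/runModulePipeline.js';
    import { addCommentsModule } from './pipeline/modules/commentModule.js';
    import { preserveCodeModule } from './pipeline/modules/preserveCodeModule.js';

    const source = 'export const answer = 42;';
    const result = await runModulePipeline([addCommentsModule, preserveCodeModule], {
        originalContent: source,
        content: source,
        filepath: 'src/answer.js', // now preserved across every module, not just the first
    });
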
package/dist/utils/checkModel.js
ADDED

@@ -0,0 +1,30 @@
+import { spawn } from "child_process";
+let modelProcess = null;
+async function isModelRunning() {
+    try {
+        const res = await fetch("http://localhost:11434/health"); // whatever endpoint your model exposes
+        return res.ok;
+    }
+    catch {
+        return false;
+    }
+}
+export async function startModelProcess() {
+    if (await isModelRunning()) {
+        console.log("✅ Model already running");
+        return;
+    }
+    console.log("🚀 Starting model process...");
+    modelProcess = spawn("ollama", ["serve"], {
+        stdio: "inherit",
+    });
+    // Poll until the model is ready
+    for (let i = 0; i < 30; i++) {
+        if (await isModelRunning()) {
+            console.log("✅ Model is now running");
+            return;
+        }
+        await new Promise((res) => setTimeout(res, 1000));
+    }
+    throw new Error("❌ Model failed to start in time");
+}

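Usage is a single call; the helper assumes an Ollama-style server that answers on http://localhost:11434/health and is started with `ollama serve`:

    import { startModelProcess } from './utils/checkModel.js';

    // Spawns `ollama serve` if the health check fails, then polls once a second
    // for up to 30 seconds before throwing.
    await startModelProcess();
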
package/dist/utils/commentMap.js
CHANGED

@@ -75,5 +75,6 @@ export const commentMap = {
 */
 export function getCommentSyntax(language) {
     const normalized = language.toLowerCase();
-    return commentMap[normalized] ||
+    return (commentMap[normalized] ||
+        { singleLine: ["//"], multiLine: ["/*", "*/"] });
 }

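The changed return now guarantees a syntax object even for languages missing from commentMap; a usage sketch (the 'unknownlang' key is deliberately not in the map):

    import { getCommentSyntax } from './utils/commentMap.js';

    getCommentSyntax('javascript');  // the entry defined in commentMap, if present
    getCommentSyntax('unknownlang'); // { singleLine: ["//"], multiLine: ["/*", "*/"] } fallback
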
package/dist/utils/contentUtils.js
ADDED

@@ -0,0 +1,49 @@
+import os from 'os';
+import path from "path";
+/**
+ * Normalizes a path string for loose, fuzzy matching:
+ * - Lowercases
+ * - Removes slashes and backslashes
+ * - Removes whitespace
+ */
+export function normalizePathForLooseMatch(p) {
+    return p.toLowerCase().replace(/[\\/]/g, '').replace(/\s+/g, '');
+}
+// Helper to normalize and resolve paths to a consistent format (forward slashes)
+export function normalizePath(p) {
+    if (p.startsWith('~')) {
+        p = path.join(os.homedir(), p.slice(1));
+    }
+    return path.resolve(p).replace(/\\/g, '/');
+}
+export function getRepoKeyForPath(pathToMatch, config) {
+    const norm = normalizePath(pathToMatch);
+    return Object.entries(config.repos).find(([, val]) => normalizePath(val.indexDir) === norm)?.[0] || null;
+}
+export function normalizeText(txt) {
+    return txt.replace(/\r\n/g, "\n").replace(/\r/g, "\n");
+}
+export function stripMarkdownFences(txt) {
+    return txt
+        .replace(/```[\w-]*\s*/g, "") // ``` or ```java
+        .replace(/```/g, ""); // closing ```
+}
+// Very naive classifier: decide if a line is "code-like"
+export function isCodeLike(line) {
+    const trimmed = line.trim();
+    if (!trimmed)
+        return false;
+    // obvious markdown / prose markers
+    if (/^(This|Here is|Note)\b/.test(trimmed))
+        return false;
+    if (/^\d+\./.test(trimmed))
+        return false; // bullet list
+    if (/^[-*] /.test(trimmed))
+        return false; // list
+    // allow imports, class, functions, braces, annotations, etc.
+    if (/^(import|export|public|private|protected|class|function|@Test|@Before)/.test(trimmed))
+        return true;
+    if (/[;{}()=]/.test(trimmed))
+        return true;
+    return false;
+}

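A few calls showing what the new helpers do (inputs are illustrative):

    import { normalizePath, stripMarkdownFences, isCodeLike } from './utils/contentUtils.js';

    normalizePath('~/projects/scai');                // absolute path with forward slashes
    stripMarkdownFences('```js\nconst x = 1;\n```'); // "const x = 1;\n"
    isCodeLike('Here is the updated file:');         // false (prose marker)
    isCodeLike('const x = 1;');                      // true  (contains "=" and ";")
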
package/dist/utils/repoKey.js
CHANGED

@@ -1,6 +1,6 @@
 import crypto from 'crypto';
 import path from 'path';
-import { normalizePath } from './
+import { normalizePath } from './contentUtils.js';
 /**
  * Generate a stable unique key for a repo path.
  * Uses the basename plus a short hash of the full path.

package/dist/utils/resolveTargetsToFiles.js
CHANGED

@@ -21,7 +21,7 @@ async function collectFilesRecursive(dir, exts) {
     if (entry.isDirectory()) {
         files.push(...await collectFilesRecursive(fullPath, exts));
     }
-    else if (exts.includes(path.extname(entry.name))) {
+    else if (!exts || exts.includes(path.extname(entry.name))) {
         files.push(fullPath);
     }
 }

package/dist/workflowManager.js
ADDED

@@ -0,0 +1,89 @@
+// agentManager.ts
+import fs from 'fs/promises';
+import chalk from 'chalk';
+import { runModulePipeline } from './pipeline/runModulePipeline.js';
+import { countTokens, splitCodeIntoChunks } from './utils/splitCodeIntoChunk.js';
+import { normalizePath } from './utils/contentUtils.js';
+export async function handleAgentRun(filepath, modules) {
+    try {
+        filepath = normalizePath(filepath);
+        let fileContent = await fs.readFile(filepath, 'utf-8');
+        // Immutable baseline for this file (stays until file changes)
+        const maxTokens = 1500;
+        const baseChunks = splitCodeIntoChunks(fileContent, maxTokens);
+        // Working chunks that flow through modules; stays index-aligned with baseChunks
+        let workingChunks = [...baseChunks];
+        for (const mod of modules) {
+            console.log(chalk.cyan(`\n⚙️ Running module: ${mod.name}`));
+            console.log(chalk.blue(`🧮 Tokens:`), chalk.yellow(countTokens(fileContent).toString()));
+            console.log(chalk.magenta(`📦 Chunks: ${workingChunks.length}`));
+            const processed = [];
+            let mode;
+            let newFilepath;
+            for (let i = 0; i < workingChunks.length; i++) {
+                const input = {
+                    originalContent: baseChunks[i], // immutable baseline for this file
+                    content: workingChunks[i], // current state for this slice
+                    filepath,
+                    chunkIndex: i,
+                    chunkCount: workingChunks.length,
+                };
+                const out = await runModulePipeline([mod], input);
+                if (!out.content?.trim()) {
+                    throw new Error(`⚠️ Empty result on chunk ${i + 1}`);
+                }
+                processed.push(out.content);
+                // Capture mode/path (should be consistent across chunks)
+                if (out.mode)
+                    mode = out.mode;
+                if (out.newFilepath)
+                    newFilepath = out.newFilepath;
+            }
+            const finalOutput = processed.join('\n\n');
+            // Apply output mode
+            switch (mode ?? 'overwrite') {
+                case 'overwrite':
+                    await fs.writeFile(filepath, finalOutput, 'utf-8');
+                    console.log(chalk.green(`✅ Overwritten: ${filepath}`));
+                    // keep baseChunks (baseline stays the same), keep alignment: do NOT re-chunk
+                    workingChunks = processed;
+                    fileContent = finalOutput;
+                    break;
+                case 'append':
+                    await fs.appendFile(filepath, finalOutput, 'utf-8');
+                    console.log(chalk.green(`✅ Appended: ${filepath}`));
+                    // appended file content diverges; keep alignment by using processed as new working
+                    workingChunks = processed;
+                    fileContent += finalOutput;
+                    break;
+                case 'newFile':
+                    if (!newFilepath)
+                        throw new Error(`newFile mode requires newFilepath`);
+                    await fs.writeFile(newFilepath, finalOutput, 'utf-8');
+                    console.log(chalk.green(`✅ New file: ${newFilepath}`));
+                    // File context changes → reset baseline and working to the new file
+                    filepath = newFilepath;
+                    fileContent = finalOutput;
+                    const reset = splitCodeIntoChunks(fileContent, maxTokens);
+                    // new baseline for the new file (e.g., generated tests before cleaning)
+                    for (let i = 0; i < reset.length; i++)
+                        ; // (no-op; just clarity)
+                    // Replace both arrays to keep them in sync for subsequent modules
+                    workingChunks = reset;
+                    // Important: also reset baseChunks to this new file’s content so the next module
+                    // (e.g., cleaner) sees the *generated tests* as its originalContent baseline.
+                    baseChunks.length = 0;
+                    baseChunks.push(...reset);
+                    break;
+                default:
+                    console.log(chalk.yellow(`⚠️ Unknown mode; skipping write`));
+                    // still move pipeline forward with processed
+                    workingChunks = processed;
+                    fileContent = finalOutput;
+            }
+        }
+    }
+    catch (err) {
+        console.error(chalk.red('❌ Error in agent run:'), err instanceof Error ? err.message : err);
+    }
+}

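The new manager is what the CLI commands above call; a usage sketch mirroring the `test` command wiring in index.js (the path is hypothetical):

    import { handleAgentRun } from './workflowManager.js';
    import { generateTestsModule } from './pipeline/modules/generateTestsModule.js';
    import { cleanGeneratedTestsModule } from './pipeline/modules/cleanGeneratedTestsModule.js';

    // The first module writes <name>.test.ts (mode "newFile"); the second then
    // cleans that generated file in place (mode "overwrite").
    await handleAgentRun('src/calculator.js', [generateTestsModule, cleanGeneratedTestsModule]);
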
package/package.json
CHANGED
package/dist/agentManager.js
DELETED

@@ -1,47 +0,0 @@
-import fs from 'fs/promises';
-import chalk from 'chalk';
-import { runModulePipeline } from './pipeline/runModulePipeline.js';
-import { normalizePath } from './utils/normalizePath.js';
-import { readConfig } from './config.js';
-import { countTokens, splitCodeIntoChunks } from './utils/splitCodeIntoChunk.js';
-export async function handleAgentRun(filepath, modules) {
-    try {
-        filepath = normalizePath(filepath);
-        const content = await fs.readFile(filepath, 'utf-8');
-        const totalTokens = countTokens(content);
-        console.log(chalk.blue(`🧮 Total tokens in file:`), chalk.yellow(totalTokens.toString()));
-        const config = readConfig();
-        const maxTokens = 1500;
-        const chunks = splitCodeIntoChunks(content, maxTokens);
-        console.log(chalk.magenta(`📦 Split into ${chunks.length} chunks`));
-        const processedChunks = [];
-        for (const [i, chunk] of chunks.entries()) {
-            const chunkTokens = countTokens(chunk);
-            if (i === 0) {
-                console.log(chalk.cyan(`🔍 Processing ${chunks.length} chunks of file:`), chalk.white(filepath));
-            }
-            console.log(chalk.gray(` - Chunk ${i + 1} tokens:`), chalk.yellow(chunkTokens.toString()));
-            const chunkInput = {
-                originalContent: chunk,
-                content: chunk,
-                filepath,
-                chunkIndex: i,
-                chunkCount: chunks.length,
-            };
-            const response = await runModulePipeline(modules, chunkInput);
-            if (!response.content.trim()) {
-                throw new Error(`⚠️ Model returned empty result on chunk ${i + 1}`);
-            }
-            processedChunks.push(response.content);
-            //console.log(chalk.green(`✅ Finished chunk ${i + 1}/${chunks.length}`));
-        }
-        // Join all chunk outputs into one string
-        const finalOutput = processedChunks.join('\n\n');
-        // Overwrite original file here:
-        await fs.writeFile(filepath, finalOutput, 'utf-8');
-        console.log(chalk.green(`✅ Original file overwritten: ${filepath}`));
-    }
-    catch (err) {
-        console.error(chalk.red('❌ Error in agent run:'), err instanceof Error ? err.message : err);
-    }
-}

package/dist/utils/normalizePath.js
DELETED

@@ -1,23 +0,0 @@
-// src/utils/normalizePath.ts
-import os from 'os';
-import path from "path";
-/**
- * Normalizes a path string for loose, fuzzy matching:
- * - Lowercases
- * - Removes slashes and backslashes
- * - Removes whitespace
- */
-export function normalizePathForLooseMatch(p) {
-    return p.toLowerCase().replace(/[\\/]/g, '').replace(/\s+/g, '');
-}
-// Helper to normalize and resolve paths to a consistent format (forward slashes)
-export function normalizePath(p) {
-    if (p.startsWith('~')) {
-        p = path.join(os.homedir(), p.slice(1));
-    }
-    return path.resolve(p).replace(/\\/g, '/');
-}
-export function getRepoKeyForPath(pathToMatch, config) {
-    const norm = normalizePath(pathToMatch);
-    return Object.entries(config.repos).find(([, val]) => normalizePath(val.indexDir) === norm)?.[0] || null;
-}