opencode-autognosis 2.0.4 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/activeset.d.ts +28 -0
- package/dist/activeset.js +2 -2
- package/dist/chunk-cards.d.ts +4 -0
- package/dist/chunk-cards.js +40 -0
- package/dist/database.d.ts +16 -8
- package/dist/database.js +239 -119
- package/dist/index.d.ts +6 -1
- package/dist/index.js +32 -1
- package/dist/performance-optimization.d.ts +1 -0
- package/dist/performance-optimization.js +7 -2
- package/dist/services/mlx.d.ts +8 -0
- package/dist/services/mlx.js +53 -0
- package/dist/services/policy.d.ts +12 -0
- package/dist/services/policy.js +59 -0
- package/dist/services/tui.d.ts +8 -0
- package/dist/services/tui.js +43 -0
- package/dist/services/watcher.d.ts +8 -0
- package/dist/services/watcher.js +50 -0
- package/dist/system-tools.js +6 -0
- package/dist/unified-api.js +156 -31
- package/package.json +4 -2
package/dist/services/mlx.js
ADDED
@@ -0,0 +1,53 @@
+import { exec } from "node:child_process";
+import { promisify } from "node:util";
+import * as fs from "node:fs";
+import * as path from "node:path";
+import { Logger } from "./logger.js";
+const execAsync = promisify(exec);
+export const DEFAULT_MLX_MODEL = "sentence-transformers/all-MiniLM-L6-v2";
+export class MLXService {
+    isAvailable = null;
+    async checkAvailability() {
+        if (this.isAvailable !== null)
+            return this.isAvailable;
+        try {
+            await execAsync('python3 -c "import mlx.core; import sentence_transformers"');
+            this.isAvailable = true;
+        }
+        catch {
+            this.isAvailable = false;
+        }
+        return this.isAvailable;
+    }
+    async setup() {
+        try {
+            Logger.log("MLX", "Setting up MLX dependencies...");
+            await execAsync("pip3 install mlx sentence-transformers huggingface_hub");
+            this.isAvailable = true;
+            return "MLX and sentence-transformers installed successfully.";
+        }
+        catch (error) {
+            throw new Error(`MLX setup failed: ${error.message}`);
+        }
+    }
+    async getEmbedding(text, model = DEFAULT_MLX_MODEL) {
+        if (!text || !text.trim())
+            return [];
+        // Escape text for python string
+        const escapedText = text.replace(/"/g, '\\"').replace(/\n/g, ' ');
+        // MLX optimized sentence-transformers execution
+        const pyScript = "\nimport mlx.core as mx\nfrom sentence_transformers import SentenceTransformer\nimport json\nimport sys\n\ntry:\n model = SentenceTransformer(\"${model}\")\n # Move to GPU if available (MLX default)\n embeddings = model.encode([\"${escapedText}\"])\n print(json.dumps(embeddings[0].tolist()))\nexcept Exception as e:\n print(json.dumps({\"error\": str(e)}))\n sys.exit(1)\n";
+        try {
+            const { stdout } = await execAsync(`python3 -c '${pyScript}'`);
+            const result = JSON.parse(stdout);
+            if (result.error)
+                throw new Error(result.error);
+            return result;
+        }
+        catch (error) {
+            Logger.log("MLX", "Embedding failed", error);
+            return [];
+        }
+    }
+}
+export const mlxService = new MLXService();
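For orientation, a minimal usage sketch of the MLX service added above; the relative import path and the calling module are assumptions, and getEmbedding() falls back to an empty array exactly as the service's own catch branch does.

// Hypothetical consumer of the new MLX embedding service (import path assumed).
import { mlxService, DEFAULT_MLX_MODEL } from "./dist/services/mlx.js";

// checkAvailability() probes `python3 -c "import mlx.core; import sentence_transformers"` once and caches the result.
if (!(await mlxService.checkAvailability())) {
    await mlxService.setup(); // runs `pip3 install mlx sentence-transformers huggingface_hub`
}

// Returns a number[] on success, or [] if the Python bridge fails.
const vector = await mlxService.getEmbedding("parse unified diffs", DEFAULT_MLX_MODEL);
console.log(vector.length);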
package/dist/services/policy.d.ts
ADDED
@@ -0,0 +1,12 @@
+export interface PolicyViolation {
+    file: string;
+    line: number;
+    message: string;
+    severity: "error" | "warning";
+}
+export declare class PolicyEngine {
+    private rules;
+    checkContent(file: string, content: string): PolicyViolation[];
+    checkDiff(diff: string): PolicyViolation[];
+}
+export declare const policyEngine: PolicyEngine;
package/dist/services/policy.js
ADDED
@@ -0,0 +1,59 @@
+import * as fsSync from "node:fs";
+import * as path from "node:path";
+import { Logger } from "./logger.js";
+export class PolicyEngine {
+    rules = [
+        {
+            name: "No Debug Logs",
+            pattern: /console\.(log|debug|info)\(/,
+            message: "Direct console logging is forbidden in production code.",
+            severity: "error"
+        },
+        {
+            name: "No TODO Debt",
+            pattern: /\/\/\s*TODO/,
+            message: "New TODOs must be linked to a ticket ID.",
+            severity: "warning"
+        },
+        {
+            name: "Forbidden Eval",
+            pattern: /eval\(/,
+            message: "Use of 'eval' is strictly forbidden for security reasons.",
+            severity: "error"
+        }
+    ];
+    checkContent(file, content) {
+        const violations = [];
+        const lines = content.split('\n');
+        for (const rule of this.rules) {
+            lines.forEach((line, index) => {
+                if (rule.pattern.test(line)) {
+                    violations.push({
+                        file,
+                        line: index + 1,
+                        message: rule.message,
+                        severity: rule.severity
+                    });
+                }
+            });
+        }
+        return violations;
+    }
+    checkDiff(diff) {
+        // Check only added lines in diffs
+        const addedLines = diff.split('\n').filter(l => l.startsWith('+') && !l.startsWith('+++'));
+        const violations = [];
+        for (const rule of this.rules) {
+            if (rule.pattern.test(addedLines.join('\n'))) {
+                violations.push({
+                    file: "diff",
+                    line: 0,
+                    message: `[Policy: ${rule.name}] ${rule.message}`,
+                    severity: rule.severity
+                });
+            }
+        }
+        return violations;
+    }
+}
+export const policyEngine = new PolicyEngine();
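A short sketch of driving the new policy engine; the sample file name and content are illustrative only.

// Illustrative input for the PolicyEngine (import path assumed).
import { policyEngine } from "./dist/services/policy.js";

const source = 'console.log("debug");\n// TODO tidy this up\n';
// checkContent() scans every line against the three built-in rules and reports 1-based line numbers:
// here one "error" (No Debug Logs) on line 1 and one "warning" (No TODO Debt) on line 2.
const violations = policyEngine.checkContent("src/example.js", source);

// checkDiff() only inspects added lines ("+", but not "+++") and reports file "diff", line 0.
const diffViolations = policyEngine.checkDiff('+eval("2 + 2")\n');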
package/dist/services/tui.d.ts
ADDED
@@ -0,0 +1,8 @@
+export declare class TUIService {
+    private client;
+    setClient(client: any): void;
+    showProgress(title: string, progress: number, message: string): Promise<void>;
+    showSuccess(title: string, message: string): Promise<void>;
+    showError(title: string, message: string): Promise<void>;
+}
+export declare const tui: TUIService;
package/dist/services/tui.js
ADDED
@@ -0,0 +1,43 @@
+export class TUIService {
+    client;
+    setClient(client) {
+        this.client = client;
+    }
+    async showProgress(title, progress, message) {
+        if (!this.client)
+            return;
+        try {
+            await this.client.tui.showToast({
+                body: {
+                    title: `[${progress}%] ${title}`,
+                    message,
+                    variant: "info"
+                }
+            });
+        }
+        catch (e) {
+            // Ignore if TUI not available
+        }
+    }
+    async showSuccess(title, message) {
+        if (!this.client)
+            return;
+        try {
+            await this.client.tui.showToast({
+                body: { title, message, variant: "success" }
+            });
+        }
+        catch (e) { }
+    }
+    async showError(title, message) {
+        if (!this.client)
+            return;
+        try {
+            await this.client.tui.showToast({
+                body: { title, message, variant: "error" }
+            });
+        }
+        catch (e) { }
+    }
+}
+export const tui = new TUIService();
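A sketch of how the toast helpers are intended to be wired: the service only needs an object exposing client.tui.showToast(), which is what each method calls; the plugin hook shown here is hypothetical.

// Hypothetical wiring inside a plugin entry point.
import { tui } from "./dist/services/tui.js";

export async function initPlugin({ client }) { // hook name is an assumption
    tui.setClient(client);
    await tui.showProgress("Indexing", 40, "Synthesizing chunk cards...");
    await tui.showSuccess("Indexing", "Chunk cards up to date.");
}
// Before setClient() is called, every helper returns early, so the calls are safe no-ops.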
package/dist/services/watcher.js
ADDED
@@ -0,0 +1,50 @@
+import chokidar, { FSWatcher } from "chokidar";
+import * as path from "node:path";
+import { indexFile } from "../performance-optimization.js";
+import { Logger } from "./logger.js";
+const PROJECT_ROOT = process.cwd();
+export class CodeWatcher {
+    watcher = null;
+    start() {
+        if (this.watcher)
+            return;
+        Logger.log("Watcher", "Starting live codebase watcher...");
+        this.watcher = chokidar.watch(PROJECT_ROOT, {
+            ignored: [
+                "**/node_modules/**",
+                "**/dist/**",
+                "**/build/**",
+                "**/.opencode/**"
+            ],
+            persistent: true,
+            ignoreInitial: true
+        });
+        this.watcher
+            .on("add", (filePath) => this.handleFileChange("added", filePath))
+            .on("change", (filePath) => this.handleFileChange("changed", filePath))
+            .on("unlink", (filePath) => this.handleFileDelete(filePath));
+    }
+    stop() {
+        if (this.watcher) {
+            this.watcher.close();
+            this.watcher = null;
+        }
+    }
+    async handleFileChange(event, filePath) {
+        const ext = path.extname(filePath);
+        const supportedExts = [".ts", ".js", ".tsx", ".jsx", ".cpp", ".c", ".h", ".hpp", ".swift", ".py", ".go", ".rs"];
+        if (supportedExts.includes(ext)) {
+            Logger.log("Watcher", `File ${event}: ${filePath}`);
+            try {
+                await indexFile(filePath);
+            }
+            catch (e) {
+                Logger.log("Watcher", `Failed to index ${filePath}`, e);
+            }
+        }
+    }
+    handleFileDelete(filePath) {
+        Logger.log("Watcher", `File deleted: ${filePath}`);
+    }
+}
+export const codeWatcher = new CodeWatcher();
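A minimal lifecycle sketch for the watcher defined above; only start() and stop() are exposed, and re-indexing is delegated to indexFile() from performance-optimization.js.

// Minimal lifecycle sketch (import path assumed).
import { codeWatcher } from "./dist/services/watcher.js";

codeWatcher.start(); // watches process.cwd(), ignoring node_modules, dist, build, and .opencode
// add/change events on supported extensions (.ts, .js, .py, .rs, ...) call indexFile(filePath)

process.on("SIGINT", () => {
    codeWatcher.stop(); // closes the chokidar watcher
    process.exit(0);
});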
package/dist/system-tools.js
CHANGED
@@ -7,6 +7,7 @@ import { promisify } from "node:util";
 import * as crypto from "node:crypto";
 import { Logger } from "./services/logger.js";
 import { getDb } from "./database.js";
+import { tui } from "./services/tui.js";
 const execAsync = promisify(exec);
 const PROJECT_ROOT = process.cwd();
 const OPENCODE_DIR = path.join(PROJECT_ROOT, ".opencode");
@@ -305,10 +306,12 @@ export function systemTools() {
         // Spawn background worker
         (async () => {
             getDb().updateJob(jobId, { status: "running", progress: 10 });
+            await tui.showProgress("Patch Validation", 10, "Creating temporary worktree...");
             const tempWorktree = path.join(PROJECT_ROOT, ".opencode", "temp-" + jobId);
             try {
                 await runCmd(`git worktree add -d "${tempWorktree}"`);
                 getDb().updateJob(jobId, { progress: 30 });
+                await tui.showProgress("Patch Validation", 30, "Applying diff...");
                 const content = await fs.readFile(patch_path, "utf-8");
                 const parts = content.split('\n\n');
                 const diffOnly = parts.length > 1 ? parts.slice(1).join('\n\n') : content;
@@ -318,6 +321,7 @@ export function systemTools() {
                 if (applyError)
                     throw new Error(`Apply failed: ${applyError.message}`);
                 getDb().updateJob(jobId, { progress: 60 });
+                await tui.showProgress("Patch Validation", 60, "Running build verification...");
                 let buildStatus = "SKIPPED";
                 if (fsSync.existsSync(path.join(tempWorktree, "package.json"))) {
                     const { error: buildError } = await runCmd("npm run build", tempWorktree);
@@ -332,9 +336,11 @@ export function systemTools() {
                     progress: 100,
                     result: JSON.stringify({ apply: "OK", build: buildStatus })
                 });
+                await tui.showSuccess("Validation Complete", `Apply: OK, Build: ${buildStatus}`);
             }
             catch (error) {
                 getDb().updateJob(jobId, { status: "failed", error: error.message });
+                await tui.showError("Validation Failed", error.message);
             }
             finally {
                 try {
package/dist/unified-api.js
CHANGED
@@ -1,10 +1,14 @@
 import { tool } from "@opencode-ai/plugin";
+import * as fsSync from "node:fs";
+import * as path from "node:path";
 import { systemTools } from "./system-tools.js";
 import { activeSetTools } from "./activeset.js";
 import { chunkCardsTools } from "./chunk-cards.js";
 import { moduleSummariesTools } from "./module-summaries.js";
 import { performanceTools } from "./performance-optimization.js";
-import { graphTools } from "./database.js";
+import { graphTools, getDb } from "./database.js";
+import { policyEngine } from "./services/policy.js";
+const PROJECT_ROOT = process.cwd();
 // Aggregate all internal tools
 const internal = {
     ...systemTools(),
@@ -14,6 +18,50 @@ const internal = {
     ...performanceTools(),
     ...graphTools(),
 };
+async function scoutPlugins() {
+    const plugins = new Set();
+    try {
+        const config = JSON.parse(fsSync.readFileSync(path.join(PROJECT_ROOT, "opencode.jsonc"), "utf-8"));
+        if (config.plugin)
+            config.plugin.forEach((p) => plugins.add(p));
+    }
+    catch { } // Ignore errors if config file doesn't exist
+    try {
+        const pkg = JSON.parse(fsSync.readFileSync(path.join(PROJECT_ROOT, "package.json"), "utf-8"));
+        const allDeps = { ...pkg.dependencies, ...pkg.devDependencies };
+        Object.keys(allDeps).forEach(d => { if (d.includes("opencode"))
+            plugins.add(d); });
+    }
+    catch { } // Ignore errors if package.json doesn't exist
+    return Array.from(plugins);
+}
+async function updateBridgePrompt(plugins) {
+    const bridgePath = "/Users/user/.config/opencode/prompts/bridge.md";
+    if (!fsSync.existsSync(bridgePath))
+        return "bridge.md not found at " + bridgePath;
+    const toolsSection = `
+## Current Consolidated Tools (Autognosis v2.2)
+- code_search: Universal search (semantic, symbol, filename, content).
+- code_analyze: Deep structural analysis and impact reports.
+- code_context: Working memory (ActiveSet) management and LRU eviction.
+- code_read: Precise symbol jumping and file slicing.
+- code_propose: Planning, patch generation, PR promotion, and Intent indexing.
+- code_status: System health, background jobs, compliance, and Multi-Agent Blackboard.
+- code_setup: Environment initialization, AI setup, and Architectural Boundaries.
+
+## Other Detected Plugins
+${plugins.filter(p => p !== "opencode-autognosis").map(p => `- ${p}`).join('\n')}
+`;
+    let content = fsSync.readFileSync(bridgePath, "utf-8");
+    if (content.includes("## Current Consolidated Tools")) {
+        content = content.replace(/## Current Consolidated Tools[\s\S]*?(?=\n#|$)/, toolsSection);
+    }
+    else {
+        content += "\n" + toolsSection;
+    }
+    fsSync.writeFileSync(bridgePath, content);
+    return "Updated bridge.md with consolidated tools and detected plugins.";
+}
 export function unifiedTools() {
     return {
         code_search: tool({
@@ -38,7 +86,7 @@ export function unifiedTools() {
             description: "Perform structural analysis on files or modules. Generates summaries, API maps, and impact reports.",
             args: {
                 target: tool.schema.string().describe("File path or module ID"),
-                mode: tool.schema.enum(["summary", "api", "invariant", "module", "impact", "reasoning"]).optional().default("summary"),
+                mode: tool.schema.enum(["summary", "api", "invariant", "module", "impact", "reasoning", "callers"]).optional().default("summary"),
                 force: tool.schema.boolean().optional().default(false),
                 plan_id: tool.schema.string().optional()
             },
@@ -47,6 +95,7 @@ export function unifiedTools() {
                     case "module": return internal.module_synthesize.execute({ file_path: args.target, force_resynthesize: args.force });
                     case "impact": return internal.brief_fix_loop.execute({ symbol: args.target, intent: "impact_analysis" });
                     case "reasoning": return internal.module_hierarchical_reasoning.execute({ module_id: args.target });
+                    case "callers": return internal.graph_search_symbols.execute({ query: args.target });
                     default: return internal.chunk_create_card.execute({ file_path: args.target, chunk_type: args.mode, force_recreate: args.force });
                 }
             }
@@ -54,18 +103,22 @@ export function unifiedTools() {
         code_context: tool({
             description: "Manage working memory (ActiveSets). Limits context window usage by loading/unloading specific chunks.",
             args: {
-                action: tool.schema.enum(["create", "load", "add", "remove", "status", "list", "close"]),
-                target: tool.schema.string().optional().describe("ActiveSet ID or Chunk IDs
-                name: tool.schema.string().optional()
+                action: tool.schema.enum(["create", "load", "add", "remove", "status", "list", "close", "evict"]),
+                target: tool.schema.string().optional().describe("ActiveSet ID or Chunk IDs"),
+                name: tool.schema.string().optional(),
+                limit: tool.schema.number().optional().default(5).describe("Eviction limit"),
                 plan_id: tool.schema.string().optional()
             },
             async execute(args) {
-                const chunk_ids = args.target?.split(',').map(s => s.trim());
                 switch (args.action) {
-                    case "create": return internal.activeset_create.execute({ name: args.name || "Context", chunk_ids });
+                    case "create": return internal.activeset_create.execute({ name: args.name || "Context", chunk_ids: args.target?.split(',').map(s => s.trim()) });
                     case "load": return internal.activeset_load.execute({ set_id: args.target });
-                    case "add": return internal.activeset_add_chunks.execute({ chunk_ids:
-                    case "remove": return internal.activeset_remove_chunks.execute({ chunk_ids:
+                    case "add": return internal.activeset_add_chunks.execute({ chunk_ids: args.target?.split(',').map(s => s.trim()) });
+                    case "remove": return internal.activeset_remove_chunks.execute({ chunk_ids: args.target?.split(',').map(s => s.trim()) });
+                    case "evict": {
+                        const lru = getDb().getLruChunks(args.limit);
+                        return internal.activeset_remove_chunks.execute({ chunk_ids: lru.map(c => c.chunk_id) });
+                    }
                     case "list": return internal.activeset_list.execute({});
                     case "close": return internal.activeset_close.execute({});
                     default: return internal.activeset_get_current.execute({});
@@ -82,73 +135,145 @@ export function unifiedTools() {
                 plan_id: tool.schema.string().optional()
             },
             async execute(args) {
-                if (args.symbol)
+                if (args.symbol) {
+                    getDb().logAccess(args.symbol, args.plan_id);
                     return internal.jump_to_symbol.execute({ symbol: args.symbol, plan_id: args.plan_id });
+                }
                 if (args.file && args.start_line && args.end_line) {
+                    getDb().logAccess(args.file, args.plan_id);
                     return internal.read_slice.execute({ file: args.file, start_line: args.start_line, end_line: args.end_line, plan_id: args.plan_id });
                 }
                 throw new Error("Either 'symbol' or 'file' with line range must be provided.");
             }
         }),
         code_propose: tool({
-            description: "Plan and
+            description: "Plan, propose, and promote changes. Includes patch generation, Intent indexing, and PR promotion.",
            args: {
-                action: tool.schema.enum(["plan", "patch", "validate", "finalize"]),
-                symbol: tool.schema.string().optional()
-                intent: tool.schema.string().optional()
-
-
+                action: tool.schema.enum(["plan", "patch", "validate", "finalize", "promote"]),
+                symbol: tool.schema.string().optional(),
+                intent: tool.schema.string().optional(),
+                reasoning: tool.schema.string().optional().describe("Detailed reasoning for the change (Decision Indexing)"),
+                message: tool.schema.string().optional(),
+                patch_path: tool.schema.string().optional(),
+                branch: tool.schema.string().optional(),
                 plan_id: tool.schema.string().optional(),
                 outcome: tool.schema.string().optional()
             },
             async execute(args) {
                 switch (args.action) {
                     case "plan": return internal.brief_fix_loop.execute({ symbol: args.symbol, intent: args.intent });
-                    case "patch":
-
+                    case "patch": {
+                        const { stdout: diff } = await internal.runCmd("git diff");
+                        const violations = policyEngine.checkDiff(diff);
+                        if (violations.some(v => v.severity === "error")) {
+                            return JSON.stringify({ status: "POLICY_VIOLATION", violations, message: "Patch rejected by policy engine." }, null, 2);
+                        }
+                        const res = await internal.prepare_patch.execute({ message: args.message, plan_id: args.plan_id });
+                        const json = JSON.parse(res);
+                        if (json.status === "SUCCESS" && args.reasoning) {
+                            getDb().storeIntent(json.patch_id, args.reasoning, args.plan_id || "adhoc");
+                        }
+                        return res;
+                    }
+                    case "validate": {
+                        // Architectural Boundary Check
+                        const { stdout: diff } = await internal.runCmd("git diff --name-only");
+                        const changedFiles = diff.split('\n').filter(Boolean);
+                        for (const file of changedFiles) {
+                            const deps = await internal.extractDependencies.execute({ content: "", ast: null, filePath: file });
+                            const imports = JSON.parse(deps);
+                            for (const imp of imports) {
+                                const violation = getDb().checkArchViolation(file, imp);
+                                if (violation)
+                                    return JSON.stringify({ status: "ARCH_VIOLATION", file, forbidden_import: imp, rule: violation }, null, 2);
+                            }
+                        }
+                        return internal.validate_patch.execute({ patch_path: args.patch_path, plan_id: args.plan_id });
+                    }
+                    case "promote": {
+                        const branch = args.branch || `autognosis-fix-${Date.now()}`;
+                        const { execSync } = await import("node:child_process");
+                        try {
+                            execSync(`git checkout -b ${branch}`);
+                            execSync(`git apply ${args.patch_path}`);
+                            execSync(`git add . && git commit -m "${args.message || 'Automated promotion'}"`);
+                            execSync(`gh pr create --title "${args.message}" --body "Automated promotion from Autognosis v2."`);
+                            return JSON.stringify({ status: "SUCCESS", promoted_to: branch, pr: "OPENED" }, null, 2);
+                        }
+                        catch (e) {
+                            return JSON.stringify({ status: "ERROR", message: e.message }, null, 2);
+                        }
+                    }
                     case "finalize": return internal.finalize_plan.execute({ plan_id: args.plan_id, outcome: args.outcome });
                 }
             }
         }),
         code_status: tool({
-            description: "Monitor system health, background jobs, and
+            description: "Monitor system health, background jobs, compliance, and Multi-Agent Blackboard.",
             args: {
-                mode: tool.schema.enum(["stats", "hot_files", "jobs", "plan"]).optional().default("stats"),
+                mode: tool.schema.enum(["stats", "hot_files", "jobs", "plan", "doctor", "blackboard"]).optional().default("stats"),
+                action: tool.schema.enum(["post", "read"]).optional(),
+                topic: tool.schema.string().optional().default("general"),
+                message: tool.schema.string().optional(),
                 job_id: tool.schema.string().optional(),
                 plan_id: tool.schema.string().optional(),
                 path: tool.schema.string().optional().default("")
             },
             async execute(args) {
                 switch (args.mode) {
+                    case "blackboard": {
+                        if (args.action === "post") {
+                            getDb().postToBlackboard("Agent", args.message, args.topic);
+                            return JSON.stringify({ status: "SUCCESS", message: "Posted to blackboard." });
+                        }
+                        return JSON.stringify({ status: "SUCCESS", entries: getDb().readBlackboard(args.topic) });
+                    }
                     case "hot_files": return internal.journal_query_hot_files.execute({ path_prefix: args.path });
                     case "jobs": return internal.graph_background_status.execute({ job_id: args.job_id });
                     case "plan": return internal.graph_get_plan_metrics.execute({ plan_id: args.plan_id });
+                    case "doctor": {
+                        const stats = getDb().getStats();
+                        let logSnippet = "";
+                        try {
+                            logSnippet = fsSync.readFileSync(path.join(PROJECT_ROOT, ".opencode", "logs", "autognosis.log"), "utf-8").split('\n').slice(-20).join('\n');
+                        }
+                        catch (e) { }
+                        return JSON.stringify({ status: "HEALTHY", stats, recent_logs: logSnippet }, null, 2);
+                    }
                     default: return internal.graph_stats.execute({});
                 }
             }
         }),
         code_setup: tool({
-            description: "
+            description: "Setup and maintenance tasks (AI, Git Journal, Indexing, Prompt Scouting, Arch Boundaries).",
             args: {
-                action: tool.schema.enum(["init", "ai", "index", "journal"]),
-
-
+                action: tool.schema.enum(["init", "ai", "index", "journal", "scout", "arch_rule"]),
+                provider: tool.schema.enum(["ollama", "mlx"]).optional().default("ollama"),
+                model: tool.schema.string().optional(),
+                limit: tool.schema.number().optional(),
+                source: tool.schema.string().optional().describe("Source target pattern"),
+                target: tool.schema.string().optional().describe("Target target pattern (forbidden)")
             },
             async execute(args) {
                 switch (args.action) {
-                    case "
+                    case "arch_rule": {
+                        getDb().addArchRule(args.source, args.target);
+                        return JSON.stringify({ status: "SUCCESS", message: `Architecture rule added: ${args.source} cannot import ${args.target}` });
+                    }
+                    case "ai": return internal.autognosis_setup_ai.execute({ provider: args.provider, model: args.model });
                     case "index": return internal.perf_incremental_index.execute({ background: true });
                     case "journal": return internal.journal_build.execute({ limit: args.limit });
-
+                    case "scout": {
+                        const plugins = await scoutPlugins();
+                        return updateBridgePrompt(plugins);
+                    }
+                    default: return internal.autognosis_init.execute({ mode: "apply", token: "adhoc" });
                 }
             }
         }),
         internal_call: tool({
-            description: "Advanced access to specialized internal tools.
-            args: {
-                tool_name: tool.schema.string().describe("Internal tool name"),
-                args: tool.schema.any().describe("Arguments for the internal tool")
-            },
+            description: "Advanced access to specialized internal tools.",
+            args: { tool_name: tool.schema.string(), args: tool.schema.any() },
             async execute({ tool_name, args }) {
                 const target = internal[tool_name];
                 if (!target)
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "opencode-autognosis",
-  "version": "2.0.4",
+  "version": "2.2.0",
   "description": "Advanced RAG-powered codebase awareness for OpenCode agents. Features Chunk Cards synthesis, hierarchical reasoning, ActiveSet working memory, and performance optimization for enterprise-scale repositories.",
   "type": "module",
   "main": "dist/index.js",
@@ -48,10 +48,12 @@
     "@opencode-ai/sdk": "^1.1.40",
     "@types/better-sqlite3": "^7.6.13",
     "@types/node": "^20.0.0",
+    "bun-types": "^1.3.8",
     "typescript": "^5.0.0",
     "zod": "^4.3.6"
   },
   "dependencies": {
-    "better-sqlite3": "^12.6.2"
+    "better-sqlite3": "^12.6.2",
+    "chokidar": "^5.0.0"
   }
 }