opencode-autognosis 2.0.5 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,53 @@
1
+ import { exec } from "node:child_process";
2
+ import { promisify } from "node:util";
3
+ import * as fs from "node:fs";
4
+ import * as path from "node:path";
5
+ import { Logger } from "./logger.js";
6
+ const execAsync = promisify(exec);
7
+ export const DEFAULT_MLX_MODEL = "sentence-transformers/all-MiniLM-L6-v2";
8
+ export class MLXService {
9
+ isAvailable = null;
10
+ async checkAvailability() {
11
+ if (this.isAvailable !== null)
12
+ return this.isAvailable;
13
+ try {
14
+ await execAsync('python3 -c "import mlx.core; import sentence_transformers"');
15
+ this.isAvailable = true;
16
+ }
17
+ catch {
18
+ this.isAvailable = false;
19
+ }
20
+ return this.isAvailable;
21
+ }
22
+ async setup() {
23
+ try {
24
+ Logger.log("MLX", "Setting up MLX dependencies...");
25
+ await execAsync("pip3 install mlx sentence-transformers huggingface_hub");
26
+ this.isAvailable = true;
27
+ return "MLX and sentence-transformers installed successfully.";
28
+ }
29
+ catch (error) {
30
+ throw new Error(`MLX setup failed: ${error.message}`);
31
+ }
32
+ }
33
+ async getEmbedding(text, model = DEFAULT_MLX_MODEL) {
34
+ if (!text || !text.trim())
35
+ return [];
36
+ // Escape text for python string
37
+ const escapedText = text.replace(/"/g, '\\"').replace(/\n/g, ' ');
38
+ // MLX optimized sentence-transformers execution
39
+ const pyScript = "\nimport mlx.core as mx\nfrom sentence_transformers import SentenceTransformer\nimport json\nimport sys\n\ntry:\n model = SentenceTransformer(\"${model}\")\n # Move to GPU if available (MLX default)\n embeddings = model.encode([\"${escapedText}\"])\n print(json.dumps(embeddings[0].tolist()))\nexcept Exception as e:\n print(json.dumps({\"error\": str(e)}))\n sys.exit(1)\n";
40
+ try {
41
+ const { stdout } = await execAsync(`python3 -c '${pyScript}'`);
42
+ const result = JSON.parse(stdout);
43
+ if (result.error)
44
+ throw new Error(result.error);
45
+ return result;
46
+ }
47
+ catch (error) {
48
+ Logger.log("MLX", "Embedding failed", error);
49
+ return [];
50
+ }
51
+ }
52
+ }
53
+ export const mlxService = new MLXService();
@@ -0,0 +1,12 @@
1
/** A single policy-rule violation found in file content or in a diff. */
export interface PolicyViolation {
    /** Path of the offending file ("diff" when reported by checkDiff). */
    file: string;
    /** 1-based line number; 0 when the exact line is unknown (diff checks). */
    line: number;
    /** Human-readable explanation of the violated rule. */
    message: string;
    /** "error" violations are blocking; "warning" is advisory. */
    severity: "error" | "warning";
}
/**
 * Regex-based policy checker (e.g. no console logging, no eval, no
 * unticketed TODOs — see the implementation for the exact rule set).
 */
export declare class PolicyEngine {
    private rules;
    /** Scans full file content line by line; returns every violation found. */
    checkContent(file: string, content: string): PolicyViolation[];
    /** Scans only the added ("+") lines of a unified diff; at most one violation per rule. */
    checkDiff(diff: string): PolicyViolation[];
}
/** Shared singleton instance. */
export declare const policyEngine: PolicyEngine;
@@ -0,0 +1,59 @@
1
+ import * as fsSync from "node:fs";
2
+ import * as path from "node:path";
3
+ import { Logger } from "./logger.js";
4
/**
 * Regex-based policy checker. Each rule pairs a pattern with a message and
 * severity; content is checked line by line, diffs by their added lines only.
 */
export class PolicyEngine {
    rules = [
        {
            name: "No Debug Logs",
            pattern: /console\.(log|debug|info)\(/,
            message: "Direct console logging is forbidden in production code.",
            severity: "error"
        },
        {
            name: "No TODO Debt",
            pattern: /\/\/\s*TODO/,
            message: "New TODOs must be linked to a ticket ID.",
            severity: "warning"
        },
        {
            name: "Forbidden Eval",
            pattern: /eval\(/,
            message: "Use of 'eval' is strictly forbidden for security reasons.",
            severity: "error"
        }
    ];
    /**
     * Scans `content` one line at a time against every rule.
     * Violations are grouped by rule (all hits of rule 1 first, then rule 2, ...).
     */
    checkContent(file, content) {
        const sourceLines = content.split('\n');
        const found = [];
        for (const rule of this.rules) {
            for (let i = 0; i < sourceLines.length; i += 1) {
                if (!rule.pattern.test(sourceLines[i]))
                    continue;
                found.push({
                    file,
                    line: i + 1,  // report 1-based line numbers
                    message: rule.message,
                    severity: rule.severity
                });
            }
        }
        return found;
    }
    /**
     * Scans a unified diff, considering only added lines ("+" but not "+++").
     * Reports at most one violation per rule, with a placeholder location.
     */
    checkDiff(diff) {
        const added = diff
            .split('\n')
            .filter(l => l.startsWith('+') && !l.startsWith('+++'))
            .join('\n');
        const found = [];
        for (const rule of this.rules) {
            if (!rule.pattern.test(added))
                continue;
            found.push({
                file: "diff",
                line: 0,  // exact line unknown at diff granularity
                message: `[Policy: ${rule.name}] ${rule.message}`,
                severity: rule.severity
            });
        }
        return found;
    }
}
/** Shared singleton instance. */
export const policyEngine = new PolicyEngine();
@@ -0,0 +1,8 @@
1
/**
 * Thin wrapper around the OpenCode TUI toast API. Every method is a
 * best-effort no-op when no client has been attached, and toast failures
 * are swallowed (see implementation).
 */
export declare class TUIService {
    // Underlying OpenCode SDK client; unset until setClient() is called.
    private client;
    /** Attaches the SDK client used to emit toasts. */
    setClient(client: any): void;
    /** Shows an info-variant toast titled "[<progress>%] <title>". */
    showProgress(title: string, progress: number, message: string): Promise<void>;
    /** Shows a success-variant toast. */
    showSuccess(title: string, message: string): Promise<void>;
    /** Shows an error-variant toast. */
    showError(title: string, message: string): Promise<void>;
}
/** Shared singleton instance. */
export declare const tui: TUIService;
@@ -0,0 +1,43 @@
1
+ export class TUIService {
2
+ client;
3
+ setClient(client) {
4
+ this.client = client;
5
+ }
6
+ async showProgress(title, progress, message) {
7
+ if (!this.client)
8
+ return;
9
+ try {
10
+ await this.client.tui.showToast({
11
+ body: {
12
+ title: `[${progress}%] ${title}`,
13
+ message,
14
+ variant: "info"
15
+ }
16
+ });
17
+ }
18
+ catch (e) {
19
+ // Ignore if TUI not available
20
+ }
21
+ }
22
+ async showSuccess(title, message) {
23
+ if (!this.client)
24
+ return;
25
+ try {
26
+ await this.client.tui.showToast({
27
+ body: { title, message, variant: "success" }
28
+ });
29
+ }
30
+ catch (e) { }
31
+ }
32
+ async showError(title, message) {
33
+ if (!this.client)
34
+ return;
35
+ try {
36
+ await this.client.tui.showToast({
37
+ body: { title, message, variant: "error" }
38
+ });
39
+ }
40
+ catch (e) { }
41
+ }
42
+ }
43
+ export const tui = new TUIService();
@@ -0,0 +1,8 @@
1
/**
 * Live file-system watcher (chokidar-backed) that re-indexes source files
 * as they are added or changed under the project root. See implementation
 * for the ignore list and the supported file extensions.
 */
export declare class CodeWatcher {
    // chokidar FSWatcher handle; null while stopped.
    private watcher;
    /** Starts watching the project root. No-op if already running. */
    start(): void;
    /** Closes the watcher and clears the handle. No-op if not running. */
    stop(): void;
    // Re-indexes a supported source file on add/change.
    private handleFileChange;
    // Currently only logs deletions; the index is not updated.
    private handleFileDelete;
}
/** Shared singleton instance. */
export declare const codeWatcher: CodeWatcher;
@@ -0,0 +1,50 @@
1
+ import chokidar, { FSWatcher } from "chokidar";
2
+ import * as path from "node:path";
3
+ import { indexFile } from "../performance-optimization.js";
4
+ import { Logger } from "./logger.js";
5
const PROJECT_ROOT = process.cwd();
/**
 * Watches the project tree and re-indexes supported source files when they
 * are added or modified. Deletions are currently only logged.
 */
export class CodeWatcher {
    // Active chokidar watcher, or null while stopped.
    watcher = null;
    /** Begins watching PROJECT_ROOT; does nothing if already started. */
    start() {
        if (this.watcher !== null)
            return;
        Logger.log("Watcher", "Starting live codebase watcher...");
        const ignored = [
            "**/node_modules/**",
            "**/dist/**",
            "**/build/**",
            "**/.opencode/**"
        ];
        const watcher = chokidar.watch(PROJECT_ROOT, {
            ignored,
            persistent: true,
            ignoreInitial: true
        });
        watcher.on("add", (p) => this.handleFileChange("added", p));
        watcher.on("change", (p) => this.handleFileChange("changed", p));
        watcher.on("unlink", (p) => this.handleFileDelete(p));
        this.watcher = watcher;
    }
    /** Stops and discards the watcher, if one is running. */
    stop() {
        if (!this.watcher)
            return;
        this.watcher.close();
        this.watcher = null;
    }
    /** Re-indexes `filePath` when its extension is a supported source type. */
    async handleFileChange(event, filePath) {
        const supported = [".ts", ".js", ".tsx", ".jsx", ".cpp", ".c", ".h", ".hpp", ".swift", ".py", ".go", ".rs"];
        if (!supported.includes(path.extname(filePath)))
            return;
        Logger.log("Watcher", `File ${event}: ${filePath}`);
        try {
            await indexFile(filePath);
        }
        catch (e) {
            Logger.log("Watcher", `Failed to index ${filePath}`, e);
        }
    }
    /** Deletions are not yet propagated to the index; just log them. */
    handleFileDelete(filePath) {
        Logger.log("Watcher", `File deleted: ${filePath}`);
    }
}
/** Shared singleton instance. */
export const codeWatcher = new CodeWatcher();
@@ -7,6 +7,7 @@ import { promisify } from "node:util";
7
7
  import * as crypto from "node:crypto";
8
8
  import { Logger } from "./services/logger.js";
9
9
  import { getDb } from "./database.js";
10
+ import { tui } from "./services/tui.js";
10
11
  const execAsync = promisify(exec);
11
12
  const PROJECT_ROOT = process.cwd();
12
13
  const OPENCODE_DIR = path.join(PROJECT_ROOT, ".opencode");
@@ -305,10 +306,12 @@ export function systemTools() {
305
306
  // Spawn background worker
306
307
  (async () => {
307
308
  getDb().updateJob(jobId, { status: "running", progress: 10 });
309
+ await tui.showProgress("Patch Validation", 10, "Creating temporary worktree...");
308
310
  const tempWorktree = path.join(PROJECT_ROOT, ".opencode", "temp-" + jobId);
309
311
  try {
310
312
  await runCmd(`git worktree add -d "${tempWorktree}"`);
311
313
  getDb().updateJob(jobId, { progress: 30 });
314
+ await tui.showProgress("Patch Validation", 30, "Applying diff...");
312
315
  const content = await fs.readFile(patch_path, "utf-8");
313
316
  const parts = content.split('\n\n');
314
317
  const diffOnly = parts.length > 1 ? parts.slice(1).join('\n\n') : content;
@@ -318,6 +321,7 @@ export function systemTools() {
318
321
  if (applyError)
319
322
  throw new Error(`Apply failed: ${applyError.message}`);
320
323
  getDb().updateJob(jobId, { progress: 60 });
324
+ await tui.showProgress("Patch Validation", 60, "Running build verification...");
321
325
  let buildStatus = "SKIPPED";
322
326
  if (fsSync.existsSync(path.join(tempWorktree, "package.json"))) {
323
327
  const { error: buildError } = await runCmd("npm run build", tempWorktree);
@@ -332,9 +336,11 @@ export function systemTools() {
332
336
  progress: 100,
333
337
  result: JSON.stringify({ apply: "OK", build: buildStatus })
334
338
  });
339
+ await tui.showSuccess("Validation Complete", `Apply: OK, Build: ${buildStatus}`);
335
340
  }
336
341
  catch (error) {
337
342
  getDb().updateJob(jobId, { status: "failed", error: error.message });
343
+ await tui.showError("Validation Failed", error.message);
338
344
  }
339
345
  finally {
340
346
  try {
@@ -6,7 +6,8 @@ import { activeSetTools } from "./activeset.js";
6
6
  import { chunkCardsTools } from "./chunk-cards.js";
7
7
  import { moduleSummariesTools } from "./module-summaries.js";
8
8
  import { performanceTools } from "./performance-optimization.js";
9
- import { graphTools } from "./database.js";
9
+ import { graphTools, getDb } from "./database.js";
10
+ import { policyEngine } from "./services/policy.js";
10
11
  const PROJECT_ROOT = process.cwd();
11
12
  // Aggregate all internal tools
12
13
  const internal = {
@@ -19,23 +20,19 @@ const internal = {
19
20
  };
20
21
  async function scoutPlugins() {
21
22
  const plugins = new Set();
22
- // 1. Check opencode.jsonc
23
23
  try {
24
24
  const config = JSON.parse(fsSync.readFileSync(path.join(PROJECT_ROOT, "opencode.jsonc"), "utf-8"));
25
25
  if (config.plugin)
26
26
  config.plugin.forEach((p) => plugins.add(p));
27
27
  }
28
- catch { }
29
- // 2. Check package.json dependencies
28
+ catch { } // Ignore errors if config file doesn't exist
30
29
  try {
31
30
  const pkg = JSON.parse(fsSync.readFileSync(path.join(PROJECT_ROOT, "package.json"), "utf-8"));
32
31
  const allDeps = { ...pkg.dependencies, ...pkg.devDependencies };
33
- Object.keys(allDeps).forEach(d => {
34
- if (d.includes("opencode"))
35
- plugins.add(d);
36
- });
32
+ Object.keys(allDeps).forEach(d => { if (d.includes("opencode"))
33
+ plugins.add(d); });
37
34
  }
38
- catch { }
35
+ catch { } // Ignore errors if package.json doesn't exist
39
36
  return Array.from(plugins);
40
37
  }
41
38
  async function updateBridgePrompt(plugins) {
@@ -43,20 +40,19 @@ async function updateBridgePrompt(plugins) {
43
40
  if (!fsSync.existsSync(bridgePath))
44
41
  return "bridge.md not found at " + bridgePath;
45
42
  const toolsSection = `
46
- ## Current Consolidated Tools (Autognosis v2)
43
+ ## Current Consolidated Tools (Autognosis v2.2)
47
44
  - code_search: Universal search (semantic, symbol, filename, content).
48
45
  - code_analyze: Deep structural analysis and impact reports.
49
- - code_context: Working memory (ActiveSet) management.
46
+ - code_context: Working memory (ActiveSet) management and LRU eviction.
50
47
  - code_read: Precise symbol jumping and file slicing.
51
- - code_propose: Planning and patch generation.
52
- - code_status: System health and background job monitoring.
53
- - code_setup: Environment initialization and maintenance.
48
+ - code_propose: Planning, patch generation, PR promotion, and Intent indexing.
49
+ - code_status: System health, background jobs, compliance, and Multi-Agent Blackboard.
50
+ - code_setup: Environment initialization, AI setup, and Architectural Boundaries.
54
51
 
55
52
  ## Other Detected Plugins
56
53
  ${plugins.filter(p => p !== "opencode-autognosis").map(p => `- ${p}`).join('\n')}
57
54
  `;
58
55
  let content = fsSync.readFileSync(bridgePath, "utf-8");
59
- // Replace or Append Tool Usage section
60
56
  if (content.includes("## Current Consolidated Tools")) {
61
57
  content = content.replace(/## Current Consolidated Tools[\s\S]*?(?=\n#|$)/, toolsSection);
62
58
  }
@@ -90,7 +86,7 @@ export function unifiedTools() {
90
86
  description: "Perform structural analysis on files or modules. Generates summaries, API maps, and impact reports.",
91
87
  args: {
92
88
  target: tool.schema.string().describe("File path or module ID"),
93
- mode: tool.schema.enum(["summary", "api", "invariant", "module", "impact", "reasoning"]).optional().default("summary"),
89
+ mode: tool.schema.enum(["summary", "api", "invariant", "module", "impact", "reasoning", "callers"]).optional().default("summary"),
94
90
  force: tool.schema.boolean().optional().default(false),
95
91
  plan_id: tool.schema.string().optional()
96
92
  },
@@ -99,6 +95,7 @@ export function unifiedTools() {
99
95
  case "module": return internal.module_synthesize.execute({ file_path: args.target, force_resynthesize: args.force });
100
96
  case "impact": return internal.brief_fix_loop.execute({ symbol: args.target, intent: "impact_analysis" });
101
97
  case "reasoning": return internal.module_hierarchical_reasoning.execute({ module_id: args.target });
98
+ case "callers": return internal.graph_search_symbols.execute({ query: args.target });
102
99
  default: return internal.chunk_create_card.execute({ file_path: args.target, chunk_type: args.mode, force_recreate: args.force });
103
100
  }
104
101
  }
@@ -106,18 +103,22 @@ export function unifiedTools() {
106
103
  code_context: tool({
107
104
  description: "Manage working memory (ActiveSets). Limits context window usage by loading/unloading specific chunks.",
108
105
  args: {
109
- action: tool.schema.enum(["create", "load", "add", "remove", "status", "list", "close"]),
110
- target: tool.schema.string().optional().describe("ActiveSet ID or Chunk IDs (comma separated)"),
111
- name: tool.schema.string().optional().describe("Name for new ActiveSet"),
106
+ action: tool.schema.enum(["create", "load", "add", "remove", "status", "list", "close", "evict"]),
107
+ target: tool.schema.string().optional().describe("ActiveSet ID or Chunk IDs"),
108
+ name: tool.schema.string().optional(),
109
+ limit: tool.schema.number().optional().default(5).describe("Eviction limit"),
112
110
  plan_id: tool.schema.string().optional()
113
111
  },
114
112
  async execute(args) {
115
- const chunk_ids = args.target?.split(',').map(s => s.trim());
116
113
  switch (args.action) {
117
- case "create": return internal.activeset_create.execute({ name: args.name || "Context", chunk_ids });
114
+ case "create": return internal.activeset_create.execute({ name: args.name || "Context", chunk_ids: args.target?.split(',').map(s => s.trim()) });
118
115
  case "load": return internal.activeset_load.execute({ set_id: args.target });
119
- case "add": return internal.activeset_add_chunks.execute({ chunk_ids: chunk_ids });
120
- case "remove": return internal.activeset_remove_chunks.execute({ chunk_ids: chunk_ids });
116
+ case "add": return internal.activeset_add_chunks.execute({ chunk_ids: args.target?.split(',').map(s => s.trim()) });
117
+ case "remove": return internal.activeset_remove_chunks.execute({ chunk_ids: args.target?.split(',').map(s => s.trim()) });
118
+ case "evict": {
119
+ const lru = getDb().getLruChunks(args.limit);
120
+ return internal.activeset_remove_chunks.execute({ chunk_ids: lru.map(c => c.chunk_id) });
121
+ }
121
122
  case "list": return internal.activeset_list.execute({});
122
123
  case "close": return internal.activeset_close.execute({});
123
124
  default: return internal.activeset_get_current.execute({});
@@ -134,61 +135,132 @@ export function unifiedTools() {
134
135
  plan_id: tool.schema.string().optional()
135
136
  },
136
137
  async execute(args) {
137
- if (args.symbol)
138
+ if (args.symbol) {
139
+ getDb().logAccess(args.symbol, args.plan_id);
138
140
  return internal.jump_to_symbol.execute({ symbol: args.symbol, plan_id: args.plan_id });
141
+ }
139
142
  if (args.file && args.start_line && args.end_line) {
143
+ getDb().logAccess(args.file, args.plan_id);
140
144
  return internal.read_slice.execute({ file: args.file, start_line: args.start_line, end_line: args.end_line, plan_id: args.plan_id });
141
145
  }
142
146
  throw new Error("Either 'symbol' or 'file' with line range must be provided.");
143
147
  }
144
148
  }),
145
149
  code_propose: tool({
146
- description: "Plan and propose changes. Generates worklists, diffs, and validates them.",
150
+ description: "Plan, propose, and promote changes. Includes patch generation, Intent indexing, and PR promotion.",
147
151
  args: {
148
- action: tool.schema.enum(["plan", "patch", "validate", "finalize"]),
149
- symbol: tool.schema.string().optional().describe("Locus symbol for plan"),
150
- intent: tool.schema.string().optional().describe("Work intent (e.g. refactor)"),
151
- message: tool.schema.string().optional().describe("Commit message for patch"),
152
- patch_path: tool.schema.string().optional().describe("Path to .diff file"),
152
+ action: tool.schema.enum(["plan", "patch", "validate", "finalize", "promote"]),
153
+ symbol: tool.schema.string().optional(),
154
+ intent: tool.schema.string().optional(),
155
+ reasoning: tool.schema.string().optional().describe("Detailed reasoning for the change (Decision Indexing)"),
156
+ message: tool.schema.string().optional(),
157
+ patch_path: tool.schema.string().optional(),
158
+ branch: tool.schema.string().optional(),
153
159
  plan_id: tool.schema.string().optional(),
154
160
  outcome: tool.schema.string().optional()
155
161
  },
156
162
  async execute(args) {
157
163
  switch (args.action) {
158
164
  case "plan": return internal.brief_fix_loop.execute({ symbol: args.symbol, intent: args.intent });
159
- case "patch": return internal.prepare_patch.execute({ message: args.message, plan_id: args.plan_id });
160
- case "validate": return internal.validate_patch.execute({ patch_path: args.patch_path, plan_id: args.plan_id });
165
+ case "patch": {
166
+ const { stdout: diff } = await internal.runCmd("git diff");
167
+ const violations = policyEngine.checkDiff(diff);
168
+ if (violations.some(v => v.severity === "error")) {
169
+ return JSON.stringify({ status: "POLICY_VIOLATION", violations, message: "Patch rejected by policy engine." }, null, 2);
170
+ }
171
+ const res = await internal.prepare_patch.execute({ message: args.message, plan_id: args.plan_id });
172
+ const json = JSON.parse(res);
173
+ if (json.status === "SUCCESS" && args.reasoning) {
174
+ getDb().storeIntent(json.patch_id, args.reasoning, args.plan_id || "adhoc");
175
+ }
176
+ return res;
177
+ }
178
+ case "validate": {
179
+ // Architectural Boundary Check
180
+ const { stdout: diff } = await internal.runCmd("git diff --name-only");
181
+ const changedFiles = diff.split('\n').filter(Boolean);
182
+ for (const file of changedFiles) {
183
+ const deps = await internal.extractDependencies.execute({ content: "", ast: null, filePath: file });
184
+ const imports = JSON.parse(deps);
185
+ for (const imp of imports) {
186
+ const violation = getDb().checkArchViolation(file, imp);
187
+ if (violation)
188
+ return JSON.stringify({ status: "ARCH_VIOLATION", file, forbidden_import: imp, rule: violation }, null, 2);
189
+ }
190
+ }
191
+ return internal.validate_patch.execute({ patch_path: args.patch_path, plan_id: args.plan_id });
192
+ }
193
+ case "promote": {
194
+ const branch = args.branch || `autognosis-fix-${Date.now()}`;
195
+ const { execSync } = await import("node:child_process");
196
+ try {
197
+ execSync(`git checkout -b ${branch}`);
198
+ execSync(`git apply ${args.patch_path}`);
199
+ execSync(`git add . && git commit -m "${args.message || 'Automated promotion'}"`);
200
+ execSync(`gh pr create --title "${args.message}" --body "Automated promotion from Autognosis v2."`);
201
+ return JSON.stringify({ status: "SUCCESS", promoted_to: branch, pr: "OPENED" }, null, 2);
202
+ }
203
+ catch (e) {
204
+ return JSON.stringify({ status: "ERROR", message: e.message }, null, 2);
205
+ }
206
+ }
161
207
  case "finalize": return internal.finalize_plan.execute({ plan_id: args.plan_id, outcome: args.outcome });
162
208
  }
163
209
  }
164
210
  }),
165
211
  code_status: tool({
166
- description: "Monitor system health, background jobs, and plan metrics.",
212
+ description: "Monitor system health, background jobs, compliance, and Multi-Agent Blackboard.",
167
213
  args: {
168
- mode: tool.schema.enum(["stats", "hot_files", "jobs", "plan"]).optional().default("stats"),
214
+ mode: tool.schema.enum(["stats", "hot_files", "jobs", "plan", "doctor", "blackboard"]).optional().default("stats"),
215
+ action: tool.schema.enum(["post", "read"]).optional(),
216
+ topic: tool.schema.string().optional().default("general"),
217
+ message: tool.schema.string().optional(),
169
218
  job_id: tool.schema.string().optional(),
170
219
  plan_id: tool.schema.string().optional(),
171
220
  path: tool.schema.string().optional().default("")
172
221
  },
173
222
  async execute(args) {
174
223
  switch (args.mode) {
224
+ case "blackboard": {
225
+ if (args.action === "post") {
226
+ getDb().postToBlackboard("Agent", args.message, args.topic);
227
+ return JSON.stringify({ status: "SUCCESS", message: "Posted to blackboard." });
228
+ }
229
+ return JSON.stringify({ status: "SUCCESS", entries: getDb().readBlackboard(args.topic) });
230
+ }
175
231
  case "hot_files": return internal.journal_query_hot_files.execute({ path_prefix: args.path });
176
232
  case "jobs": return internal.graph_background_status.execute({ job_id: args.job_id });
177
233
  case "plan": return internal.graph_get_plan_metrics.execute({ plan_id: args.plan_id });
234
+ case "doctor": {
235
+ const stats = getDb().getStats();
236
+ let logSnippet = "";
237
+ try {
238
+ logSnippet = fsSync.readFileSync(path.join(PROJECT_ROOT, ".opencode", "logs", "autognosis.log"), "utf-8").split('\n').slice(-20).join('\n');
239
+ }
240
+ catch (e) { }
241
+ return JSON.stringify({ status: "HEALTHY", stats, recent_logs: logSnippet }, null, 2);
242
+ }
178
243
  default: return internal.graph_stats.execute({});
179
244
  }
180
245
  }
181
246
  }),
182
247
  code_setup: tool({
183
- description: "One-time setup and maintenance tasks (AI, Git Journal, Indexing, Prompt Scouting).",
248
+ description: "Setup and maintenance tasks (AI, Git Journal, Indexing, Prompt Scouting, Arch Boundaries).",
184
249
  args: {
185
- action: tool.schema.enum(["init", "ai", "index", "journal", "scout"]),
186
- model: tool.schema.string().optional().describe("AI Model name"),
187
- limit: tool.schema.number().optional().describe("History limit")
250
+ action: tool.schema.enum(["init", "ai", "index", "journal", "scout", "arch_rule"]),
251
+ provider: tool.schema.enum(["ollama", "mlx"]).optional().default("ollama"),
252
+ model: tool.schema.string().optional(),
253
+ limit: tool.schema.number().optional(),
254
+ source: tool.schema.string().optional().describe("Source target pattern"),
255
+ target: tool.schema.string().optional().describe("Target target pattern (forbidden)")
188
256
  },
189
257
  async execute(args) {
190
258
  switch (args.action) {
191
- case "ai": return internal.autognosis_setup_ai.execute({ model: args.model });
259
+ case "arch_rule": {
260
+ getDb().addArchRule(args.source, args.target);
261
+ return JSON.stringify({ status: "SUCCESS", message: `Architecture rule added: ${args.source} cannot import ${args.target}` });
262
+ }
263
+ case "ai": return internal.autognosis_setup_ai.execute({ provider: args.provider, model: args.model });
192
264
  case "index": return internal.perf_incremental_index.execute({ background: true });
193
265
  case "journal": return internal.journal_build.execute({ limit: args.limit });
194
266
  case "scout": {
@@ -200,11 +272,8 @@ export function unifiedTools() {
200
272
  }
201
273
  }),
202
274
  internal_call: tool({
203
- description: "Advanced access to specialized internal tools. Use only when unified tools are insufficient.",
204
- args: {
205
- tool_name: tool.schema.string().describe("Internal tool name"),
206
- args: tool.schema.any().describe("Arguments for the internal tool")
207
- },
275
+ description: "Advanced access to specialized internal tools.",
276
+ args: { tool_name: tool.schema.string(), args: tool.schema.any() },
208
277
  async execute({ tool_name, args }) {
209
278
  const target = internal[tool_name];
210
279
  if (!target)
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "opencode-autognosis",
3
- "version": "2.0.5",
3
+ "version": "2.2.0",
4
4
  "description": "Advanced RAG-powered codebase awareness for OpenCode agents. Features Chunk Cards synthesis, hierarchical reasoning, ActiveSet working memory, and performance optimization for enterprise-scale repositories.",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -48,10 +48,12 @@
48
48
  "@opencode-ai/sdk": "^1.1.40",
49
49
  "@types/better-sqlite3": "^7.6.13",
50
50
  "@types/node": "^20.0.0",
51
+ "bun-types": "^1.3.8",
51
52
  "typescript": "^5.0.0",
52
53
  "zod": "^4.3.6"
53
54
  },
54
55
  "dependencies": {
55
- "better-sqlite3": "^12.6.2"
56
+ "better-sqlite3": "^12.6.2",
57
+ "chokidar": "^5.0.0"
56
58
  }
57
59
  }