atavi 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ARCHITECTURE.md +167 -0
- package/CHANGELOG.md +20 -0
- package/CONTRIBUTING.md +42 -0
- package/HOSTS.md +177 -0
- package/LICENSE +22 -0
- package/README.md +232 -0
- package/TESTING.md +68 -0
- package/bin/atavi.js +10 -0
- package/package.json +51 -0
- package/protocol/ATAVI.md +149 -0
- package/protocol/agents/critic.md +19 -0
- package/protocol/agents/experimentalist.md +30 -0
- package/protocol/agents/scout.md +34 -0
- package/protocol/agents/synthesist.md +19 -0
- package/protocol/agents/theorist.md +31 -0
- package/src/cli.js +98 -0
- package/src/commands/doctor.js +39 -0
- package/src/commands/init.js +26 -0
- package/src/commands/memory-export.js +22 -0
- package/src/commands/memory-import.js +31 -0
- package/src/commands/migrate.js +68 -0
- package/src/commands/path.js +6 -0
- package/src/commands/resume-check.js +57 -0
- package/src/commands/validate.js +40 -0
- package/src/lib/config.js +62 -0
- package/src/lib/filesystem.js +27 -0
- package/src/lib/memory.js +75 -0
- package/src/lib/protocol-manifest.js +318 -0
- package/src/lib/status.js +111 -0
- package/templates/cpr.md +12 -0
- package/templates/output-report.md +39 -0
- package/templates/pod.md +22 -0
- package/templates/research-brief.md +32 -0
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
import path from "node:path";
|
|
2
|
+
import { readFile, writeFile } from "node:fs/promises";
|
|
3
|
+
import { ensureDirectory, pathExists, writeFileIfMissing } from "../lib/filesystem.js";
|
|
4
|
+
import { parseConfigFile } from "../lib/config.js";
|
|
5
|
+
import { CURRENT_WORKSPACE_SCHEMA_VERSION, scaffoldFiles } from "../lib/protocol-manifest.js";
|
|
6
|
+
|
|
7
|
+
/**
 * Read the workspace config file if it exists.
 *
 * @param {string} configPath - Absolute path to the config file.
 * @returns {Promise<string|null>} File contents, or null when the file is absent.
 */
async function readOptionalConfig(configPath) {
  const exists = await pathExists(configPath);
  return exists ? readFile(configPath, "utf8") : null;
}
|
|
14
|
+
|
|
15
|
+
/**
 * `atavi migrate` — bring an existing workspace up to the current layout.
 *
 * Scaffolds any missing workspace files and bumps `schemaVersion` in
 * config.json when it differs from CURRENT_WORKSPACE_SCHEMA_VERSION.
 *
 * @param {string[]} args - CLI arguments; first non-flag token is the target directory (default ".").
 * @param {{stdout: {write: (s: string) => void}}} io - Output sink.
 * @throws {Error} When the workspace is missing or its config is invalid JSON.
 */
export async function commandMigrate(args, io) {
  const targetArg = args.find((arg) => !arg.startsWith("-")) ?? ".";
  const targetDir = path.resolve(process.cwd(), targetArg);
  const workspaceDir = path.join(targetDir, ".atavi");
  const configPath = path.join(workspaceDir, "config.json");

  // Migration requires an existing workspace; `init` is the command that creates one.
  if (!(await pathExists(workspaceDir))) {
    io.stdout.write("ATAVI migrate: FAIL\n");
    io.stdout.write(`missing workspace ${workspaceDir}\n`);
    throw new Error("ATAVI workspace was not found.");
  }

  await ensureDirectory(workspaceDir);

  const existingConfigContents = await readOptionalConfig(configPath);
  let existingConfig = null;

  if (existingConfigContents !== null) {
    try {
      existingConfig = parseConfigFile(existingConfigContents);
    } catch (error) {
      io.stdout.write("ATAVI migrate: FAIL\n");
      io.stdout.write(`invalid JSON ${configPath}\n`);
      throw new Error("ATAVI workspace config is not valid JSON.", { cause: error });
    }
  }

  // Fill in any scaffold files the workspace is missing; existing files are untouched.
  let created = 0;
  for (const file of scaffoldFiles()) {
    const destination = path.join(workspaceDir, file.relativePath);
    const didCreate = await writeFileIfMissing(destination, file.contents);
    if (didCreate) {
      created += 1;
    }
  }

  let migratedConfig = false;
  // Fix: the original guard repeated `existingConfig !== null` on both ends of
  // the condition; a single null/object check is equivalent.
  if (typeof existingConfig === "object" && existingConfig !== null) {
    if (existingConfig.schemaVersion !== CURRENT_WORKSPACE_SCHEMA_VERSION) {
      const updatedConfig = {
        ...existingConfig,
        schemaVersion: CURRENT_WORKSPACE_SCHEMA_VERSION
      };
      await writeFile(configPath, `${JSON.stringify(updatedConfig, null, 2)}\n`, "utf8");
      migratedConfig = true;
    }
  }

  io.stdout.write("ATAVI migrate: OK\n");
  io.stdout.write(`workspace: ${workspaceDir}\n`);
  io.stdout.write(`created ${created} missing file(s)\n`);
  io.stdout.write(`schema version: ${CURRENT_WORKSPACE_SCHEMA_VERSION}\n`);
  io.stdout.write(`config updated: ${migratedConfig ? "yes" : "no"}\n`);
}
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import path from "node:path";
|
|
2
|
+
import { readFile } from "node:fs/promises";
|
|
3
|
+
import { parseConfigFile, validateConfig } from "../lib/config.js";
|
|
4
|
+
import { parseStatusFile, validateStatus } from "../lib/status.js";
|
|
5
|
+
|
|
6
|
+
/**
 * Read a file that must exist, reporting a resume-check failure when it does not.
 *
 * @param {string} filePath - Absolute path to read.
 * @param {string} label - Human-readable name used in error output (e.g. "config").
 * @param {{stdout: {write: (s: string) => void}}} io - Output sink.
 * @returns {Promise<string>} The file contents.
 * @throws {Error} When the file cannot be read; the original error is attached as `cause`.
 */
async function readRequiredFile(filePath, label, io) {
  try {
    return await readFile(filePath, "utf8");
  } catch (cause) {
    io.stdout.write("ATAVI resume-check: FAIL\n");
    io.stdout.write(`missing ${label} ${filePath}\n`);
    throw new Error(`ATAVI workspace ${label} was not found.`, { cause });
  }
}
|
|
15
|
+
|
|
16
|
+
/**
 * `atavi resume-check` — verify that a workspace's config and status files
 * exist and pass validation before a run is resumed.
 *
 * @param {string[]} args - CLI arguments; first non-flag token is the target directory (default ".").
 * @param {{stdout: {write: (s: string) => void}}} io - Output sink.
 * @throws {Error} When a required file is missing, the config is invalid JSON,
 *   or either file fails validation.
 */
export async function commandResumeCheck(args, io) {
  const positional = args.find((token) => !token.startsWith("-")) ?? ".";
  const workspaceRoot = path.resolve(process.cwd(), positional);
  const configPath = path.join(workspaceRoot, ".atavi", "config.json");
  const statusPath = path.join(workspaceRoot, ".atavi", "status.md");

  // Both reads are kicked off together; either missing file fails the check.
  const [configContents, statusContents] = await Promise.all([
    readRequiredFile(configPath, "config", io),
    readRequiredFile(statusPath, "status", io)
  ]);

  let config;
  try {
    config = parseConfigFile(configContents);
  } catch (cause) {
    io.stdout.write("ATAVI resume-check: FAIL\n");
    io.stdout.write(`invalid JSON ${configPath}\n`);
    throw new Error("ATAVI workspace config is not valid JSON.", { cause });
  }

  const configProblems = validateConfig(config);
  const statusFields = parseStatusFile(statusContents);
  const statusProblems = validateStatus(statusFields);
  const problems = [
    ...configProblems.map((message) => `config: ${message}`),
    ...statusProblems.map((message) => `status: ${message}`)
  ];

  if (problems.length > 0) {
    io.stdout.write("ATAVI resume-check: FAIL\n");
    for (const problem of problems) {
      io.stdout.write(`${problem}\n`);
    }
    throw new Error("ATAVI workspace failed resume-state validation.");
  }

  io.stdout.write("ATAVI resume-check: OK\n");
  io.stdout.write(`config path: ${configPath}\n`);
  io.stdout.write(`status path: ${statusPath}\n`);
  io.stdout.write(`current_pass: ${statusFields.current_pass}\n`);
  io.stdout.write(`current_phase: ${statusFields.current_phase}\n`);
}
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
import path from "node:path";
|
|
2
|
+
import { readFile } from "node:fs/promises";
|
|
3
|
+
import { parseConfigFile, validateConfig } from "../lib/config.js";
|
|
4
|
+
|
|
5
|
+
/**
 * `atavi validate` — check that the workspace config exists, parses as JSON,
 * and satisfies the config schema.
 *
 * @param {string[]} args - CLI arguments; first non-flag token is the target directory (default ".").
 * @param {{stdout: {write: (s: string) => void}}} io - Output sink.
 * @throws {Error} When the config is missing, unparsable, or fails validation.
 */
export async function commandValidate(args, io) {
  const positional = args.find((token) => !token.startsWith("-")) ?? ".";
  const workspaceRoot = path.resolve(process.cwd(), positional);
  const configPath = path.join(workspaceRoot, ".atavi", "config.json");

  let rawContents;
  try {
    rawContents = await readFile(configPath, "utf8");
  } catch (cause) {
    io.stdout.write("ATAVI validate: FAIL\n");
    io.stdout.write(`missing ${configPath}\n`);
    throw new Error("ATAVI workspace config was not found.", { cause });
  }

  let parsedConfig;
  try {
    parsedConfig = parseConfigFile(rawContents);
  } catch (cause) {
    io.stdout.write("ATAVI validate: FAIL\n");
    io.stdout.write(`invalid JSON ${configPath}\n`);
    throw new Error("ATAVI workspace config is not valid JSON.", { cause });
  }

  const problems = validateConfig(parsedConfig);
  if (problems.length > 0) {
    io.stdout.write("ATAVI validate: FAIL\n");
    for (const problem of problems) {
      io.stdout.write(`${problem}\n`);
    }
    throw new Error("ATAVI workspace config failed validation.");
  }

  io.stdout.write("ATAVI validate: OK\n");
  io.stdout.write(`config path: ${configPath}\n`);
  io.stdout.write(`mode: ${parsedConfig.mode}\n`);
}
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
// Run modes accepted by validateConfig's `mode` check.
const ALLOWED_MODES = new Set(["full", "scout", "theorist", "vision", "audit"]);
// Agents that must appear in `agents` when mode is "full" (see validateConfig).
const REQUIRED_FULL_MODE_AGENTS = ["theorist", "experimentalist", "scout"];
// Values accepted for `memory.scope`; "project" is currently the only one.
const SUPPORTED_MEMORY_SCOPES = new Set(["project"]);
|
|
4
|
+
|
|
5
|
+
/**
 * Check whether a value is a non-null, non-array object.
 *
 * @param {*} value - Any parsed JSON value.
 * @returns {boolean} True only for plain-object-like values.
 */
function isPlainObject(value) {
  if (value === null || Array.isArray(value)) {
    return false;
  }
  return typeof value === "object";
}
|
|
8
|
+
|
|
9
|
+
/**
 * Append a validation error when `value` is not a positive integer.
 *
 * @param {*} value - Candidate value from the config.
 * @param {string} fieldName - Field name used in the error message.
 * @param {string[]} errors - Accumulator mutated in place.
 */
function validatePositiveInteger(value, fieldName, errors) {
  const isPositiveInt = Number.isInteger(value) && value > 0;
  if (!isPositiveInt) {
    errors.push(`${fieldName} must be a positive integer`);
  }
}
|
|
14
|
+
|
|
15
|
+
/**
 * Parse raw config-file text as JSON.
 *
 * @param {string} contents - Raw file contents.
 * @returns {*} The parsed JSON value (callers validate its shape separately).
 * @throws {SyntaxError} When `contents` is not valid JSON.
 */
export function parseConfigFile(contents) {
  const parsed = JSON.parse(contents);
  return parsed;
}
|
|
18
|
+
|
|
19
|
+
/**
 * Validate a parsed workspace config against the expected schema.
 *
 * @param {*} config - Value returned by parseConfigFile.
 * @returns {string[]} Human-readable error messages; empty when the config is valid.
 */
export function validateConfig(config) {
  // A non-object config is unusable; report that single error and stop.
  if (!isPlainObject(config)) {
    return ["config must be a JSON object"];
  }

  const errors = [];

  if (!ALLOWED_MODES.has(config.mode)) {
    errors.push(`mode must be one of: ${[...ALLOWED_MODES].join(", ")}`);
  }

  for (const field of ["maxPasses", "maxExperiments"]) {
    validatePositiveInteger(config[field], field, errors);
  }

  const threshold = config.convergenceThreshold;
  const thresholdOk = typeof threshold === "number" && threshold > 0 && threshold <= 1;
  if (!thresholdOk) {
    errors.push("convergenceThreshold must be a number greater than 0 and less than or equal to 1");
  }

  const agentsOk =
    Array.isArray(config.agents) &&
    config.agents.length > 0 &&
    config.agents.every((agent) => typeof agent === "string" && agent.length > 0);
  if (!agentsOk) {
    errors.push("agents must be a non-empty array of strings");
  }

  // Full mode requires the core trio; only check when agents is an array at all.
  if (config.mode === "full" && Array.isArray(config.agents)) {
    const missing = REQUIRED_FULL_MODE_AGENTS.filter((agent) => !config.agents.includes(agent));
    errors.push(...missing.map((agent) => `full mode must include agent: ${agent}`));
  }

  if (!isPlainObject(config.memory)) {
    errors.push("memory must be an object");
  } else {
    if (typeof config.memory.enabled !== "boolean") {
      errors.push("memory.enabled must be a boolean");
    }
    if (!SUPPORTED_MEMORY_SCOPES.has(config.memory.scope)) {
      errors.push(`memory.scope must be one of: ${[...SUPPORTED_MEMORY_SCOPES].join(", ")}`);
    }
  }

  return errors;
}
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
import { mkdir, access, writeFile } from "node:fs/promises";
|
|
2
|
+
import { constants } from "node:fs";
|
|
3
|
+
import path from "node:path";
|
|
4
|
+
|
|
5
|
+
/**
 * Create a directory (and any missing parents), succeeding silently when it
 * already exists.
 *
 * @param {string} dirPath - Directory to create.
 * @returns {Promise<void>}
 */
export async function ensureDirectory(dirPath) {
  // recursive:true makes mkdir idempotent and creates intermediate parents.
  await mkdir(dirPath, { recursive: true });
}
|
|
8
|
+
|
|
9
|
+
/**
 * Check whether a filesystem path exists.
 *
 * @param {string} filePath - Path to probe.
 * @returns {Promise<boolean>} True when the path is accessible, false otherwise.
 */
export async function pathExists(filePath) {
  // access() rejects for any missing/inaccessible path; map both outcomes to a boolean.
  return access(filePath, constants.F_OK).then(
    () => true,
    () => false
  );
}
|
|
17
|
+
|
|
18
|
+
/**
 * Write a file only when it does not already exist, creating parent
 * directories as needed.
 *
 * @param {string} filePath - Destination path.
 * @param {string} contents - UTF-8 contents to write.
 * @returns {Promise<boolean>} True when the file was written, false when it already existed.
 */
export async function writeFileIfMissing(filePath, contents) {
  const alreadyPresent = await pathExists(filePath);
  if (alreadyPresent) {
    return false;
  }

  await ensureDirectory(path.dirname(filePath));
  await writeFile(filePath, contents, "utf8");
  return true;
}
|
|
27
|
+
|
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
import path from "node:path";
|
|
2
|
+
import { cp, mkdir, readdir } from "node:fs/promises";
|
|
3
|
+
import { pathExists } from "./filesystem.js";
|
|
4
|
+
|
|
5
|
+
/**
 * Recursively list every file under a directory tree.
 *
 * @param {string} rootDir - Root of the tree being walked.
 * @param {string} [relativeDir=""] - Subdirectory (relative to rootDir) for recursion.
 * @returns {Promise<string[]>} File paths relative to rootDir.
 */
async function walkFiles(rootDir, relativeDir = "") {
  const entries = await readdir(path.join(rootDir, relativeDir), { withFileTypes: true });
  const collected = [];

  for (const entry of entries) {
    const entryPath = path.join(relativeDir, entry.name);
    if (entry.isDirectory()) {
      const nested = await walkFiles(rootDir, entryPath);
      collected.push(...nested);
    } else {
      collected.push(entryPath);
    }
  }

  return collected;
}
|
|
22
|
+
|
|
23
|
+
/**
 * Resolve the memory directory for a given input path.
 *
 * Prefers `<inputPath>/.atavi/memory` (the workspace layout), then falls back
 * to treating `inputPath` itself as a memory directory.
 *
 * Fix: the original computed `path.join(path.resolve(inputPath), ".atavi",
 * "memory")` twice (as `maybeWorkspaceMemory` and `workspaceMemoryDir`) and
 * checked its existence twice; behavior is identical with a single check,
 * since a child path cannot exist when its parent does not.
 *
 * @param {string} inputPath - Workspace root or direct memory directory.
 * @returns {Promise<string|null>} Resolved memory directory, or null when none exists.
 */
export async function resolveMemoryDirectory(inputPath) {
  const directMemoryDir = path.resolve(inputPath);
  const workspaceMemoryDir = path.join(directMemoryDir, ".atavi", "memory");

  if (await pathExists(workspaceMemoryDir)) {
    return workspaceMemoryDir;
  }

  if (await pathExists(directMemoryDir)) {
    return directMemoryDir;
  }

  return null;
}
|
|
43
|
+
|
|
44
|
+
/**
 * Copy the memory tree into an output directory, overwriting existing files.
 *
 * @param {string} sourceDir - Memory directory to export.
 * @param {string} outputDir - Destination directory (created if missing).
 * @returns {Promise<{exportedFiles: number}>} Count of files present in the output tree after the copy.
 */
export async function exportMemory(sourceDir, outputDir) {
  await mkdir(outputDir, { recursive: true });
  await cp(sourceDir, outputDir, { recursive: true, force: true });

  // Count what ended up in the destination tree.
  const copied = await walkFiles(outputDir);
  return { exportedFiles: copied.length };
}
|
|
52
|
+
|
|
53
|
+
/**
 * Copy memory files into a target tree, never overwriting existing files.
 *
 * @param {string} sourceDir - Directory to import from.
 * @param {string} targetDir - Directory to import into.
 * @returns {Promise<{importedFiles: number, skippedFiles: number}>} Copy/skip counts.
 */
export async function importMemory(sourceDir, targetDir) {
  const relativePaths = await walkFiles(sourceDir);
  const counts = { importedFiles: 0, skippedFiles: 0 };

  for (const relativePath of relativePaths) {
    const destinationPath = path.join(targetDir, relativePath);

    // Existing files win: imports are strictly additive.
    if (await pathExists(destinationPath)) {
      counts.skippedFiles += 1;
      continue;
    }

    await mkdir(path.dirname(destinationPath), { recursive: true });
    await cp(path.join(sourceDir, relativePath), destinationPath, { force: false });
    counts.importedFiles += 1;
  }

  return counts;
}
|
|
@@ -0,0 +1,318 @@
|
|
|
1
|
+
import path from "node:path";
|
|
2
|
+
import { fileURLToPath } from "node:url";
|
|
3
|
+
|
|
4
|
+
// Resolve the package root: this file lives in src/lib/, so the root is two
// directories up. Protocol and template assets are addressed relative to it.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const ROOT = path.resolve(__dirname, "..", "..");

// Protocol documents shipped with the package, paths relative to ROOT.
const PROTOCOL_FILES = [
  "protocol/ATAVI.md",
  "protocol/agents/theorist.md",
  "protocol/agents/experimentalist.md",
  "protocol/agents/scout.md",
  "protocol/agents/critic.md",
  "protocol/agents/synthesist.md"
];

// Markdown templates shipped with the package, paths relative to ROOT.
const TEMPLATE_FILES = [
  "templates/research-brief.md",
  "templates/pod.md",
  "templates/cpr.md",
  "templates/output-report.md"
];

// Schema version stamped into new workspace configs; `migrate` rewrites
// configs whose schemaVersion differs from this value.
export const CURRENT_WORKSPACE_SCHEMA_VERSION = 1;
|
|
25
|
+
|
|
26
|
+
/**
 * Return the absolute path of the installed package root.
 *
 * @returns {string} Package root directory (two levels above src/lib/).
 */
export function protocolRoot() {
  return ROOT;
}
|
|
29
|
+
|
|
30
|
+
/**
 * List the protocol documents shipped with the package.
 *
 * @returns {string[]} A fresh copy of the protocol file paths (relative to the package root).
 */
export function protocolFiles() {
  // Return a copy so callers cannot mutate the module-level list.
  return PROTOCOL_FILES.slice();
}
|
|
33
|
+
|
|
34
|
+
/**
 * List the markdown templates shipped with the package.
 *
 * @returns {string[]} A fresh copy of the template file paths (relative to the package root).
 */
export function templateFiles() {
  // Return a copy so callers cannot mutate the module-level list.
  return TEMPLATE_FILES.slice();
}
|
|
37
|
+
|
|
38
|
+
/**
 * Return the full set of files a fresh ATAVI workspace contains.
 *
 * Each entry pairs a path relative to the `.atavi` workspace directory with
 * its initial contents. The template-literal bodies below ARE the scaffold
 * file contents, byte for byte — do not reflow or re-indent them.
 *
 * @returns {Array<{relativePath: string, contents: string}>} Scaffold entries.
 */
export function scaffoldFiles() {
  return [
    // Run inputs and top-level state files.
    {
      relativePath: "brief.md",
      contents: `# ATAVI Research Brief

Status: draft
Mode: FULL
Source spec:
Thesis:
Domain:
Constraints:
-
Novelty sources:
-
Selected agents:
- Theorist
- Experimentalist
- Scout
`
    },
    {
      relativePath: "config.json",
      // Default config serialized at module-definition time; stamped with the
      // current schema version so migrate treats fresh workspaces as current.
      contents: `${JSON.stringify(
        {
          schemaVersion: CURRENT_WORKSPACE_SCHEMA_VERSION,
          mode: "full",
          maxPasses: 4,
          maxExperiments: 5,
          convergenceThreshold: 0.7,
          agents: ["theorist", "experimentalist", "scout"],
          memory: {
            enabled: true,
            scope: "project"
          }
        },
        null,
        2
      )}\n`
    },
    {
      relativePath: "status.md",
      contents: `# ATAVI Status

run_status: initialized
current_pass: 0
current_phase: not_started
convergence_score: n/a
blocking_concerns: none
token_estimate: unknown
last_updated: pending
`
    },
    {
      relativePath: "conflicts.md",
      contents: `# ATAVI Conflict List

The host AI records active conflicts, unresolved disagreements, and blocking
tradeoffs here during synthesis.
`
    },
    {
      relativePath: "kill-log.md",
      contents: `# ATAVI Kill Log

Record rejected claims and experiments here with explicit reasons, evidence, and
novelty verdicts when applicable.
`
    },
    {
      relativePath: "run-log.md",
      contents: `# ATAVI Run Log

Use this file for the pass-by-pass process log, including POD completion, CPR
exchange, synthesis updates, convergence scores, and decision gates.
`
    },
    {
      relativePath: "ATAVI-REPORT.md",
      contents: `# ATAVI Report

This file is populated by the host AI after a run completes.
`
    },
    // First-pass working directory.
    {
      relativePath: "pass-1/README.md",
      contents: `# ATAVI Pass 1

Place independent agent outputs in this directory as:

- theorist-pod.md
- experimentalist-pod.md
- scout-pod.md
- critic-pod.md
- synthesist-pod.md

The host AI also writes:

- synthesis.md
- decision.md
`
    },
    {
      relativePath: "pass-1/synthesis.md",
      contents: `# ATAVI Pass 1 Synthesis

Summarize accepted registry updates, active conflicts, novelty findings, and
convergence movement for this pass.
`
    },
    {
      relativePath: "pass-1/decision.md",
      contents: `# ATAVI Pass 1 Decision

Record the decision gate outcome for this pass: terminate, continue, or
escalate, with the rationale and any blocking concerns.
`
    },
    {
      relativePath: "pass-1/cross-pollination/README.md",
      contents: `# ATAVI Cross-Pollination

Place CPR outputs in this directory as:

- theorist-cpr.md
- experimentalist-cpr.md
- scout-cpr.md
- critic-cpr.md
- synthesist-cpr.md
`
    },
    // Shared registries updated across passes.
    {
      relativePath: "registries/claims.md",
      contents: `# Claim Registry

| Claim ID | Status | Metric | Evidence For | Evidence Against |
| --- | --- | --- | --- | --- |
`
    },
    {
      relativePath: "registries/experiments.md",
      contents: `# Experiment Ledger

| Experiment ID | Status | Claims Tested | Novelty Verdict | Information Gain |
| --- | --- | --- | --- | --- |
`
    },
    {
      relativePath: "registries/prior-art.md",
      contents: `# Prior Art Registry

| Source | Type | Relevance Tags | Conceptual Overlap | Methodological Overlap | Empirical Overlap |
| --- | --- | --- | --- | --- | --- |
`
    },
    // Persistent memory buckets reused between runs.
    {
      relativePath: "memory/README.md",
      contents: `# ATAVI Memory

This directory stores persistent prior art cache entries, kill archive records,
claim patterns, convergence history, and strategy insights for related runs.

Each memory entry should be written as a markdown file in the relevant bucket.
Use these governance fields consistently:

- Domain:
- Keywords:
- Confidence:
- Expires:
- Contradicts:

The host AI writes reusable memory exports into these buckets after a run
finishes. When new evidence contradicts an older memory, create the new entry
and link the contradiction explicitly rather than mutating history silently.
`
    },
    {
      relativePath: "memory/prior-art-cache/README.md",
      contents: `# Prior Art Cache

Store reusable novelty-search findings, source summaries, and scoped retrieval
metadata here for future ATAVI runs.

Suggested entry format: one file per source or source cluster.

- Source:
- Retrieved:
- Domain:
- Keywords:
- Confidence:
- Expires:
- Contradicts:
- Summary:
- Relevance:
- Reuse Notes:
`
    },
    {
      relativePath: "memory/kill-archive/README.md",
      contents: `# Kill Archive

Record rejected claims and experiments here, including the evidence or novelty
verdict that killed them.

Suggested entry format: one file per rejected claim or experiment.

- Killed Item:
- Item Type:
- Date:
- Domain:
- Keywords:
- Confidence:
- Contradicts:
- Kill Reason:
- Evidence:
- Reversal Condition:
`
    },
    {
      relativePath: "memory/claim-patterns/README.md",
      contents: `# Claim Patterns

Capture recurring claim structures, failure modes, and useful formalization
patterns that can accelerate future theorist passes.

Suggested entry format: one file per reusable pattern.

- Pattern Name:
- Domain:
- Keywords:
- Confidence:
- Expires:
- Contradicts:
- Pattern Description:
- Common Failure Mode:
- Reuse Guidance:
`
    },
    {
      relativePath: "memory/convergence-history/README.md",
      contents: `# Convergence History

Track prior run outcomes, convergence scores, and pass-level decision patterns
so future runs can detect repetition and premature agreement.

Suggested entry format: one file per completed run.

- Run ID:
- Domain:
- Keywords:
- Confidence:
- Started:
- Finished:
- Final Convergence Score:
- Contradicts:
- Outcome Summary:
- Repeated Failure Signals:
`
    },
    {
      relativePath: "memory/strategy-insights/README.md",
      contents: `# Strategy Insights

Store durable heuristics about search strategy, evaluation design, and
cross-domain synthesis that proved useful across runs.

Suggested entry format: one file per durable heuristic.

- Insight Name:
- Domain:
- Keywords:
- Confidence:
- Expires:
- Contradicts:
- Insight:
- Evidence:
- Applicability:
`
    }
  ];
}
|