rlhf-feedback-loop 0.6.10 → 0.6.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +10 -0
- package/README.md +120 -74
- package/adapters/README.md +3 -3
- package/adapters/amp/skills/rlhf-feedback/SKILL.md +2 -0
- package/adapters/chatgpt/INSTALL.md +6 -3
- package/adapters/chatgpt/openapi.yaml +5 -2
- package/adapters/claude/.mcp.json +3 -3
- package/adapters/codex/config.toml +3 -3
- package/adapters/gemini/function-declarations.json +2 -2
- package/adapters/mcp/server-stdio.js +19 -5
- package/bin/cli.js +295 -25
- package/openapi/openapi.yaml +5 -2
- package/package.json +25 -9
- package/scripts/a2ui-engine.js +73 -0
- package/scripts/adk-consolidator.js +267 -0
- package/scripts/billing.js +192 -681
- package/scripts/code-reasoning.js +26 -1
- package/scripts/context-engine.js +86 -4
- package/scripts/contextfs.js +130 -0
- package/scripts/disagreement-mining.js +315 -0
- package/scripts/export-kto-pairs.js +310 -0
- package/scripts/feedback-ingest-watcher.js +290 -0
- package/scripts/feedback-loop.js +153 -8
- package/scripts/feedback-quality.js +139 -0
- package/scripts/feedback-schema.js +31 -5
- package/scripts/feedback-to-memory.js +13 -1
- package/scripts/hook-auto-capture.sh +6 -0
- package/scripts/hook-stop-self-score.sh +51 -0
- package/scripts/install-mcp.js +168 -0
- package/scripts/intent-router.js +88 -0
- package/scripts/jsonl-watcher.js +151 -0
- package/scripts/local-model-profile.js +207 -0
- package/scripts/pr-manager.js +112 -0
- package/scripts/prove-adapters.js +137 -15
- package/scripts/prove-attribution.js +6 -6
- package/scripts/prove-automation.js +41 -8
- package/scripts/prove-data-quality.js +16 -8
- package/scripts/prove-intelligence.js +7 -4
- package/scripts/prove-lancedb.js +7 -7
- package/scripts/prove-local-intelligence.js +244 -0
- package/scripts/prove-loop-closure.js +16 -8
- package/scripts/prove-training-export.js +7 -4
- package/scripts/prove-workflow-contract.js +116 -0
- package/scripts/reminder-engine.js +132 -0
- package/scripts/risk-scorer.js +458 -0
- package/scripts/rlaif-self-audit.js +7 -1
- package/scripts/self-heal.js +24 -4
- package/scripts/status-dashboard.js +155 -0
- package/scripts/sync-version.js +159 -0
- package/scripts/test-coverage.js +76 -0
- package/scripts/validate-workflow-contract.js +287 -0
- package/scripts/vector-store.js +115 -17
- package/src/api/server.js +372 -25
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
'use strict';
|
|
3
|
+
/**
|
|
4
|
+
* Version Sync — Single Source of Truth
|
|
5
|
+
*
|
|
6
|
+
* Reads the version from package.json and propagates it to all
|
|
7
|
+
* manifests and public docs. Eliminates version drift permanently.
|
|
8
|
+
*
|
|
9
|
+
* Inspired by the "Pipeline Doctor" pattern (Optimum Partners, 2026)
|
|
10
|
+
* and OneUptime's automated version bumping approach.
|
|
11
|
+
*
|
|
12
|
+
* Usage:
|
|
13
|
+
* node scripts/sync-version.js # Sync all files
|
|
14
|
+
* node scripts/sync-version.js --check # Dry-run: report drift without fixing
|
|
15
|
+
*/
|
|
16
|
+
|
|
17
|
+
const fs = require('fs');
|
|
18
|
+
const path = require('path');
|
|
19
|
+
|
|
20
|
+
const PROJECT_ROOT = path.join(__dirname, '..');
|
|
21
|
+
|
|
22
|
+
/**
 * Read and parse a JSON file located relative to the project root.
 * @param {string} relPath - Path relative to PROJECT_ROOT.
 * @returns {object} Parsed JSON value.
 */
function readJson(relPath) {
  const absolute = path.join(PROJECT_ROOT, relPath);
  const raw = fs.readFileSync(absolute, 'utf-8');
  return JSON.parse(raw);
}

/**
 * Serialize `data` as pretty-printed JSON (2-space indent, trailing
 * newline) and write it relative to the project root.
 * @param {string} relPath - Path relative to PROJECT_ROOT.
 * @param {object} data - Value to serialize.
 */
function writeJson(relPath, data) {
  const absolute = path.join(PROJECT_ROOT, relPath);
  const serialized = `${JSON.stringify(data, null, 2)}\n`;
  fs.writeFileSync(absolute, serialized);
}

/**
 * Replace every occurrence of a literal substring in a file.
 * @param {string} relPath - Path relative to PROJECT_ROOT.
 * @param {string} search - Literal substring to find.
 * @param {string} replace - Replacement text.
 * @returns {boolean} true only if the file existed and contained `search`.
 */
function replaceInFile(relPath, search, replace) {
  const absolute = path.join(PROJECT_ROOT, relPath);
  if (!fs.existsSync(absolute)) return false;
  const original = fs.readFileSync(absolute, 'utf-8');
  if (!original.includes(search)) return false;
  // split/join gives a global literal replacement without regex-escaping concerns.
  fs.writeFileSync(absolute, original.split(search).join(replace));
  return true;
}
|
|
38
|
+
|
|
39
|
+
/**
 * Propagate the version from package.json to every manifest and doc that
 * repeats it. Returns a summary object instead of throwing so callers
 * (CLI, tests) decide how to report.
 *
 * @param {{check?: boolean}} [opts] - check: dry-run, report drift without writing.
 * @returns {{version: string, targets: string[], drifted: Array<{file: string, field: string, current: string}>, synced: boolean, allInSync: boolean}}
 */
function syncVersion(opts) {
  const options = opts || {};
  const checkOnly = options.check || false;
  const pkg = readJson('package.json');
  const version = pkg.version;

  const targets = [];
  const drifted = [];

  // 1. server.json — top-level version + packages[0].version
  const serverJson = readJson('server.json');
  if (serverJson.version !== version) {
    drifted.push({ file: 'server.json', field: 'version', current: serverJson.version });
    if (!checkOnly) {
      serverJson.version = version;
      if (serverJson.packages && serverJson.packages[0]) {
        serverJson.packages[0].version = version;
      }
      writeJson('server.json', serverJson);
    }
  } else if (serverJson.packages && serverJson.packages[0] && serverJson.packages[0].version !== version) {
    // Top-level matched but the nested package entry drifted on its own.
    drifted.push({ file: 'server.json', field: 'packages[0].version', current: serverJson.packages[0].version });
    if (!checkOnly) {
      serverJson.packages[0].version = version;
      writeJson('server.json', serverJson);
    }
  }
  targets.push('server.json');

  // 2. .well-known/mcp/server-card.json
  const cardPath = '.well-known/mcp/server-card.json';
  if (fs.existsSync(path.join(PROJECT_ROOT, cardPath))) {
    const card = readJson(cardPath);
    if (card.version !== version) {
      drifted.push({ file: cardPath, field: 'version', current: card.version });
      if (!checkOnly) {
        card.version = version;
        writeJson(cardPath, card);
      }
    }
    targets.push(cardPath);
  }

  // 3. docs/landing-page.html — hero badge + JSON snippet
  const landingPath = 'docs/landing-page.html';
  if (fs.existsSync(path.join(PROJECT_ROOT, landingPath))) {
    const landingContent = fs.readFileSync(path.join(PROJECT_ROOT, landingPath), 'utf-8');
    // Match any version pattern in the hero badge
    const badgeMatch = landingContent.match(/v(\d+\.\d+\.\d+) — Hosted API/);
    if (badgeMatch && badgeMatch[1] !== version) {
      drifted.push({ file: landingPath, field: 'hero-badge', current: badgeMatch[1] });
      if (!checkOnly) {
        replaceInFile(landingPath, `v${badgeMatch[1]} — Hosted API`, `v${version} — Hosted API`);
      }
    }
    // JSON snippet version
    const jsonMatch = landingContent.match(/"version"<\/span><span class="out">: <\/span><span class="val">"(\d+\.\d+\.\d+)"/);
    if (jsonMatch && jsonMatch[1] !== version) {
      drifted.push({ file: landingPath, field: 'json-snippet', current: jsonMatch[1] });
      if (!checkOnly) {
        replaceInFile(landingPath, `"${jsonMatch[1]}"</div>`, `"${version}"</div>`);
      }
    }
    targets.push(landingPath);
  }

  // 4. docs/mcp-hub-submission.md
  const mcpSubmPath = 'docs/mcp-hub-submission.md';
  if (fs.existsSync(path.join(PROJECT_ROOT, mcpSubmPath))) {
    const mcpContent = fs.readFileSync(path.join(PROJECT_ROOT, mcpSubmPath), 'utf-8');
    const versionMatch = mcpContent.match(/## Version\s+(\d+\.\d+\.\d+)/);
    if (versionMatch && versionMatch[1] !== version) {
      drifted.push({ file: mcpSubmPath, field: 'version-heading', current: versionMatch[1] });
      if (!checkOnly) {
        // BUG FIX: previously this replaced the bare version string
        // (versionMatch[1]) everywhere in the document, which could clobber
        // unrelated mentions of the old version (e.g. a changelog line).
        // Anchor the replacement to the matched "## Version" heading span.
        replaceInFile(mcpSubmPath, versionMatch[0], versionMatch[0].replace(versionMatch[1], version));
      }
    }
    targets.push(mcpSubmPath);
  }

  return {
    version,
    targets,
    drifted,
    // synced: true only when this run actually wrote fixes
    synced: !checkOnly && drifted.length > 0,
    allInSync: drifted.length === 0,
  };
}
|
|
127
|
+
|
|
128
|
+
// ---------------------------------------------------------------------------
|
|
129
|
+
// CLI
|
|
130
|
+
// ---------------------------------------------------------------------------
|
|
131
|
+
|
|
132
|
+
// CLI entry: `node scripts/sync-version.js [--check]`
if (require.main === module) {
  const checkOnly = process.argv.includes('--check');
  const result = syncVersion({ check: checkOnly });

  if (result.allInSync) {
    console.log(`✔ All ${result.targets.length} targets in sync at v${result.version}`);
    process.exit(0);
  }

  if (checkOnly) {
    // Dry-run found drift: report it and fail so CI can gate on this script.
    console.error(`✖ Version drift detected (package.json = ${result.version}):`);
    for (const d of result.drifted) {
      console.error(`  ${d.file} [${d.field}] = ${d.current}`);
    }
    process.exit(1);
  }

  // Write mode: drift existed and has just been fixed.
  console.log(`✔ Synced ${result.drifted.length} targets to v${result.version}:`);
  for (const d of result.drifted) {
    console.log(`  ${d.file} [${d.field}]: ${d.current} → ${result.version}`);
  }
}
|
|
154
|
+
|
|
155
|
+
// ---------------------------------------------------------------------------
|
|
156
|
+
// Exports
|
|
157
|
+
// ---------------------------------------------------------------------------
|
|
158
|
+
|
|
159
|
+
module.exports = { syncVersion };
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
const { spawnSync } = require('node:child_process');
|
|
3
|
+
const fs = require('node:fs');
|
|
4
|
+
const path = require('node:path');
|
|
5
|
+
|
|
6
|
+
const PROJECT_ROOT = path.join(__dirname, '..');
|
|
7
|
+
const TESTS_DIR = path.join(PROJECT_ROOT, 'tests');
|
|
8
|
+
|
|
9
|
+
/**
 * Recursively collect `*.test.js` files under `dir`, returned as paths
 * relative to `projectRoot`, sorted for deterministic ordering.
 * @param {{dir?: string, projectRoot?: string}} [options]
 * @returns {string[]} Sorted relative test-file paths.
 */
function findCoverageTestFiles({
  dir = TESTS_DIR,
  projectRoot = PROJECT_ROOT,
} = {}) {
  const found = [];

  for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
    const entryPath = path.join(dir, entry.name);
    if (entry.isDirectory()) {
      // Recurse, keeping every path relative to the same root.
      for (const nested of findCoverageTestFiles({ dir: entryPath, projectRoot })) {
        found.push(nested);
      }
    } else if (entry.isFile() && entry.name.endsWith('.test.js')) {
      found.push(path.relative(projectRoot, entryPath));
    }
  }

  return found.sort();
}
|
|
30
|
+
|
|
31
|
+
/**
 * Build the `node` argument vector for a coverage-enabled test run.
 * @param {string[]} files - Test file paths to execute.
 * @returns {string[]} Arguments for `node` (flags first, then files).
 */
function buildCoverageArgs(files) {
  const flags = ['--test', '--test-concurrency=1', '--experimental-test-coverage'];
  return flags.concat(files);
}

/**
 * Run the test suite under Node's built-in coverage reporter.
 * The spawner is injectable (`spawn`) so tests can stub the child process.
 * @param {{files?: string[], cwd?: string, spawn?: Function}} [options]
 * @returns {{exitCode: number, error: string|null, args: string[]}}
 */
function runCoverage({
  files = findCoverageTestFiles(),
  cwd = PROJECT_ROOT,
  spawn = spawnSync,
} = {}) {
  const args = buildCoverageArgs(files);

  if (files.length === 0) {
    // An empty suite must fail loudly rather than look green.
    return { exitCode: 1, error: 'No test files found for coverage run.', args };
  }

  const result = spawn(process.execPath, args, {
    cwd,
    env: process.env,
    stdio: 'inherit',
  });

  // `status` is null when the child died from a signal — treat as failure.
  const exitCode = Number.isInteger(result.status) ? result.status : 1;
  return {
    exitCode,
    error: result.error ? result.error.message : null,
    args,
  };
}
|
|
61
|
+
|
|
62
|
+
// CLI entry: run the coverage suite and mirror the child's exit status.
if (require.main === module) {
  const outcome = runCoverage();
  if (outcome.error) {
    console.error(outcome.error);
  }
  process.exit(outcome.exitCode);
}
|
|
69
|
+
|
|
70
|
+
module.exports = {
|
|
71
|
+
PROJECT_ROOT,
|
|
72
|
+
TESTS_DIR,
|
|
73
|
+
findCoverageTestFiles,
|
|
74
|
+
buildCoverageArgs,
|
|
75
|
+
runCoverage,
|
|
76
|
+
};
|
|
@@ -0,0 +1,287 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
const fs = require('node:fs');
|
|
3
|
+
const path = require('node:path');
|
|
4
|
+
|
|
5
|
+
const PROJECT_ROOT = path.join(__dirname, '..');
|
|
6
|
+
const WORKFLOW_PATH = path.join(PROJECT_ROOT, 'WORKFLOW.md');
|
|
7
|
+
const ISSUE_TEMPLATE_PATH = path.join(PROJECT_ROOT, '.github', 'ISSUE_TEMPLATE', 'ready-for-agent.yml');
|
|
8
|
+
const PR_TEMPLATE_PATH = path.join(PROJECT_ROOT, '.github', 'pull_request_template.md');
|
|
9
|
+
const README_PATH = path.join(PROJECT_ROOT, 'README.md');
|
|
10
|
+
|
|
11
|
+
// Headings that WORKFLOW.md's body must contain (the agent-run contract).
const REQUIRED_WORKFLOW_HEADINGS = [
  '## Scope',
  '## Hard Stops',
  '## Required Proof of Work',
  '## Implementation Rules',
  '## Done Means',
  '## Handoff Format',
];

// Proof-of-work commands that must appear in the workflow doc and both templates.
const REQUIRED_PROOF_COMMANDS = [
  'npm test',
  'npm run test:coverage',
  'npm run prove:adapters',
  'npm run prove:automation',
  'npm run self-heal:check',
];

// Form field ids required in the ready-for-agent issue template.
const REQUIRED_ISSUE_FIELDS = [
  'business_outcome',
  'problem',
  'in_scope',
  'out_of_scope',
  'acceptance_criteria',
  'proof_commands',
];

// Sections required in the pull request template.
const REQUIRED_PR_SECTIONS = [
  '## What Changed',
  '## Why',
  '## Verification',
  '## Evidence',
  '## Risks',
];
|
|
44
|
+
|
|
45
|
+
/**
 * Read a file as UTF-8 text.
 * @param {string} filePath - Absolute file path.
 * @returns {string}
 */
function readText(filePath) {
  return fs.readFileSync(filePath, 'utf8');
}

/**
 * Split a document into YAML front matter and markdown body.
 * The document must begin with `---` … `---` markers.
 * @param {string} text - Full document text.
 * @returns {{ok: boolean, frontMatter: string, body: string, error: string|null}}
 */
function extractFrontMatter(text) {
  const match = text.match(/^---\n([\s\S]*?)\n---\n([\s\S]*)$/);
  if (match) {
    return { ok: true, frontMatter: match[1], body: match[2].trim(), error: null };
  }
  return {
    ok: false,
    frontMatter: '',
    body: '',
    error: 'WORKFLOW.md must start with YAML front matter wrapped in --- markers.',
  };
}

/**
 * True when `pattern` matches anywhere in `text`.
 * @param {string} text
 * @param {RegExp} pattern
 * @returns {boolean}
 */
function hasLine(text, pattern) {
  return pattern.test(text);
}

/**
 * Return the subset of `entries` for which `matcher(text, entry)` holds.
 * @param {string} text
 * @param {Array} entries
 * @param {Function} matcher - (text, entry) => boolean
 * @returns {Array}
 */
function collectMatches(text, entries, matcher) {
  const hits = [];
  for (const entry of entries) {
    if (matcher(text, entry)) {
      hits.push(entry);
    }
  }
  return hits;
}
|
|
75
|
+
|
|
76
|
+
/**
 * Validate WORKFLOW.md: YAML front-matter keys, required body headings,
 * proof-of-work commands, and mandatory policy statements.
 * @param {string} text - Full WORKFLOW.md content.
 * @returns {{issues: string[], details: {headingsFound: string[], proofCommandsFound: string[]}}}
 */
function validateWorkflowFile(text) {
  const issues = [];
  const details = {
    headingsFound: [],
    proofCommandsFound: [],
  };

  const extracted = extractFrontMatter(text);
  if (!extracted.ok) {
    // No usable front matter — nothing else can be validated.
    issues.push(extracted.error);
    return { issues, details };
  }

  const { frontMatter, body } = extracted;

  // Every structural key the orchestrator expects in front matter.
  const requiredFrontMatterPatterns = [
    { label: 'tracker block', pattern: /^tracker:\s*$/m },
    { label: 'linear tracker kind', pattern: /^\s*kind:\s*linear\s*$/m },
    { label: 'project_slug', pattern: /^\s*project_slug:\s*.+$/m },
    { label: 'workspace block', pattern: /^workspace:\s*$/m },
    { label: 'workspace root', pattern: /^\s*root:\s*.+$/m },
    { label: 'hooks block', pattern: /^hooks:\s*$/m },
    { label: 'after_create hook', pattern: /^\s*after_create:\s*\|/m },
    { label: 'before_run hook', pattern: /^\s*before_run:\s*\|/m },
    { label: 'after_run hook', pattern: /^\s*after_run:\s*\|/m },
    { label: 'agent block', pattern: /^agent:\s*$/m },
    { label: 'max_concurrent_agents', pattern: /^\s*max_concurrent_agents:\s*\d+/m },
    { label: 'max_turns', pattern: /^\s*max_turns:\s*\d+/m },
    { label: 'codex block', pattern: /^codex:\s*$/m },
    { label: 'codex command', pattern: /^\s*command:\s*['"]?codex app-server['"]?\s*$/m },
  ];

  for (const { label, pattern } of requiredFrontMatterPatterns) {
    if (!hasLine(frontMatter, pattern)) {
      issues.push(`WORKFLOW.md is missing ${label} in front matter.`);
    }
  }

  details.headingsFound = collectMatches(body, REQUIRED_WORKFLOW_HEADINGS, (value, heading) => value.includes(heading));
  for (const heading of REQUIRED_WORKFLOW_HEADINGS) {
    if (!body.includes(heading)) {
      issues.push(`WORKFLOW.md body is missing required heading "${heading}".`);
    }
  }

  details.proofCommandsFound = collectMatches(body, REQUIRED_PROOF_COMMANDS, (value, command) => value.includes(command));
  for (const command of REQUIRED_PROOF_COMMANDS) {
    if (!body.includes(command)) {
      issues.push(`WORKFLOW.md proof section is missing "${command}".`);
    }
  }

  // Policy statements that must be spelled out verbatim.
  if (!body.includes('no dead code')) {
    issues.push('WORKFLOW.md must explicitly ban dead code.');
  }
  if (!body.includes('docs/VERIFICATION_EVIDENCE.md')) {
    issues.push('WORKFLOW.md must require updates to docs/VERIFICATION_EVIDENCE.md for behavior changes.');
  }

  return { issues, details };
}
|
|
139
|
+
|
|
140
|
+
/**
 * Validate the "Ready for Agent" issue form: its display name, the
 * required form field ids, and the proof-of-work command list.
 * @param {string} text - Raw template YAML content.
 * @returns {{issues: string[], details: {fieldIdsFound: string[]}}}
 */
function validateIssueTemplateFile(text) {
  const issues = [];
  const fieldIdsFound = collectMatches(text, REQUIRED_ISSUE_FIELDS, (value, fieldId) => value.includes(`id: ${fieldId}`));

  if (!text.includes('name: Ready for Agent')) {
    issues.push('ready-for-agent issue template must be named "Ready for Agent".');
  }

  for (const fieldId of REQUIRED_ISSUE_FIELDS) {
    if (text.includes(`id: ${fieldId}`)) continue;
    issues.push(`ready-for-agent issue template is missing field id "${fieldId}".`);
  }

  for (const command of REQUIRED_PROOF_COMMANDS) {
    if (text.includes(command)) continue;
    issues.push(`ready-for-agent issue template must include proof command "${command}".`);
  }

  return { issues, details: { fieldIdsFound } };
}
|
|
164
|
+
|
|
165
|
+
/**
 * Validate the pull request template: required sections plus the
 * proof-of-work command list.
 * @param {string} text - Raw template markdown content.
 * @returns {{issues: string[], details: {sectionsFound: string[]}}}
 */
function validatePullRequestTemplateFile(text) {
  const issues = [];
  const sectionsFound = collectMatches(text, REQUIRED_PR_SECTIONS, (value, section) => value.includes(section));

  for (const section of REQUIRED_PR_SECTIONS) {
    if (text.includes(section)) continue;
    issues.push(`pull request template is missing section "${section}".`);
  }

  for (const command of REQUIRED_PROOF_COMMANDS) {
    if (text.includes(command)) continue;
    issues.push(`pull request template must include proof command "${command}".`);
  }

  return { issues, details: { sectionsFound } };
}
|
|
185
|
+
|
|
186
|
+
/**
 * Validate README.md: it must point operators at WORKFLOW.md and at the
 * ready-for-agent intake template.
 * @param {string} text - README content.
 * @returns {{issues: string[], details: {referencesWorkflow: boolean, referencesReadyTemplate: boolean}}}
 */
function validateReadmeFile(text) {
  const referencesWorkflow = text.includes('WORKFLOW.md');
  const referencesReadyTemplate = text.includes('ready-for-agent');
  const issues = [];

  if (!referencesWorkflow) {
    issues.push('README.md must reference WORKFLOW.md so operators can find the agent-run contract.');
  }

  if (!referencesReadyTemplate) {
    issues.push('README.md must reference the ready-for-agent intake template.');
  }

  return { issues, details: { referencesWorkflow, referencesReadyTemplate } };
}
|
|
203
|
+
|
|
204
|
+
/**
 * Run all workflow-contract validators against the project's files.
 * Missing files are recorded as issues rather than thrown.
 * @param {{projectRoot?: string}} [options]
 * @returns {{ok: boolean, generatedAt: string, files: object, requiredProofCommands: string[], details: object, issues: string[]}}
 */
function runWorkflowContractValidation(options = {}) {
  const projectRoot = options.projectRoot || PROJECT_ROOT;
  const files = {
    workflow: path.join(projectRoot, 'WORKFLOW.md'),
    issueTemplate: path.join(projectRoot, '.github', 'ISSUE_TEMPLATE', 'ready-for-agent.yml'),
    pullRequestTemplate: path.join(projectRoot, '.github', 'pull_request_template.md'),
    readme: path.join(projectRoot, 'README.md'),
  };

  const validators = [
    ['workflow', validateWorkflowFile],
    ['issueTemplate', validateIssueTemplateFile],
    ['pullRequestTemplate', validatePullRequestTemplateFile],
    ['readme', validateReadmeFile],
  ];

  const issues = [];
  const details = {};

  for (const [key, validator] of validators) {
    const filePath = files[key];
    if (!fs.existsSync(filePath)) {
      // Absent file: record the gap, keep validating the rest.
      issues.push(`${path.relative(projectRoot, filePath)} is missing.`);
      details[key] = {};
      continue;
    }

    const validation = validator(readText(filePath));
    details[key] = validation.details;
    issues.push(...validation.issues);
  }

  return {
    ok: issues.length === 0,
    generatedAt: new Date().toISOString(),
    files: {
      workflow: path.relative(projectRoot, files.workflow),
      issueTemplate: path.relative(projectRoot, files.issueTemplate),
      pullRequestTemplate: path.relative(projectRoot, files.pullRequestTemplate),
      readme: path.relative(projectRoot, files.readme),
    },
    // Copy so callers can't mutate the module-level constant.
    requiredProofCommands: REQUIRED_PROOF_COMMANDS.slice(),
    details,
    issues,
  };
}
|
|
249
|
+
|
|
250
|
+
/**
 * Print a validation result, either as pretty JSON (--json) or as a
 * human-readable pass/fail summary.
 * @param {object} result - Output of runWorkflowContractValidation().
 * @param {boolean} asJson - Emit machine-readable JSON instead of prose.
 */
function printResult(result, asJson) {
  if (asJson) {
    console.log(JSON.stringify(result, null, 2));
    return;
  }

  if (result.ok) {
    console.log('Workflow contract validation passed.');
    console.log(`Validated: ${Object.values(result.files).join(', ')}`);
    return;
  }

  console.error('Workflow contract validation failed:');
  for (const issue of result.issues) {
    console.error(`- ${issue}`);
  }
}
|
|
267
|
+
|
|
268
|
+
// CLI entry: `node scripts/validate-workflow-contract.js [--json]`
if (require.main === module) {
  const wantJson = process.argv.includes('--json');
  const outcome = runWorkflowContractValidation();
  printResult(outcome, wantJson);
  process.exit(outcome.ok ? 0 : 1);
}
|
|
274
|
+
|
|
275
|
+
module.exports = {
|
|
276
|
+
PROJECT_ROOT,
|
|
277
|
+
REQUIRED_ISSUE_FIELDS,
|
|
278
|
+
REQUIRED_PR_SECTIONS,
|
|
279
|
+
REQUIRED_PROOF_COMMANDS,
|
|
280
|
+
REQUIRED_WORKFLOW_HEADINGS,
|
|
281
|
+
extractFrontMatter,
|
|
282
|
+
runWorkflowContractValidation,
|
|
283
|
+
validateIssueTemplateFile,
|
|
284
|
+
validatePullRequestTemplateFile,
|
|
285
|
+
validateReadmeFile,
|
|
286
|
+
validateWorkflowFile,
|
|
287
|
+
};
|
package/scripts/vector-store.js
CHANGED
|
@@ -1,31 +1,100 @@
|
|
|
1
1
|
'use strict';
|
|
2
2
|
|
|
3
|
+
const fs = require('fs');
|
|
3
4
|
const path = require('path');
|
|
5
|
+
const {
|
|
6
|
+
resolveEmbeddingProfile,
|
|
7
|
+
writeModelFitReport,
|
|
8
|
+
resolveFeedbackDir,
|
|
9
|
+
} = require('./local-model-profile');
|
|
4
10
|
|
|
5
11
|
const PROJECT_ROOT = path.join(__dirname, '..');
|
|
6
|
-
const
|
|
12
|
+
const DEFAULT_FEEDBACK_DIR = path.join(PROJECT_ROOT, '.claude', 'memory', 'feedback');
|
|
13
|
+
const DEFAULT_LANCE_DIR = path.join(DEFAULT_FEEDBACK_DIR, 'lancedb');
|
|
7
14
|
|
|
8
15
|
// Module-level cache — prevents re-importing on every upsertFeedback() call
|
|
9
16
|
// First ESM import takes ~200ms; second is instant from cache.
|
|
10
17
|
let _lancedb = null;
|
|
11
|
-
let
|
|
18
|
+
let _lancedbLoader = null;
|
|
19
|
+
const _pipelineCache = new Map();
|
|
20
|
+
let _lastEmbeddingProfile = null;
|
|
21
|
+
let _pipelineLoader = null;
|
|
12
22
|
const TABLE_NAME = 'rlhf_memories';
|
|
13
23
|
|
|
14
24
|
/**
 * Lazily load the LanceDB module, honoring a test-injected loader.
 * The result is cached at module level so repeat calls are instant.
 * @returns {Promise<object>} The @lancedb/lancedb module namespace.
 */
async function getLanceDB() {
  if (_lancedb) {
    return _lancedb;
  }
  // Prefer the injected loader (tests) over the real dynamic import.
  _lancedb = _lancedbLoader ? await _lancedbLoader() : await import('@lancedb/lancedb');
  return _lancedb;
}
|
|
20
30
|
|
|
31
|
+
/**
 * Resolve the active feedback directory, honoring RLHF_FEEDBACK_DIR.
 * @returns {string} Resolved feedback directory path.
 */
function getFeedbackDir() {
  const configured = process.env.RLHF_FEEDBACK_DIR || DEFAULT_FEEDBACK_DIR;
  return resolveFeedbackDir(configured);
}

/**
 * LanceDB storage directory nested under the feedback directory.
 * @returns {string}
 */
function getLanceDir() {
  return path.join(getFeedbackDir(), 'lancedb');
}

/**
 * Create `dirPath` (including parents) if it does not already exist.
 * @param {string} dirPath
 */
function ensureDir(dirPath) {
  if (fs.existsSync(dirPath)) {
    return;
  }
  fs.mkdirSync(dirPath, { recursive: true });
}

/**
 * Clamp text to `maxChars` characters before embedding. A falsy
 * `maxChars` (including 0) means "no limit".
 * @param {string} text - Input text; null/undefined coerce to ''.
 * @param {number} [maxChars] - Maximum character count.
 * @returns {string}
 */
function truncateForEmbedding(text, maxChars) {
  const raw = String(text || '');
  if (!maxChars) {
    return raw;
  }
  return raw.length > maxChars ? raw.slice(0, maxChars) : raw;
}
|
|
50
|
+
|
|
51
|
+
/**
 * Load (or reuse) a feature-extraction pipeline for an embedding profile.
 * Pipelines are cached per model+quantization so repeated embeds are cheap.
 * @param {{id: string, model: string, quantized: boolean}} profile
 * @returns {Promise<Function>} Feature-extraction pipeline.
 * @throws When RLHF_VECTOR_FORCE_PRIMARY_FAILURE=true and this is not
 *   the fallback profile (test hook for exercising the fallback path).
 */
async function loadPipelineForProfile(profile) {
  const cacheKey = `${profile.model}::${profile.quantized}`;
  if (_pipelineCache.has(cacheKey)) {
    return _pipelineCache.get(cacheKey);
  }

  // Test hook: simulate a primary-profile load failure.
  const forceFailure = process.env.RLHF_VECTOR_FORCE_PRIMARY_FAILURE === 'true';
  if (forceFailure && profile.id !== 'fallback') {
    throw new Error('Forced primary embedding profile failure');
  }

  const pipelineFactory = _pipelineLoader || (await import('@huggingface/transformers')).pipeline;
  const pipe = await pipelineFactory('feature-extraction', profile.model, {
    quantized: profile.quantized,
  });
  _pipelineCache.set(cacheKey, pipe);
  return pipe;
}
|
|
68
|
+
|
|
21
69
|
/**
 * Resolve the embedding profile, record a model-fit report, and load the
 * pipeline — falling back to the fallback profile when the primary fails.
 * Updates module-level `_lastEmbeddingProfile` for introspection.
 * @returns {Promise<{pipe: Function, profile: object}>}
 */
async function getEmbeddingPipeline() {
  const resolved = resolveEmbeddingProfile();
  const report = writeModelFitReport(getFeedbackDir(), { resolved }).report;

  try {
    const pipe = await loadPipelineForProfile(resolved.selectedProfile);
    _lastEmbeddingProfile = {
      ...report,
      activeProfile: resolved.selectedProfile,
      fallbackUsed: false,
    };
    return { pipe, profile: _lastEmbeddingProfile };
  } catch (primaryError) {
    // Primary profile failed to load — switch to the fallback profile and
    // rewrite the fit report so the on-disk record reflects what is active.
    const fallback = resolved.fallbackProfile;
    const pipe = await loadPipelineForProfile(fallback);
    _lastEmbeddingProfile = {
      ...report,
      activeProfile: fallback,
      fallbackUsed: true,
      fallbackReason: primaryError.message,
    };
    writeModelFitReport(getFeedbackDir(), {
      resolved: {
        ...resolved,
        selectedProfile: fallback,
      },
    });
    return { pipe, profile: _lastEmbeddingProfile };
  }
}
|
|
30
99
|
|
|
31
100
|
// Stub embed support for unit tests — avoids HuggingFace ONNX model download.
|
|
@@ -39,15 +108,17 @@ async function embed(text) {
|
|
|
39
108
|
stub[0] = 1.0;
|
|
40
109
|
return stub;
|
|
41
110
|
}
|
|
42
|
-
const pipe = await getEmbeddingPipeline();
|
|
43
|
-
const output = await pipe(text,
|
|
111
|
+
const { pipe, profile } = await getEmbeddingPipeline();
|
|
112
|
+
const output = await pipe(truncateForEmbedding(text, profile.activeProfile.maxChars), {
|
|
113
|
+
pooling: 'mean',
|
|
114
|
+
normalize: true,
|
|
115
|
+
});
|
|
44
116
|
return Array.from(output.data); // Float32Array -> plain number[] for LanceDB Arrow serialization
|
|
45
117
|
}
|
|
46
118
|
|
|
47
119
|
async function upsertFeedback(feedbackEvent) {
|
|
48
|
-
const lanceDir =
|
|
49
|
-
|
|
50
|
-
: DEFAULT_LANCE_DIR;
|
|
120
|
+
const lanceDir = getLanceDir();
|
|
121
|
+
ensureDir(lanceDir);
|
|
51
122
|
|
|
52
123
|
const { connect } = await getLanceDB();
|
|
53
124
|
const db = await connect(lanceDir);
|
|
@@ -81,9 +152,8 @@ async function upsertFeedback(feedbackEvent) {
|
|
|
81
152
|
}
|
|
82
153
|
|
|
83
154
|
async function searchSimilar(queryText, limit = 5) {
|
|
84
|
-
const lanceDir =
|
|
85
|
-
|
|
86
|
-
: DEFAULT_LANCE_DIR;
|
|
155
|
+
const lanceDir = getLanceDir();
|
|
156
|
+
ensureDir(lanceDir);
|
|
87
157
|
|
|
88
158
|
const { connect } = await getLanceDB();
|
|
89
159
|
const db = await connect(lanceDir);
|
|
@@ -97,4 +167,32 @@ async function searchSimilar(queryText, limit = 5) {
|
|
|
97
167
|
return results;
|
|
98
168
|
}
|
|
99
169
|
|
|
100
|
-
|
|
170
|
+
function getEmbeddingConfig() {
|
|
171
|
+
return resolveEmbeddingProfile();
|
|
172
|
+
}
|
|
173
|
+
|
|
174
|
+
function getLastEmbeddingProfile() {
|
|
175
|
+
return _lastEmbeddingProfile;
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
function setPipelineLoaderForTests(loader) {
|
|
179
|
+
_pipelineLoader = loader;
|
|
180
|
+
_pipelineCache.clear();
|
|
181
|
+
_lastEmbeddingProfile = null;
|
|
182
|
+
}
|
|
183
|
+
|
|
184
|
+
function setLanceLoaderForTests(loader) {
|
|
185
|
+
_lancedbLoader = loader;
|
|
186
|
+
_lancedb = null;
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
module.exports = {
|
|
190
|
+
upsertFeedback,
|
|
191
|
+
searchSimilar,
|
|
192
|
+
TABLE_NAME,
|
|
193
|
+
getEmbeddingConfig,
|
|
194
|
+
getLastEmbeddingProfile,
|
|
195
|
+
setPipelineLoaderForTests,
|
|
196
|
+
setLanceLoaderForTests,
|
|
197
|
+
truncateForEmbedding,
|
|
198
|
+
};
|