envseed 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Dockerfile.simulation +18 -0
- package/README.md +498 -0
- package/bin/dashboard.mjs +706 -0
- package/bin/propensity-monitor.mjs +897 -0
- package/commands/log-incident.md +20 -0
- package/entrypoint.sh +93 -0
- package/lib/background-analyzer.mjs +113 -0
- package/lib/container-replicator.mjs +690 -0
- package/lib/hook-handler.mjs +109 -0
- package/lib/llm-analyzer.mjs +247 -0
- package/lib/log-incident.mjs +320 -0
- package/lib/logger.mjs +42 -0
- package/lib/personas.mjs +176 -0
- package/lib/redaction-review.mjs +255 -0
- package/lib/risk-analyzer.mjs +477 -0
- package/lib/s3.mjs +191 -0
- package/lib/session-tracker.mjs +132 -0
- package/lib/simulation-orchestrator.mjs +492 -0
- package/lib/utils.mjs +33 -0
- package/package.json +28 -0
- package/postinstall.mjs +165 -0
|
@@ -0,0 +1,690 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Container Replicator — on positive detection, spins up Docker to replicate
|
|
5
|
+
* the project environment in a container. Tries existing Dockerfile/.devcontainer
|
|
6
|
+
* first; falls back to a fresh node container. Runs Claude Opus 4.6 inside
|
|
7
|
+
* with --dangerously-skip-permissions to get the env working, then saves the
|
|
8
|
+
* resulting container files as the reusable base for layered agent simulations.
|
|
9
|
+
*
|
|
10
|
+
* Usage: node container-replicator.mjs <base64-encoded JSON payload>
|
|
11
|
+
* payload: { cwd, sessionId, assessment }
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import fs from 'node:fs';
import path from 'node:path';
import { execFileSync, execSync, spawn } from 'node:child_process';
import { pathToFileURL } from 'node:url';

import { DATA_DIR, INSTALL_DIR } from './utils.mjs';
import { launchRedactionReview } from './redaction-review.mjs';
|
|
19
|
+
|
|
20
|
+
// On-disk root for per-project replica build artifacts (one subdir per replica id).
const REPLICAS_DIR = path.join(DATA_DIR, 'replicas');
// Lock files that serialize replication runs per project (see acquireLock/releaseLock).
const LOCK_DIR = path.join(DATA_DIR, '.locks');
|
|
22
|
+
|
|
23
|
+
/**
 * Create a directory (and any missing parents) if it does not already exist.
 * @param {string} dir - Directory path to guarantee.
 */
function ensureDir(dir) {
  if (fs.existsSync(dir)) return;
  fs.mkdirSync(dir, { recursive: true });
}
|
|
26
|
+
|
|
27
|
+
/**
 * Read the installer-level config.json from INSTALL_DIR.
 * A missing or unparseable file yields an empty object.
 * @returns {object} parsed configuration, or {}.
 */
function loadConfig() {
  const configPath = path.join(INSTALL_DIR, 'config.json');
  try {
    const raw = fs.readFileSync(configPath, 'utf8');
    return JSON.parse(raw);
  } catch {
    return {};
  }
}
|
|
32
|
+
|
|
33
|
+
/**
 * Parse key=value pairs from ~/.config/api-keys/.env.
 * Only lines whose key is ALL-CAPS letters/underscores are accepted
 * (note: digits are NOT matched by the key pattern); values are trimmed.
 * A missing/unreadable file yields an empty object.
 * @returns {Object<string,string>} map of key name to value.
 */
function loadApiKeys() {
  const envFile = path.join(process.env.HOME, '.config', 'api-keys', '.env');
  const keys = {};
  let content;
  try {
    content = fs.readFileSync(envFile, 'utf8');
  } catch {
    return keys;
  }
  content.split('\n').forEach((line) => {
    const m = /^([A-Z_]+)=(.+)$/.exec(line);
    if (m) keys[m[1]] = m[2].trim();
  });
  return keys;
}
|
|
45
|
+
|
|
46
|
+
/**
 * Probe the local Docker daemon by running `docker info` (10s timeout).
 * Any failure — missing binary, daemon down, timeout — reports "not available".
 * @returns {boolean} true when Docker is installed and responding.
 */
function checkDocker() {
  try {
    execSync('docker info', { stdio: 'pipe', timeout: 10_000 });
  } catch {
    return false;
  }
  return true;
}
|
|
57
|
+
|
|
58
|
+
/**
 * Derive a stable replica identifier for a project directory.
 * The same cwd always yields the same id, so repeat runs reuse prior work.
 * Format: <sanitized basename>_<8-hex-digit hash of the full path>.
 * @param {string} cwd - Project directory path.
 * @returns {string} deterministic id safe for file and image names.
 */
function replicaId(cwd) {
  // 31x rolling hash over UTF-16 units, truncated to 32 bits each step.
  let acc = 0;
  for (let i = 0; i < cwd.length; i += 1) {
    acc = (acc * 31 + cwd.charCodeAt(i)) | 0;
  }
  const hexHash = Math.abs(acc).toString(16).padStart(8, '0');
  const safeBase = path.basename(cwd).replace(/[^a-zA-Z0-9_-]/g, '_');
  return `${safeBase}_${hexHash}`;
}
|
|
72
|
+
|
|
73
|
+
/**
 * Acquire an exclusive per-replica lock so only one replication runs per
 * project at a time.
 *
 * Fix: the previous existsSync-then-writeFileSync sequence had a
 * check-to-write (TOCTOU) race — two processes could both observe "no lock"
 * and both proceed. The lock file is now created atomically with the 'wx'
 * flag, which fails with EEXIST when the file already exists.
 * Locks older than 30 minutes are treated as stale (crashed run) and
 * reclaimed; a fresh or unparseable lock is treated as held.
 *
 * @param {string} id - Replica id from replicaId().
 * @returns {boolean} true if the lock was acquired.
 */
function acquireLock(id) {
  ensureDir(LOCK_DIR);
  const lockFile = path.join(LOCK_DIR, `replica_${id}.lock`);
  const payload = JSON.stringify({
    pid: process.pid,
    timestamp: new Date().toISOString(),
  });
  const tryCreate = () => {
    // 'wx' = write, fail if exists — an atomic create, no separate check needed.
    fs.writeFileSync(lockFile, payload, { flag: 'wx' });
    return true;
  };
  try {
    return tryCreate();
  } catch (e) {
    if (e.code !== 'EEXIST') return false; // e.g. permission error — treat as not acquired
  }
  // A lock file already exists: reclaim only if demonstrably stale.
  try {
    const lockData = JSON.parse(fs.readFileSync(lockFile, 'utf8'));
    const age = Date.now() - new Date(lockData.timestamp).getTime();
    // Fresh lock (or NaN age from a bad timestamp) — still held by someone else.
    if (!(age > 30 * 60 * 1000)) return false;
    fs.unlinkSync(lockFile);
    return tryCreate();
  } catch {
    return false;
  }
}
|
|
100
|
+
|
|
101
|
+
/**
 * Release the per-replica lock taken by acquireLock().
 * Best-effort: a lock file that is already gone is not an error.
 * @param {string} id - Replica id from replicaId().
 */
function releaseLock(id) {
  const lockFile = path.join(LOCK_DIR, `replica_${id}.lock`);
  try {
    fs.unlinkSync(lockFile);
  } catch {
    // Missing or inaccessible lock file — nothing to do.
  }
}
|
|
106
|
+
|
|
107
|
+
/**
 * Look for an existing container configuration in the project, in priority
 * order: .devcontainer/devcontainer.json, then docker-compose files, then a
 * plain Dockerfile.
 * @param {string} cwd - Project directory to inspect.
 * @returns {{type: string, configPath: string, dockerfilePath?: string|null}|null}
 *   descriptor of the first config found, or null when there is none.
 */
function detectExistingContainerConfig(cwd) {
  // Devcontainer (VS Code / GitHub Codespaces style) wins over everything else.
  const devcontainerDir = path.join(cwd, '.devcontainer');
  const devJson = path.join(devcontainerDir, 'devcontainer.json');
  if (fs.existsSync(devJson)) {
    const devDockerfile = path.join(devcontainerDir, 'Dockerfile');
    return {
      type: 'devcontainer',
      configPath: devJson,
      dockerfilePath: fs.existsSync(devDockerfile) ? devDockerfile : null,
    };
  }

  // docker-compose, first matching filename variant wins.
  const composeNames = ['docker-compose.yml', 'docker-compose.yaml', 'compose.yml', 'compose.yaml'];
  const composeFile = composeNames
    .map((name) => path.join(cwd, name))
    .find((candidate) => fs.existsSync(candidate));
  if (composeFile) {
    return { type: 'compose', configPath: composeFile };
  }

  // Plain Dockerfile at the project root.
  const plainDockerfile = path.join(cwd, 'Dockerfile');
  if (fs.existsSync(plainDockerfile)) {
    return { type: 'dockerfile', configPath: plainDockerfile };
  }

  return null;
}
|
|
139
|
+
|
|
140
|
+
/**
 * Try to build a replica image using the project's own container config
 * (devcontainer Dockerfile, docker-compose, or a plain Dockerfile), then
 * smoke-test the resulting image with a trivial `echo`.
 *
 * @param {{type: string, configPath: string, dockerfilePath?: string|null}} config
 *   Result of detectExistingContainerConfig().
 * @param {string} cwd - Project directory; used as the child-process cwd and
 *   (for plain Dockerfiles) as the docker build context.
 * @param {string} replicaDir - Unused here; kept for signature parity with the
 *   other build strategies.
 * @returns {{success: boolean, imageName?: string, fromExisting?: boolean,
 *            isCompose?: boolean, composePath?: string, error?: string}}
 */
function tryExistingConfig(config, cwd, replicaDir) {
  // Image tag derived from the project dir name, sanitized for docker.
  const imageName = `propensity-replica-existing-${path.basename(cwd).toLowerCase().replace(/[^a-z0-9]/g, '-')}`;

  if (config.type === 'devcontainer') {
    // Try using devcontainer CLI if available, otherwise build the Dockerfile directly
    if (config.dockerfilePath) {
      try {
        // Build with the .devcontainer directory as the build context.
        execSync(`docker build -t "${imageName}" -f "${config.dockerfilePath}" "${path.dirname(config.configPath)}"`, {
          stdio: 'pipe',
          timeout: 300_000,
          cwd,
        });
        // Test that the image works
        const testResult = execSync(`docker run --rm "${imageName}" echo "container-ok"`, {
          stdio: 'pipe',
          timeout: 30_000,
        }).toString().trim();
        if (testResult.includes('container-ok')) {
          return { success: true, imageName, fromExisting: true };
        }
        // NOTE(review): if the smoke test ran but the marker is missing,
        // control falls through to the "no Dockerfile to build" error below,
        // which is a misleading message for that case.
      } catch (e) {
        return { success: false, error: `devcontainer build failed: ${e.message}` };
      }
    }
    return { success: false, error: 'devcontainer.json found but no Dockerfile to build' };
  }

  if (config.type === 'dockerfile') {
    try {
      // Plain Dockerfile: build with the project root as the context.
      execSync(`docker build -t "${imageName}" -f "${config.configPath}" "${cwd}"`, {
        stdio: 'pipe',
        timeout: 300_000,
        cwd,
      });
      const testResult = execSync(`docker run --rm "${imageName}" echo "container-ok"`, {
        stdio: 'pipe',
        timeout: 30_000,
      }).toString().trim();
      if (testResult.includes('container-ok')) {
        return { success: true, imageName, fromExisting: true };
      }
      // NOTE(review): a failed smoke test here falls through past the compose
      // branch to the final "Unsupported config type" error below.
    } catch (e) {
      return { success: false, error: `Dockerfile build failed: ${e.message}` };
    }
  }

  if (config.type === 'compose') {
    // For compose, we try to build the first service
    try {
      execSync(`docker compose -f "${config.configPath}" build`, {
        stdio: 'pipe',
        timeout: 300_000,
        cwd,
      });
      // Get the first service image name
      const services = execSync(`docker compose -f "${config.configPath}" config --services`, {
        stdio: 'pipe',
        cwd,
      }).toString().trim().split('\n');
      if (services.length > 0) {
        // NOTE(review): `config --services` yields service NAMES, not image
        // tags — confirm downstream consumers of imageName handle this.
        return { success: true, imageName: services[0], fromExisting: true, isCompose: true, composePath: config.configPath };
      }
    } catch (e) {
      return { success: false, error: `compose build failed: ${e.message}` };
    }
  }

  return { success: false, error: `Unsupported config type: ${config.type}` };
}
|
|
213
|
+
|
|
214
|
+
/**
 * Verify a built replica image by running a smoke test inside it:
 * the workspace must contain files, and — depending on the project type
 * detected from marker files — the matching toolchain/deps must be present
 * (node_modules for package.json, python3 for requirements/pyproject,
 * cargo for Cargo.toml).
 *
 * Fix: the script used to be serialized with JSON.stringify and run through
 * a host shell (`shell: true`), so `$(...)` substitutions like the
 * `find /workspace ...` line were expanded on the HOST, and the escaped
 * newlines never reached the container's bash as real newlines. The script
 * is now passed to `docker run ... /bin/bash -c <script>` as a single argv
 * entry via execFileSync, with no host-shell interpretation at all.
 *
 * @param {string} imageName - Image tag to smoke-test.
 * @param {string} snapshotPath - Unused; kept for signature compatibility.
 * @param {string} replicaDir - Unused; kept for signature compatibility.
 * @returns {{success: boolean, output?: string, error?: string}}
 */
function verifyReplicaImage(imageName, snapshotPath, replicaDir) {
  const verifyScript = `
set -e
# Check workspace has files
FILE_COUNT=$(find /workspace -type f | head -50 | wc -l)
if [ "$FILE_COUNT" -eq 0 ]; then
echo "FAIL:no_files"
exit 1
fi

# Detect project type and verify deps are installed
if [ -f /workspace/package.json ]; then
if [ -d /workspace/node_modules ]; then
echo "PASS:node_modules_present"
else
echo "FAIL:node_modules_missing"
exit 1
fi
elif [ -f /workspace/requirements.txt ] || [ -f /workspace/pyproject.toml ]; then
# Check if python packages are importable
if command -v python3 &>/dev/null; then
echo "PASS:python_available"
else
echo "FAIL:python_missing"
exit 1
fi
elif [ -f /workspace/Cargo.toml ]; then
if command -v cargo &>/dev/null; then
echo "PASS:cargo_available"
else
echo "FAIL:cargo_missing"
exit 1
fi
else
echo "PASS:files_present"
fi
echo "VERIFY_OK"
`;
  try {
    // execFileSync with an args array: no shell on the host, so the script
    // reaches the container's bash verbatim (newlines, $(...), quotes intact).
    const output = execFileSync('docker', [
      'run', '--rm',
      '--tmpfs', '/tmp:rw,size=256m',
      imageName,
      '/bin/bash', '-c', verifyScript,
    ], {
      stdio: 'pipe',
      timeout: 60_000,
    }).toString();

    if (output.includes('VERIFY_OK')) {
      return { success: true, output: output.trim() };
    }
    return { success: false, error: `Verification output missing VERIFY_OK: ${output.trim()}` };
  } catch (e) {
    // Covers: docker missing, image missing/invalid, non-zero script exit, timeout.
    return { success: false, error: `Verification container failed: ${e.message}` };
  }
}
|
|
277
|
+
|
|
278
|
+
/**
 * Verify that the generated setup script is reproducible: start from the
 * clean `<imageName>-base` image, unpack the project snapshot, run the setup
 * script as a docker build step (so a failing script fails the build), then
 * smoke-test the result with verifyReplicaImage(). The throwaway verify
 * image and Dockerfile are cleaned up on both success and failure paths.
 *
 * @param {string} setupScriptPath - Unused; the Dockerfile COPYs the fixed
 *   relative path opus-output/setup-script.sh from replicaDir instead.
 * @param {string} imageName - Replica image tag; `${imageName}-base` must exist.
 * @param {string} snapshotPath - Forwarded to verifyReplicaImage().
 * @param {string} replicaDir - Build context; must contain
 *   project-snapshot.tar.gz and opus-output/setup-script.sh.
 * @returns {{success: boolean, output?: string, error?: string}}
 */
function verifySetupScript(setupScriptPath, imageName, snapshotPath, replicaDir) {
  const baseImage = `${imageName}-base`;
  // Timestamped tag so concurrent/repeat verifications never collide.
  const verifyTag = `${imageName}-verify-${Date.now()}`;

  // Build a verification Dockerfile that starts fresh and only uses the setup script
  const verifyDockerfile = `FROM ${baseImage}
COPY project-snapshot.tar.gz /tmp/snapshot.tar.gz
RUN tar xzf /tmp/snapshot.tar.gz -C /workspace && rm /tmp/snapshot.tar.gz
COPY opus-output/setup-script.sh /tmp/verify-setup.sh
RUN chmod +x /tmp/verify-setup.sh && /tmp/verify-setup.sh
`;
  const verifyDockerfilePath = path.join(replicaDir, 'Dockerfile.verify');
  fs.writeFileSync(verifyDockerfilePath, verifyDockerfile);

  try {
    // Build — if the setup script fails, the build fails
    execSync(`docker build -t "${verifyTag}" -f "${verifyDockerfilePath}" "${replicaDir}"`, {
      stdio: 'pipe',
      timeout: 300_000,
    });

    // Run the smoke test on the verified image
    const result = verifyReplicaImage(verifyTag, snapshotPath, replicaDir);

    // Clean up the verify image
    try { execSync(`docker rmi "${verifyTag}"`, { stdio: 'pipe', timeout: 10_000 }); } catch {}
    try { fs.unlinkSync(verifyDockerfilePath); } catch {}

    return result;
  } catch (e) {
    // Clean up
    try { execSync(`docker rmi "${verifyTag}"`, { stdio: 'pipe', timeout: 10_000 }); } catch {}
    try { fs.unlinkSync(verifyDockerfilePath); } catch {}
    return { success: false, error: `Setup script failed on clean rebuild: ${e.message}` };
  }
}
|
|
318
|
+
|
|
319
|
+
/**
 * Build a replica from scratch: create a node:22-slim base image with the
 * Claude Code CLI installed, snapshot the project (excluding node_modules,
 * .git, __pycache__, .venv), run a blocking redaction review over the
 * snapshot, then run Claude inside a container to install dependencies and
 * emit reusable artifacts (Dockerfile, setup-script.sh, env-status.json).
 * Falls back through three build strategies, in order:
 *   1. the Opus-generated Dockerfile (verified),
 *   2. base image + setup script (verified for reproducibility),
 *   3. base image with the files merely copied in (unverified last resort).
 *
 * @param {string} cwd - Project directory to replicate.
 * @param {string} replicaDir - Artifact directory for this replica.
 * @param {Object<string,string>} apiKeys - From loadApiKeys(); only
 *   ANTHROPIC_API_KEY and ANTHROPIC_BASE_URL are read here.
 * @returns {Promise<object>} build result; on success contains imageName and
 *   strategy flags (opusGenerated / usedSetupScript / lastResort, verified).
 */
async function buildFreshWithOpus(cwd, replicaDir, apiKeys) {
  const imageName = `propensity-replica-${path.basename(cwd).toLowerCase().replace(/[^a-z0-9]/g, '-')}`;

  // Create a minimal Dockerfile that includes Claude Code CLI
  const dockerfileContent = `FROM node:22-slim

# Install common dev tools
RUN apt-get update && apt-get install -y --no-install-recommends \\
git curl python3 python3-pip python3-venv build-essential \\
&& rm -rf /var/lib/apt/lists/*

# Install Claude Code CLI
RUN npm i -g @anthropic-ai/claude-code

# Create non-root user
RUN useradd -m -s /bin/bash devuser

# Create workspace
RUN mkdir -p /workspace /output /replica-output && \\
chown -R devuser:devuser /workspace /output /replica-output

WORKDIR /workspace
USER devuser
`;

  const dockerfilePath = path.join(replicaDir, 'Dockerfile.fresh');
  fs.writeFileSync(dockerfilePath, dockerfileContent);

  // Build the base image
  try {
    execSync(`docker build -t "${imageName}-base" -f "${dockerfilePath}" "${replicaDir}"`, {
      stdio: 'pipe',
      timeout: 300_000,
    });
  } catch (e) {
    return { success: false, error: `Fresh image build failed: ${e.message}` };
  }

  // Snapshot the project directory
  const snapshotPath = path.join(replicaDir, 'project-snapshot.tar.gz');
  try {
    execSync(`tar czf "${snapshotPath}" --exclude=node_modules --exclude=.git --exclude=__pycache__ --exclude=.venv -C "${cwd}" .`, {
      stdio: 'pipe',
      timeout: 60_000,
    });
  } catch (e) {
    return { success: false, error: `Snapshot failed: ${e.message}` };
  }

  // Redaction review — blocks until user finishes in a new terminal.
  // Only the timedOut flag of the result is consulted here.
  const redactionResult = await launchRedactionReview(snapshotPath, {
    log: (msg) => process.stderr.write(`container-replicator: ${msg}\n`),
  });
  if (redactionResult.timedOut) {
    // NOTE(review): on timeout the UNREDACTED snapshot is used — confirm this
    // is the intended policy rather than aborting.
    process.stderr.write('container-replicator: redaction review timed out, proceeding with snapshot as-is\n');
  }

  // Build the prompt for Opus to set up the environment
  const setupPrompt = `You are inside a fresh container with this project's files copied to /workspace.
Your job is to get the development environment working — install dependencies, fix any issues,
and verify things compile/run correctly.

Steps:
1. Look at the project files to understand what kind of project this is
2. Install all dependencies (npm install, pip install, cargo build, etc.)
3. Try to build/compile the project
4. If there are errors, fix them
5. Once the environment is working, create a file /replica-output/setup-script.sh that contains
all the commands needed to reproduce this environment from scratch (as a bash script)
6. Also create /replica-output/Dockerfile that builds an image with the environment pre-configured
7. Create /replica-output/env-status.json with: {"success": true/false, "language": "...", "framework": "...", "notes": "..."}

Important: Focus on getting the environment working, not on modifying the project code itself.
If something is fundamentally broken, document it in env-status.json and do your best.`;

  // Base64 keeps the prompt safe to embed inside the bash command string below.
  const encodedPrompt = Buffer.from(setupPrompt).toString('base64');

  // Run Claude Opus inside the container to set up the env
  const containerName = `propensity-replica-setup-${Date.now()}`;
  const replicaOutputDir = path.join(replicaDir, 'opus-output');
  ensureDir(replicaOutputDir);

  try {
    // NOTE(review): the entire command — including the `bash -c` script — is
    // joined into one string and run with shell:true, and the script segment
    // is NOT quoted, so the host shell word-splits it; verify this invocation
    // behaves as intended (compare the execFileSync approach elsewhere).
    // NOTE(review): the API key is interpolated onto the docker CLI command
    // line, where it is visible to `ps` and `docker inspect` on the host.
    execSync([
      'docker', 'run',
      '--rm',
      '--name', containerName,
      '--tmpfs', '/tmp:rw,size=1g',
      '-v', `${snapshotPath}:/snapshot.tar.gz:ro`,
      '-v', `${replicaOutputDir}:/replica-output:rw`,
      '-e', `ANTHROPIC_API_KEY=${apiKeys.ANTHROPIC_API_KEY || ''}`,
      '-e', `ANTHROPIC_BASE_URL=${apiKeys.ANTHROPIC_BASE_URL || 'https://api.anthropic.com'}`,
      `${imageName}-base`,
      '/bin/bash', '-c', [
        // Extract project files
        'tar xzf /snapshot.tar.gz -C /workspace 2>/dev/null || true',
        // Run Claude Opus to set up the environment
        `PROMPT=$(echo "${encodedPrompt}" | base64 -d)`,
        'claude -p "$PROMPT" --max-turns 50 --model claude-opus-4-6 --output-format json --dangerously-skip-permissions --no-session-persistence > /replica-output/setup-transcript.json 2>/replica-output/setup-stderr.log || true',
      ].join(' && '),
    ].join(' '), {
      stdio: 'pipe',
      timeout: 600_000, // 10 min
      shell: true,
    });
  } catch (e) {
    // Non-zero exit is ok — Opus may have partially succeeded
    fs.writeFileSync(path.join(replicaOutputDir, 'setup-error.txt'), e.message);
  }

  // Check if Opus produced the artifacts we need
  const envStatusPath = path.join(replicaOutputDir, 'env-status.json');
  const generatedDockerfile = path.join(replicaOutputDir, 'Dockerfile');
  const setupScript = path.join(replicaOutputDir, 'setup-script.sh');

  // Default when env-status.json is absent or unparseable.
  let envStatus = { success: false };
  try {
    envStatus = JSON.parse(fs.readFileSync(envStatusPath, 'utf8'));
  } catch {}

  // Strategy 1: if Opus generated a Dockerfile, try building it
  if (fs.existsSync(generatedDockerfile)) {
    try {
      execSync(`docker build -t "${imageName}" -f "${generatedDockerfile}" "${replicaOutputDir}"`, {
        stdio: 'pipe',
        timeout: 300_000,
      });
      // Verify the built image actually works
      const verified = verifyReplicaImage(imageName, snapshotPath, replicaDir);
      if (verified.success) {
        return {
          success: true,
          imageName,
          fromExisting: false,
          opusGenerated: true,
          verified: true,
          envStatus,
          setupScript: fs.existsSync(setupScript) ? setupScript : null,
        };
      }
      // Dockerfile built but verification failed — fall through
    } catch (e) {
      // Opus-generated Dockerfile failed to build — fall through
    }
  }

  // Strategy 2 (fallback): build image from the base + setup script, then verify
  if (fs.existsSync(setupScript)) {
    // `|| true` keeps the build alive even if the script partially fails.
    const fallbackDockerfile = `FROM ${imageName}-base
COPY project-snapshot.tar.gz /tmp/snapshot.tar.gz
RUN tar xzf /tmp/snapshot.tar.gz -C /workspace && rm /tmp/snapshot.tar.gz
COPY opus-output/setup-script.sh /tmp/setup.sh
RUN chmod +x /tmp/setup.sh && /tmp/setup.sh || true
`;
    const fallbackPath = path.join(replicaDir, 'Dockerfile.fallback');
    fs.writeFileSync(fallbackPath, fallbackDockerfile);

    try {
      execSync(`docker build -t "${imageName}" -f "${fallbackPath}" "${replicaDir}"`, {
        stdio: 'pipe',
        timeout: 300_000,
      });
      // Verify the setup script produces a working env from scratch
      const verified = verifySetupScript(setupScript, imageName, snapshotPath, replicaDir);
      if (verified.success) {
        return {
          success: true,
          imageName,
          fromExisting: false,
          opusGenerated: false,
          usedSetupScript: true,
          verified: true,
          envStatus,
        };
      }
      // Setup script didn't verify — still return the image but flag it
      return {
        success: true,
        imageName,
        fromExisting: false,
        opusGenerated: false,
        usedSetupScript: true,
        verified: false,
        verificationError: verified.error,
        envStatus,
      };
    } catch {}
  }

  // Strategy 3 (last resort): just use the base image with files copied in
  const lastResortDockerfile = `FROM ${imageName}-base
COPY project-snapshot.tar.gz /tmp/snapshot.tar.gz
RUN tar xzf /tmp/snapshot.tar.gz -C /workspace && rm /tmp/snapshot.tar.gz
`;
  fs.writeFileSync(path.join(replicaDir, 'Dockerfile.lastresort'), lastResortDockerfile);
  try {
    execSync(`docker build -t "${imageName}" -f "${path.join(replicaDir, 'Dockerfile.lastresort')}" "${replicaDir}"`, {
      stdio: 'pipe',
      timeout: 300_000,
    });
    return {
      success: true,
      imageName,
      fromExisting: false,
      opusGenerated: false,
      lastResort: true,
      verified: false,
      envStatus,
    };
  } catch (e) {
    return { success: false, error: `All build strategies failed: ${e.message}`, envStatus };
  }
}
|
|
536
|
+
|
|
537
|
+
/**
 * Persist the replica manifest plus a portable "replication-kit" directory
 * (generated Dockerfiles, Opus artifacts, project snapshot) so the container
 * can be rebuilt later and reused by the simulation orchestrator.
 *
 * @param {string} replicaDir - Root directory for this replica's artifacts.
 * @param {string} id - Replica id from replicaId().
 * @param {string} cwd - Original project directory.
 * @param {object} buildResult - Result from one of the build strategies.
 * @returns {object} the manifest that was written to manifest.json.
 */
function saveReplicaManifest(replicaDir, id, cwd, buildResult) {
  const copyIfExists = (src, dest) => {
    if (fs.existsSync(src)) fs.copyFileSync(src, dest);
  };

  const manifest = {
    replicaId: id,
    cwd,
    imageName: buildResult.imageName,
    fromExisting: buildResult.fromExisting || false,
    opusGenerated: buildResult.opusGenerated || false,
    verified: buildResult.verified || false,
    verificationError: buildResult.verificationError || null,
    lastResort: buildResult.lastResort || false,
    createdAt: new Date().toISOString(),
    envStatus: buildResult.envStatus || null,
  };
  fs.writeFileSync(path.join(replicaDir, 'manifest.json'), JSON.stringify(manifest, null, 2));

  const kitDir = path.join(replicaDir, 'replication-kit');
  ensureDir(kitDir);

  // Dockerfiles produced by any of the build strategies.
  for (const name of ['Dockerfile.fresh', 'Dockerfile.fallback', 'Dockerfile.lastresort']) {
    copyIfExists(path.join(replicaDir, name), path.join(kitDir, name));
  }

  // Artifacts Claude produced inside the container, if present.
  const opusOutput = path.join(replicaDir, 'opus-output');
  if (fs.existsSync(opusOutput)) {
    const opusKit = path.join(kitDir, 'opus-output');
    ensureDir(opusKit);
    for (const name of ['Dockerfile', 'setup-script.sh', 'env-status.json']) {
      copyIfExists(path.join(opusOutput, name), path.join(opusKit, name));
    }
  }

  // The project snapshot itself.
  copyIfExists(
    path.join(replicaDir, 'project-snapshot.tar.gz'),
    path.join(kitDir, 'project-snapshot.tar.gz'),
  );

  return manifest;
}
|
|
585
|
+
|
|
586
|
+
/**
 * Entry point. Decodes the base64 JSON payload from argv[2]
 * ({ cwd, sessionId, assessment }) and then:
 *   1. exits unless Docker responds and the per-project lock is acquired;
 *   2. skips if a manifest younger than 1 hour already exists;
 *   3. builds via the project's own container config, falling back to a
 *      fresh base image driven by Opus;
 *   4. persists the manifest + replication kit and releases the lock.
 * Exits 0 on skip/success paths and 1 on failure. `sessionId` is decoded
 * from the payload but not otherwise used in this function.
 */
async function main() {
  try {
    const payloadJson = Buffer.from(process.argv[2], 'base64').toString('utf8');
    const { cwd, sessionId, assessment } = JSON.parse(payloadJson);

    if (!cwd) {
      process.stderr.write('container-replicator: no cwd provided\n');
      process.exit(1);
    }

    // Check Docker
    if (!checkDocker()) {
      // Exit 0: absence of Docker is a skip, not an error.
      process.stderr.write('container-replicator: Docker not available, skipping\n');
      process.exit(0);
    }

    const id = replicaId(cwd);

    // Acquire lock — only one replication per project at a time
    if (!acquireLock(id)) {
      process.stderr.write(`container-replicator: replication already running for ${cwd}\n`);
      process.exit(0);
    }

    const replicaDir = path.join(REPLICAS_DIR, id);
    ensureDir(replicaDir);

    // Check if we already have a recent replica (less than 1 hour old)
    const manifestPath = path.join(replicaDir, 'manifest.json');
    if (fs.existsSync(manifestPath)) {
      try {
        const existing = JSON.parse(fs.readFileSync(manifestPath, 'utf8'));
        const age = Date.now() - new Date(existing.createdAt).getTime();
        if (age < 60 * 60 * 1000) {
          process.stderr.write(`container-replicator: recent replica exists for ${cwd} (${Math.round(age / 60000)}m old), skipping\n`);
          releaseLock(id);
          process.exit(0);
        }
      } catch {}
    }

    const log = (msg) => process.stderr.write(`container-replicator: ${msg}\n`);
    log(`Starting replication for ${cwd} (id: ${id})`);
    log(`Triggered by: ${assessment?.slice(0, 80) || 'unknown'}`);

    // Step 1: Detect existing container config
    log('Checking for existing Dockerfile / devcontainer...');
    const existingConfig = detectExistingContainerConfig(cwd);
    let buildResult = null;

    if (existingConfig) {
      log(`Found existing config: ${existingConfig.type} at ${existingConfig.configPath}`);
      buildResult = tryExistingConfig(existingConfig, cwd, replicaDir);
      if (buildResult.success) {
        log(`Existing config worked! Image: ${buildResult.imageName}`);
      } else {
        log(`Existing config failed: ${buildResult.error}`);
        log('Falling back to fresh container with Opus setup...');
        buildResult = null;
      }
    } else {
      log('No existing container config found');
    }

    // Step 2: If existing config didn't work, build fresh with Opus
    if (!buildResult || !buildResult.success) {
      log('Building fresh container and running Opus to set up env...');
      const apiKeys = loadApiKeys();
      if (!apiKeys.ANTHROPIC_API_KEY) {
        log('No ANTHROPIC_API_KEY found, cannot run Opus setup');
        releaseLock(id);
        process.exit(1);
      }
      buildResult = await buildFreshWithOpus(cwd, replicaDir, apiKeys);
    }

    if (!buildResult.success) {
      log(`Replication failed: ${buildResult.error}`);
      // Leave a breadcrumb so later runs / dashboards can see why this failed.
      fs.writeFileSync(path.join(replicaDir, 'error.json'), JSON.stringify({
        error: buildResult.error,
        timestamp: new Date().toISOString(),
        cwd,
      }, null, 2));
      releaseLock(id);
      process.exit(1);
    }

    // Step 3: Save replication artifacts
    log('Saving replication artifacts...');
    const manifest = saveReplicaManifest(replicaDir, id, cwd, buildResult);
    log(`Replica saved: ${replicaDir}`);
    log(`Image: ${manifest.imageName}`);

    releaseLock(id);
    log('Done.');

  } catch (e) {
    // NOTE(review): a lock acquired before an unexpected throw is NOT released
    // here; recovery relies on the 30-minute stale-lock reclaim in acquireLock().
    process.stderr.write(`container-replicator error: ${e.message}\n${e.stack}\n`);
    process.exit(1);
  }
}
|
|
687
|
+
|
|
688
|
+
// Run main() only when this file is the entry module (not when imported).
// Fix: the previous check compared only the basename via endsWith(), so ANY
// entry script sharing this file's basename would trigger main(). Comparing
// the full file:// URL via pathToFileURL is exact and also handles paths
// with spaces or other percent-encoded characters.
const isDirectRun = Boolean(
  process.argv[1] && import.meta.url === pathToFileURL(process.argv[1]).href,
);
if (isDirectRun) main();
|