atris 2.6.2 → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/README.md +124 -34
  2. package/atris/CLAUDE.md +5 -1
  3. package/atris/atris.md +4 -0
  4. package/atris/features/README.md +24 -0
  5. package/atris/skills/autopilot/SKILL.md +74 -75
  6. package/atris/skills/endgame/SKILL.md +179 -0
  7. package/atris/skills/flow/SKILL.md +121 -0
  8. package/atris/skills/improve/SKILL.md +84 -0
  9. package/atris/skills/loop/SKILL.md +72 -0
  10. package/atris/skills/wiki/SKILL.md +61 -0
  11. package/atris/team/executor/MEMBER.md +10 -4
  12. package/atris/team/navigator/MEMBER.md +2 -0
  13. package/atris/team/validator/MEMBER.md +8 -5
  14. package/atris.md +33 -0
  15. package/bin/atris.js +210 -41
  16. package/commands/activate.js +28 -2
  17. package/commands/align.js +720 -0
  18. package/commands/auth.js +75 -2
  19. package/commands/autopilot.js +1213 -270
  20. package/commands/browse.js +100 -0
  21. package/commands/business.js +785 -12
  22. package/commands/clean.js +107 -2
  23. package/commands/computer.js +429 -0
  24. package/commands/context-sync.js +78 -8
  25. package/commands/experiments.js +351 -0
  26. package/commands/feedback.js +150 -0
  27. package/commands/fleet.js +395 -0
  28. package/commands/fork.js +127 -0
  29. package/commands/init.js +50 -1
  30. package/commands/learn.js +407 -0
  31. package/commands/lifecycle.js +94 -0
  32. package/commands/loop.js +114 -0
  33. package/commands/publish.js +129 -0
  34. package/commands/pull.js +434 -48
  35. package/commands/push.js +312 -164
  36. package/commands/review.js +149 -0
  37. package/commands/run.js +76 -43
  38. package/commands/serve.js +360 -0
  39. package/commands/setup.js +1 -1
  40. package/commands/soul.js +381 -0
  41. package/commands/status.js +119 -1
  42. package/commands/sync.js +147 -1
  43. package/commands/terminal.js +201 -0
  44. package/commands/wiki.js +376 -0
  45. package/commands/workflow.js +191 -74
  46. package/commands/workspace-clean.js +3 -3
  47. package/lib/endstate.js +259 -0
  48. package/lib/learnings.js +235 -0
  49. package/lib/manifest.js +1 -0
  50. package/lib/todo.js +9 -5
  51. package/lib/wiki.js +578 -0
  52. package/package.json +2 -2
  53. package/utils/api.js +48 -36
  54. package/utils/auth.js +1 -0
package/commands/align.js (new file, +720 lines):
/**
 * atris align <business> [--fix] [--hard] [--from cloud|local] [--dry-run]
 *
 * Compare a business workspace's local files against its EC2 cloud state,
 * report drift, and optionally fix it.
 *
 * SAFETY:
 * - Always wakes the EC2 computer first (the rule: never operate on cache)
 * - Walks via the warm runner /files endpoint, NOT agent_files
 * - Refuses destructive ops without --fix
 * - Throttles API calls to avoid rate limit (60/min on /file DELETE)
 *
 * USAGE:
 *   atris align                            # auto-detect business from .atris/business.json
 *   atris align pallet                     # explicit business slug
 *   atris align pallet --dry-run           # show diff, do nothing
 *   atris align pallet --fix               # local is canonical: delete EC2 extras, push local-only
 *   atris align pallet --fix --hard        # force-push: wipe cloud cruft, upload local (skips diff)
 *   atris align pallet --fix --from cloud  # cloud is canonical: pull EC2-only, delete local extras
 */
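
// For orientation, a sketch of the dispatch contract this file assumes (the
// real dispatcher lives in bin/atris.js, which this excerpt does not show):
// alignAtris() reads everything from process.argv itself, expecting the shape
//   ['node', 'atris', 'align', '<business>', ...flags]
// so the business slug lands at argv[3] and flags are found by scanning argv.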

const fs = require('fs');
const path = require('path');
const crypto = require('crypto');
const { loadCredentials } = require('../utils/auth');
const { apiRequestJson } = require('../utils/api');
const { loadBusinesses, saveBusinesses } = require('./business');

const SKIP_DIRS = new Set([
  'node_modules', '__pycache__', '.git', 'venv', '.venv',
  'lost+found', '.cache', '.atris', '.claude', 'default',
]);

const SKIP_FILES = new Set(['.DS_Store', 'Thumbs.db']);

function sleep(ms) {
  return new Promise((r) => setTimeout(r, ms));
}

function hashContent(content) {
  return crypto.createHash('sha256').update(content).digest('hex');
}

function hashFile(absPath) {
  try {
    const buf = fs.readFileSync(absPath);
    return crypto.createHash('sha256').update(buf).digest('hex');
  } catch {
    return null;
  }
}

/**
 * Walk a local directory and return { relPath: hash } map.
 */
function walkLocal(rootDir) {
  const out = {};
  function walk(dir) {
    let entries;
    try { entries = fs.readdirSync(dir, { withFileTypes: true }); }
    catch { return; }
    for (const e of entries) {
      if (e.name.startsWith('.') && e.name !== '.atris' && e.name !== '.claude') continue;
      if (SKIP_DIRS.has(e.name)) continue;
      if (SKIP_FILES.has(e.name)) continue;
      if (e.name.endsWith('.remote')) continue;
      const full = path.join(dir, e.name);
      if (e.isDirectory()) {
        walk(full);
      } else if (e.isFile()) {
        const rel = path.relative(rootDir, full);
        const h = hashFile(full);
        if (h) out[rel] = h;
      }
    }
  }
  walk(rootDir);
  return out;
}
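
// Result shape, with hypothetical paths: walkLocal('/ws/pallet') yields a
// flat map of rootDir-relative paths to sha256 hex digests, e.g.
//   { 'README.md': '9f86d081...', 'notes/plan.md': '2c26b46b...' }
// Directories never appear as keys, and unreadable files are silently
// dropped (hashFile returns null for them).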

/**
 * Get EC2 file list via the /snapshot endpoint (1 API call, fast).
 * Returns { path: { hash, size } } map.
 *
 * Why this instead of recursive /files walk: the recursive walk is rate-limited
 * (60/min) and hangs on workspaces with many subdirs. The /snapshot endpoint
 * returns the full file tree in one call. Trade-off: snapshot can be incomplete
 * for very deep hierarchies (server-side bug we've seen) — but it's vastly faster
 * and we accept that risk in exchange for not hanging.
 */
async function walkCloud(token, businessId, workspaceId) {
  const out = {};
  const errors = [];

  // First try the snapshot endpoint (1 call, much faster than walking)
  const snapshotResult = await apiRequestJson(
    `/business/${businessId}/workspaces/${workspaceId}/snapshot?include_content=false`,
    { method: 'GET', token, timeoutMs: 120000 }
  );

  if (snapshotResult.ok && snapshotResult.data && Array.isArray(snapshotResult.data.files)) {
    for (const f of snapshotResult.data.files) {
      const p = (f.path || '').replace(/^\//, '');
      if (!p) continue;
      const base = p.split('/').pop();
      if (SKIP_FILES.has(base)) continue;
      if (p.endsWith('.remote')) continue;
      // Check skip dirs
      const parts = p.split('/');
      if (parts.some((part) => SKIP_DIRS.has(part))) continue;
      out[p] = { hash: f.hash || null, size: f.size || 0 };
    }
    // CRITICAL: only return if snapshot actually had files. Empty snapshot is
    // a known server-side bug — fall back to recursive walk in that case so
    // we don't report every local file as "only on local" against a phantom empty cloud.
    if (Object.keys(out).length > 0) {
      return { files: out, errors };
    }
    errors.push({ path: '<snapshot>', status: 'empty', fallback: 'recursive walk' });
  } else {
    // Snapshot failed entirely — fall back to recursive walk
    errors.push({ path: '<snapshot>', status: snapshotResult.status, fallback: 'recursive walk' });
  }

  async function walk(dirPath, depth) {
    if (depth > 12) return;
    await sleep(300);
    let result = await apiRequestJson(
      `/business/${businessId}/workspaces/${workspaceId}/files${dirPath ? `?path=${encodeURIComponent(dirPath)}` : ''}`,
      { method: 'GET', token }
    );
    if (!result.ok && result.status === 429) {
      await sleep(15000);
      result = await apiRequestJson(
        `/business/${businessId}/workspaces/${workspaceId}/files${dirPath ? `?path=${encodeURIComponent(dirPath)}` : ''}`,
        { method: 'GET', token }
      );
    }
    if (!result.ok) {
      errors.push({ path: dirPath, status: result.status });
      return;
    }
    const entries = (result.data && result.data.files) || [];
    for (const entry of entries) {
      const name = entry.name || '';
      const fullPath = dirPath ? `${dirPath}/${name}` : name;
      if (entry.type === 'file') {
        if (SKIP_FILES.has(name)) continue;
        if (name.endsWith('.remote')) continue;
        out[fullPath] = { hash: entry.hash || null, size: entry.size || 0 };
      } else if (entry.type === 'dir') {
        if (SKIP_DIRS.has(name)) continue;
        await walk(fullPath, depth + 1);
      }
    }
  }
  await walk('', 0);
  return { files: out, errors };
}
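
// Result shape, hypothetical values:
//   { files: { 'notes/plan.md': { hash: 'ab12...', size: 412 } }, errors: [] }
// If errors contains a { path: '<snapshot>', ... } entry, the snapshot was
// empty or failed and the map above came from the slower recursive /files walk.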

/**
 * Get cloud file content (for hash computation when /files doesn't return hashes).
 */
async function fetchCloudFileHash(token, businessId, workspaceId, filePath) {
  const result = await apiRequestJson(
    `/business/${businessId}/workspaces/${workspaceId}/file?path=${encodeURIComponent(filePath)}`,
    { method: 'GET', token }
  );
  if (!result.ok) return null;
  const content = result.data && result.data.content;
  if (typeof content !== 'string') return null;
  return hashContent(Buffer.from(content, 'utf-8'));
}

/**
 * Wake the EC2 computer and wait until it's running.
 * Returns the endpoint URL or null on timeout.
 */
async function ensureAwake(token, businessId, maxWaitSec = 90) {
  const status = await apiRequestJson(`/business/${businessId}/ai-computer/status`, { method: 'GET', token });
  if (status.ok && status.data && status.data.status === 'running' && status.data.endpoint) {
    return status.data.endpoint;
  }

  process.stdout.write(' Waking EC2 computer... ');
  await apiRequestJson(`/business/${businessId}/ai-computer/wake`, { method: 'POST', token });

  const start = Date.now();
  while (Date.now() - start < maxWaitSec * 1000) {
    await sleep(3000);
    const s = await apiRequestJson(`/business/${businessId}/ai-computer/status`, { method: 'GET', token });
    if (s.ok && s.data && s.data.status === 'running' && s.data.endpoint) {
      const elapsed = Math.floor((Date.now() - start) / 1000);
      console.log(`awake (${elapsed}s)`);
      return s.data.endpoint;
    }
  }
  console.log('timeout');
  return null;
}
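
// Polling math: one status probe every 3s for up to maxWaitSec means at most
// ~30 probes over the default 90s window before giving up with null.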

/**
 * Resolve business slug → { businessId, workspaceId, businessName }.
 */
async function resolveBusiness(token, slug) {
  const businesses = loadBusinesses();
  const list = await apiRequestJson('/business/', { method: 'GET', token });
  if (list.ok) {
    const match = (list.data || []).find(
      (b) => b.slug === slug || (b.name || '').toLowerCase() === slug.toLowerCase()
    );
    if (!match) return null;
    businesses[slug] = {
      business_id: match.id,
      workspace_id: match.workspace_id,
      name: match.name,
      slug: match.slug,
      added_at: new Date().toISOString(),
    };
    saveBusinesses(businesses);
    return { businessId: match.id, workspaceId: match.workspace_id, businessName: match.name, resolvedSlug: match.slug };
  }
  if (businesses[slug]) {
    return {
      businessId: businesses[slug].business_id,
      workspaceId: businesses[slug].workspace_id,
      businessName: businesses[slug].name || slug,
      resolvedSlug: businesses[slug].slug || slug,
    };
  }
  return null;
}
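
// Note the side effect: a successful API lookup also rewrites the local
// businesses cache via saveBusinesses(), which is what makes the offline
// fallback branch above usable on a later run when /business/ is unreachable.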

/**
 * Push files via /sync (single batched call).
 *
 * The /sync endpoint writes each file to EC2 + DB sequentially server-side,
 * so wall time scales with batch size. Default node timeout (30s) is too
 * short for batches of more than ~15-20 files; pass an explicit timeoutMs.
 */
async function pushFiles(token, businessId, workspaceId, fileObjs, timeoutMs = 180000) {
  if (!fileObjs.length) return { ok: true, written: 0 };
  const result = await apiRequestJson(
    `/business/${businessId}/workspaces/${workspaceId}/sync`,
    {
      method: 'POST',
      token,
      body: { files: fileObjs },
      headers: { 'X-Atris-Actor-Source': 'cli-align' },
      timeoutMs,
    }
  );
  return result;
}
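
// Rough timeout math behind the 180000 default: the doc comment above puts
// the server at roughly 1.5-2s per file (30s covers ~15-20 files), so a
// 50-file batch needs 75-100s of server time and 180s leaves about 2x headroom.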

/**
 * Delete a file from EC2 via /file DELETE.
 */
async function deleteCloudFile(token, businessId, workspaceId, filePath) {
  return apiRequestJson(
    `/business/${businessId}/workspaces/${workspaceId}/file?path=${encodeURIComponent(filePath)}`,
    { method: 'DELETE', token }
  );
}

/**
 * --hard force-push: local is canonical, wipe cloud cruft + upload local.
 *
 * Strategy (deliberately simple, designed to finish in seconds not minutes):
 * 1. List top-level entries on cloud via /files?path= (1 API call).
 * 2. Compute top-level entries in local (1 fs read).
 * 3. For each top-level cloud entry NOT present in local: DELETE it.
 *    Server-side delete is recursive, so one call per top-level dir
 *    replaces hundreds of file-by-file deletes.
 * 4. Walk local once, push every file via /sync in 10-file batches.
 * 5. Save manifest so subsequent atris pull/push diffs work correctly.
 *
 * Why bypass the diff: in the bloated-cloud case the diff itself is the
 * slow part (cloud has hundreds of files local doesn't, walking them
 * times out). --hard just trusts local and overwrites cloud.
 */
async function alignHardLocalToCloud(token, biz, localDir) {
  const { businessId, workspaceId, businessName, resolvedSlug } = biz;
  const { buildManifest, saveManifest, computeLocalHashes } = require('../lib/manifest');

  // 1. List top-level entries on cloud via /files (1 fast API call).
  // We deliberately AVOID /snapshot here — its hangs/timeouts on bloated
  // workspaces are the whole reason --hard exists. /files at depth 0 is
  // a single dir-listing call that returns reliably.
  //
  // The /files endpoint shares the global 60/min business API quota, so a
  // recent --hard run can leave us heavily throttled. Make up to 3 attempts
  // with a 60s backoff between rate-limit retries before giving up.
  process.stdout.write(' Listing cloud top-level... ');
  let topResult;
  for (let attempt = 0; attempt < 3; attempt++) {
    topResult = await apiRequestJson(
      `/business/${businessId}/workspaces/${workspaceId}/files`,
      { method: 'GET', token }
    );
    if (topResult.ok || topResult.status !== 429) break;
    if (attempt < 2) {
      process.stdout.write(`rate-limited, waiting 60s (attempt ${attempt + 2}/3)... `);
      await sleep(60000);
    }
  }
  if (!topResult.ok) {
    console.log('failed');
    console.error(` Could not list cloud workspace: ${topResult.errorMessage || topResult.status}`);
    process.exit(1);
  }
  const cloudTopEntries = (topResult.data && topResult.data.files) || [];
  console.log(`${cloudTopEntries.length} entries`);

  // The `workspace` top-level entry is the workspace root itself — it's
  // protected server-side (DELETE returns 400) and is NEVER cruft. Don't
  // try to delete it; pretend it isn't there for the diff.
  const PROTECTED_TOP = new Set(['workspace']);

  // 2. Top-level local entries — track names AND types so we can detect
  // type-mismatch (cloud has file `foo`, local has dir `foo/`).
  const localTopByName = new Map(); // name -> 'dir' | 'file'
  for (const e of fs.readdirSync(localDir, { withFileTypes: true })) {
    if (e.name.startsWith('.') && e.name !== '.atris' && e.name !== '.claude') continue;
    if (SKIP_DIRS.has(e.name)) continue;
    if (SKIP_FILES.has(e.name)) continue;
    localTopByName.set(e.name, e.isDirectory() ? 'dir' : 'file');
  }

  // 3. Delete cloud top-level entries that aren't in local OR whose type
  // differs from local. Type mismatch must be cleared so the bulk push
  // can write the correct shape (otherwise pushing a file at a path
  // occupied by a dir, or vice versa, will silently fail server-side).
  //
  // NOTE: --hard intentionally does NOT respect SKIP_DIRS/SKIP_FILES here.
  // Those skip lists exist for the diff/walk path so we don't enumerate
  // node_modules/venv/__pycache__ locally — they are NOT protections that
  // mean "don't delete on cloud." A bloated cloud workspace might genuinely
  // contain stale `default/`, `venv/`, `__pycache__/` etc that the user
  // wants nuked. --hard means "make cloud match local"; if local doesn't
  // walk into those names, cloud shouldn't have them either.
  //
  // Only PROTECTED_TOP (the workspace root) is preserved.
  const toDelete = cloudTopEntries.filter((entry) => {
    const name = entry.name || '';
    if (!name) return false;
    if (PROTECTED_TOP.has(name)) return false;
    const localType = localTopByName.get(name);
    if (!localType) return true; // not in local at all
    if (localType !== entry.type) return true; // type mismatch — must clear
    return false;
  });

  let deleteFailures = 0;
  if (toDelete.length > 0) {
    console.log(` Deleting ${toDelete.length} cloud top-level entries (not present locally):`);
    for (const entry of toDelete) {
      const name = entry.name;
      const kind = entry.type === 'dir' ? 'dir' : 'file';
      process.stdout.write(` - ${name}${kind === 'dir' ? '/' : ''} (${kind}) ... `);
      const r = await deleteCloudFile(token, businessId, workspaceId, '/' + name);
      let succeeded = r.ok || r.status === 404;
      if (succeeded) {
        console.log('gone');
      } else if (r.status === 429) {
        await sleep(20000);
        const r2 = await deleteCloudFile(token, businessId, workspaceId, '/' + name);
        succeeded = r2.ok || r2.status === 404;
        console.log(succeeded ? 'gone (after retry)' : `FAILED ${r2.status}`);
      } else {
        console.log(`FAILED ${r.status}`);
      }
      if (!succeeded) deleteFailures++;
      await sleep(400);
    }
  } else {
    console.log(' No cloud-only top-level entries to delete.');
  }

  // 4. Walk local + bulk-push every file
  const localFiles = computeLocalHashes(localDir);
  const localPaths = Object.keys(localFiles);
  console.log(` Pushing ${localPaths.length} local files to cloud...`);

  const fileObjs = [];
  for (const p of localPaths) {
    try {
      const content = fs.readFileSync(path.join(localDir, p.replace(/^\//, '')), 'utf-8');
      fileObjs.push({ path: p, content });
    } catch {
      // skip unreadable
    }
  }

  // Batch size is intentionally small. The /sync endpoint writes files
  // sequentially on the server (warm runner + DB) so 50 files easily blows
  // past the default 30s HTTP timeout. 10 files per batch gives the server
  // ~3s/file headroom and keeps individual batch latency well under 30s.
  const BATCH = 10;
  let written = 0;
  let batchFailures = 0;
  let perFileErrors = 0;
  const failedPaths = [];
  for (let i = 0; i < fileObjs.length; i += BATCH) {
    const batch = fileObjs.slice(i, i + BATCH);
    const r = await pushFiles(token, businessId, workspaceId, batch, 180000);
    if (r.ok) {
      // The server returns SyncResponse{written, unchanged, errors, results}
      // inside a 200. A successful HTTP doesn't mean every file landed —
      // results can include status="error" entries (path-traversal attempts,
      // permission errors, full disk, etc). Trust the server's count and
      // record any per-file errors so we don't write a manifest claiming
      // files exist on cloud when they actually don't.
      const data = r.data || {};
      const serverWritten = (typeof data.written === 'number') ? data.written : 0;
      const serverUnchanged = (typeof data.unchanged === 'number') ? data.unchanged : 0;
      const serverErrors = (typeof data.errors === 'number') ? data.errors : 0;
      written += serverWritten + serverUnchanged;
      if (serverErrors > 0) {
        perFileErrors += serverErrors;
        for (const entry of (data.results || [])) {
          if (entry && entry.status === 'error') {
            failedPaths.push({ path: entry.path, error: entry.error || '' });
          }
        }
        console.log(` [${Math.min(i + BATCH, fileObjs.length)}/${fileObjs.length}] partial: ${serverWritten + serverUnchanged} ok, ${serverErrors} errored`);
      } else {
        console.log(` [${Math.min(i + BATCH, fileObjs.length)}/${fileObjs.length}] pushed`);
      }
    } else {
      batchFailures++;
      console.log(` [${Math.min(i + BATCH, fileObjs.length)}/${fileObjs.length}] FAILED ${r.status} ${r.error || ''}`);
    }
    await sleep(500);
  }
  if (perFileErrors > 0) {
    console.log(` ⚠ ${perFileErrors} per-file error(s) returned by server:`);
    failedPaths.slice(0, 8).forEach((f) => console.log(` x ${f.path.replace(/^\//, '')} ${f.error}`));
    if (failedPaths.length > 8) console.log(` ... +${failedPaths.length - 8} more`);
  }

  // 5. Save manifest so subsequent push/pull diffs work — but ONLY if every
  // batch, every per-file write, AND every cloud delete succeeded. If
  // anything failed, leaving the manifest stale is safer than recording
  // a mirror that doesn't actually exist on cloud (the manifest would
  // then mask undeleted cruft and let regular push think everything is
  // fine).
  console.log('');
  if (batchFailures > 0 || deleteFailures > 0 || perFileErrors > 0) {
    const parts = [];
    if (batchFailures > 0) parts.push(`${batchFailures} push batch failure(s)`);
    if (perFileErrors > 0) parts.push(`${perFileErrors} per-file write error(s)`);
    if (deleteFailures > 0) parts.push(`${deleteFailures} cloud delete failure(s)`);
    console.log(` ⚠ Force-push partial: ${written}/${fileObjs.length} pushed, ${toDelete.length - deleteFailures}/${toDelete.length} cloud-only entries deleted, ${parts.join(', ')}.`);
    console.log(` Manifest NOT updated. Re-run \`atris align ${resolvedSlug} --fix --hard\` to retry.`);
    process.exit(1);
  }
  saveManifest(resolvedSlug, buildManifest(localFiles, null));
  console.log(` Force-push complete: ${written}/${fileObjs.length} pushed, ${toDelete.length} cloud-only entries deleted.`);
  console.log(` ${businessName} is now mirrored from local.`);
}
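
// Worked example of the top-level reconciliation above, hypothetical names:
// cloud lists [workspace, notes (dir), venv (dir), todo.md (file)] and local
// has notes/ and todo.md. `workspace` is protected, notes and todo.md match
// by name and type, so only venv/ is deleted: one recursive DELETE instead
// of hundreds of per-file calls. If local instead had todo.md as a directory,
// the type mismatch would force a delete too, clearing the path for the push.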

async function alignAtris() {
  // Parse args
  let slug = process.argv[3];
  if (!slug || slug.startsWith('-')) {
    const bizFile = path.join(process.cwd(), '.atris', 'business.json');
    if (fs.existsSync(bizFile)) {
      try { slug = JSON.parse(fs.readFileSync(bizFile, 'utf8')).slug; } catch {}
    }
    if (!slug || slug.startsWith('-')) slug = null;
  }

  if (!slug || slug === '--help' || slug === '-h') {
    console.log('Usage: atris align [business] [--fix] [--hard] [--from cloud|local] [--dry-run]');
    console.log('');
    console.log('  atris align                            Diff current workspace against cloud (auto-detect)');
    console.log('  atris align pallet                     Diff pallet workspace');
    console.log('  atris align pallet --fix               Fix drift (local is canonical by default)');
    console.log('  atris align pallet --fix --hard        Force-push: nuke cloud cruft, upload local. Skips diff. Fast.');
    console.log('  atris align pallet --fix --from cloud  Cloud is canonical: pull EC2-only, delete local extras');
    console.log('  atris align pallet --dry-run           Show what would change, do nothing');
    process.exit(0);
  }

  const fix = process.argv.includes('--fix');
  const dryRun = process.argv.includes('--dry-run');
  const hard = process.argv.includes('--hard');
  const fromIdx = process.argv.indexOf('--from');
  const fromSide = (fromIdx !== -1 && process.argv[fromIdx + 1]) ? process.argv[fromIdx + 1] : 'local';

  if (!['local', 'cloud'].includes(fromSide)) {
    console.error(`Invalid --from value: ${fromSide}. Use 'local' or 'cloud'.`);
    process.exit(1);
  }

  if (hard && fromSide !== 'local') {
    console.error('--hard only supported with --from local (the canonical force-push direction).');
    process.exit(1);
  }

  // Determine local dir
  let localDir;
  const bizFileCwd = path.join(process.cwd(), '.atris', 'business.json');
  if (fs.existsSync(bizFileCwd)) {
    localDir = process.cwd();
  } else if (fs.existsSync(path.join(process.cwd(), slug))) {
    localDir = path.join(process.cwd(), slug);
  } else {
    console.error(`Cannot find local workspace for "${slug}".`);
    console.error('Run from inside the workspace, or pass a slug whose folder exists in cwd.');
    process.exit(1);
  }

  const creds = loadCredentials();
  if (!creds || !creds.token) { console.error('Not logged in. Run: atris login'); process.exit(1); }

  console.log('');
  console.log(`Aligning ${slug}...`);

  // Resolve business
  const biz = await resolveBusiness(creds.token, slug);
  if (!biz) { console.error(`Business "${slug}" not found.`); process.exit(1); }
  if (!biz.workspaceId) { console.error(`Business "${slug}" has no workspace.`); process.exit(1); }

  // Wake EC2 (the rule)
  const endpoint = await ensureAwake(creds.token, biz.businessId);
  if (!endpoint) {
    console.error(' EC2 computer did not become ready in time. Aborting.');
    process.exit(1);
  }

  // --hard: skip the slow file-by-file walk. Nuke top-level cloud entries
  // not present locally (1 DELETE per top-level dir, recursive on the server),
  // then bulk-push every local file via /sync. Designed for "cloud is bloated,
  // local is canonical, just make them match" — the force-push escape hatch.
  if (hard && fix) {
    console.log(` Local: ${localDir}`);
    console.log(` Cloud: ${endpoint}`);
    console.log(' Mode: --hard (force-push: wipe cloud cruft, upload local)');
    return alignHardLocalToCloud(creds.token, biz, localDir);
  }

  // Walk both sides (local synchronously, then cloud)
  console.log(` Local: ${localDir}`);
  console.log(` Cloud: ${endpoint}`);
  process.stdout.write(' Walking local + cloud...');

  const localFiles = walkLocal(localDir);
  const { files: cloudFiles, errors: walkErrors } = await walkCloud(
    creds.token, biz.businessId, biz.workspaceId
  );

  console.log(` local=${Object.keys(localFiles).length} cloud=${Object.keys(cloudFiles).length}`);
  if (walkErrors.length > 0) {
    console.log(` ⚠ ${walkErrors.length} walk errors (some directories not read)`);
  }

  // Diff (path-based first; cloud /files doesn't always return hashes)
  const localPaths = new Set(Object.keys(localFiles));
  const cloudPaths = new Set(Object.keys(cloudFiles));
  const onlyLocal = [...localPaths].filter((p) => !cloudPaths.has(p)).sort();
  const onlyCloud = [...cloudPaths].filter((p) => !localPaths.has(p)).sort();
  const both = [...localPaths].filter((p) => cloudPaths.has(p));

  // Hash check for files that exist on both sides (only if cloud has hashes)
  const hashMismatches = [];
  for (const p of both) {
    const cHash = cloudFiles[p].hash;
    if (cHash && cHash !== localFiles[p]) hashMismatches.push(p);
  }

  // Report
  console.log('');
  console.log(` Match: ${both.length - hashMismatches.length}`);
  console.log(` Hash differ: ${hashMismatches.length}`);
  console.log(` Only local: ${onlyLocal.length}`);
  console.log(` Only cloud: ${onlyCloud.length}`);
  console.log('');

  if (onlyLocal.length === 0 && onlyCloud.length === 0 && hashMismatches.length === 0) {
    console.log(' ✓ Aligned. No drift.');
    return;
  }

  // Show samples (cap at 10 each so we don't drown the terminal)
  if (onlyLocal.length > 0) {
    console.log(` Only on local (${onlyLocal.length}):`);
    onlyLocal.slice(0, 10).forEach((p) => console.log(` + ${p}`));
    if (onlyLocal.length > 10) console.log(` ... +${onlyLocal.length - 10} more`);
    console.log('');
  }
  if (onlyCloud.length > 0) {
    console.log(` Only on cloud (${onlyCloud.length}):`);
    onlyCloud.slice(0, 10).forEach((p) => console.log(` + ${p}`));
    if (onlyCloud.length > 10) console.log(` ... +${onlyCloud.length - 10} more`);
    console.log('');
  }
  if (hashMismatches.length > 0) {
    console.log(` Content differs (${hashMismatches.length}):`);
    hashMismatches.slice(0, 10).forEach((p) => console.log(` ~ ${p}`));
    if (hashMismatches.length > 10) console.log(` ... +${hashMismatches.length - 10} more`);
    console.log('');
  }
  if (dryRun) {
    console.log(' (--dry-run, no changes made)');
    return;
  }

  if (!fix) {
    console.log(' Run with --fix to align. Local is the default canonical side.');
    console.log(' Use --from cloud to make cloud the canonical side instead.');
    return;
  }

  // FIX MODE
  console.log(` Fixing — ${fromSide} is canonical:`);

  if (fromSide === 'local') {
    // Delete cloud-only files (they're cruft)
    if (onlyCloud.length > 0) {
      console.log(` Deleting ${onlyCloud.length} cloud-only files...`);
      let deleted = 0, failed = 0;
      for (let i = 0; i < onlyCloud.length; i++) {
        const p = onlyCloud[i];
        await sleep(700); // throttle: ~700ms plus request latency stays near the 60/min limit; the 429 retry below covers bursts
        const r = await deleteCloudFile(creds.token, biz.businessId, biz.workspaceId, p);
        if (r.ok) {
          deleted++;
        } else if (r.status === 429) {
          await sleep(20000);
          const r2 = await deleteCloudFile(creds.token, biz.businessId, biz.workspaceId, p);
          if (r2.ok) deleted++; else failed++;
        } else {
          failed++;
        }
        if ((i + 1) % 25 === 0) console.log(` [${i + 1}/${onlyCloud.length}] ${deleted} deleted, ${failed} failed`);
      }
      console.log(` Deleted ${deleted}/${onlyCloud.length} (${failed} failed)`);
    }

    // Push local-only + hash-mismatched files
    const toPush = [...onlyLocal, ...hashMismatches];
    if (toPush.length > 0) {
      console.log(` Pushing ${toPush.length} local files to cloud...`);
      const fileObjs = [];
      for (const p of toPush) {
        try {
          const content = fs.readFileSync(path.join(localDir, p), 'utf-8');
          fileObjs.push({ path: '/' + p, content });
        } catch {}
      }
      // Push in batches of 50 to avoid huge payloads
      const BATCH = 50;
      let written = 0;
      for (let i = 0; i < fileObjs.length; i += BATCH) {
        const batch = fileObjs.slice(i, i + BATCH);
        const r = await pushFiles(creds.token, biz.businessId, biz.workspaceId, batch);
        if (r.ok && r.data) {
          // written can legitimately be 0 when all files were unchanged; only guess batch.length if the response has no counts
          written += (typeof r.data.written === 'number') ? r.data.written + (r.data.unchanged || 0) : batch.length;
        }
        await sleep(500);
      }
      console.log(` Pushed ${written}/${fileObjs.length}`);
    }
  } else {
    // fromSide === 'cloud': pull cloud-only + hash-mismatched, delete local-only
    console.log(` Pulling ${onlyCloud.length + hashMismatches.length} files from cloud...`);
    let pulled = 0;
    for (const p of [...onlyCloud, ...hashMismatches]) {
      await sleep(300);
      const r = await apiRequestJson(
        `/business/${biz.businessId}/workspaces/${biz.workspaceId}/file?path=${encodeURIComponent(p)}`,
        { method: 'GET', token: creds.token }
      );
      if (r.ok && r.data && typeof r.data.content === 'string') {
        const local = path.join(localDir, p);
        try {
          fs.mkdirSync(path.dirname(local), { recursive: true });
          fs.writeFileSync(local, r.data.content);
          pulled++;
        } catch {}
      }
    }
    console.log(` Pulled ${pulled}/${onlyCloud.length + hashMismatches.length}`);

    if (onlyLocal.length > 0) {
      console.log(` Deleting ${onlyLocal.length} local-only files...`);
      let deleted = 0;
      for (const p of onlyLocal) {
        try { fs.unlinkSync(path.join(localDir, p)); deleted++; } catch {}
      }
      console.log(` Deleted ${deleted}/${onlyLocal.length}`);
    }
  }

  // Re-walk to verify
  console.log('');
  console.log(' Re-walking to verify...');
  const localFiles2 = walkLocal(localDir);
  const { files: cloudFiles2 } = await walkCloud(
    creds.token, biz.businessId, biz.workspaceId
  );
  const lp2 = new Set(Object.keys(localFiles2));
  const cp2 = new Set(Object.keys(cloudFiles2));
  const stillOnlyLocal = [...lp2].filter((p) => !cp2.has(p)).length;
  const stillOnlyCloud = [...cp2].filter((p) => !lp2.has(p)).length;

  console.log(` After fix: local=${lp2.size} cloud=${cp2.size} only-local=${stillOnlyLocal} only-cloud=${stillOnlyCloud}`);
  if (stillOnlyLocal === 0 && stillOnlyCloud === 0) {
    console.log(' ✅ Aligned.');
  } else {
    console.log(' ⚠ Drift remains. Run again with --fix or inspect manually.');
  }
}

module.exports = { alignAtris };