@dboio/cli 0.11.4 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/README.md +183 -3
  2. package/bin/dbo.js +6 -0
  3. package/package.json +1 -1
  4. package/plugins/claude/dbo/.claude-plugin/plugin.json +1 -1
  5. package/plugins/claude/dbo/commands/dbo.md +66 -243
  6. package/plugins/claude/dbo/docs/_audit_required/API/all.md +40 -0
  7. package/plugins/claude/dbo/docs/_audit_required/API/app.md +38 -0
  8. package/plugins/claude/dbo/docs/_audit_required/API/athenticate.md +26 -0
  9. package/plugins/claude/dbo/docs/_audit_required/API/cache.md +29 -0
  10. package/plugins/claude/dbo/docs/_audit_required/API/content.md +14 -0
  11. package/plugins/claude/dbo/docs/_audit_required/API/data_source.md +28 -0
  12. package/plugins/claude/dbo/docs/_audit_required/API/email.md +18 -0
  13. package/plugins/claude/dbo/docs/_audit_required/API/input.md +25 -0
  14. package/plugins/claude/dbo/docs/_audit_required/API/instance.md +28 -0
  15. package/plugins/claude/dbo/docs/_audit_required/API/log.md +8 -0
  16. package/plugins/claude/dbo/docs/_audit_required/API/media.md +12 -0
  17. package/plugins/claude/dbo/docs/_audit_required/API/output_by_entity.md +12 -0
  18. package/plugins/claude/dbo/docs/_audit_required/API/upload.md +7 -0
  19. package/plugins/claude/dbo/docs/_audit_required/dbo-api-syntax.md +1487 -0
  20. package/plugins/claude/dbo/docs/_audit_required/dbo-problems-code.md +111 -0
  21. package/plugins/claude/dbo/docs/_audit_required/dbo-problems-performance.md +109 -0
  22. package/plugins/claude/dbo/docs/_audit_required/dbo-problems-syntax.md +97 -0
  23. package/plugins/claude/dbo/docs/_audit_required/dbo-product-market.md +119 -0
  24. package/plugins/claude/dbo/docs/_audit_required/dbo-white-paper.md +125 -0
  25. package/plugins/claude/dbo/docs/dbo-cheat-sheet.md +323 -0
  26. package/plugins/claude/dbo/docs/dbo-cli-readme.md +2279 -0
  27. package/plugins/claude/dbo/docs/dbo-core-entities.md +878 -0
  28. package/plugins/claude/dbo/docs/dbo-output-customsql.md +677 -0
  29. package/plugins/claude/dbo/docs/dbo-output-query.md +967 -0
  30. package/plugins/claude/dbo/skills/cli/SKILL.md +63 -246
  31. package/src/commands/add.js +373 -64
  32. package/src/commands/build.js +102 -0
  33. package/src/commands/clone.js +719 -212
  34. package/src/commands/deploy.js +9 -2
  35. package/src/commands/diff.js +7 -3
  36. package/src/commands/init.js +16 -2
  37. package/src/commands/input.js +3 -1
  38. package/src/commands/login.js +30 -4
  39. package/src/commands/mv.js +28 -7
  40. package/src/commands/push.js +298 -78
  41. package/src/commands/rm.js +21 -6
  42. package/src/commands/run.js +81 -0
  43. package/src/commands/tag.js +65 -0
  44. package/src/lib/config.js +67 -0
  45. package/src/lib/delta.js +7 -1
  46. package/src/lib/deploy-config.js +137 -0
  47. package/src/lib/diff.js +28 -5
  48. package/src/lib/filenames.js +198 -54
  49. package/src/lib/ignore.js +6 -0
  50. package/src/lib/input-parser.js +13 -4
  51. package/src/lib/scaffold.js +1 -1
  52. package/src/lib/scripts.js +232 -0
  53. package/src/lib/tagging.js +380 -0
  54. package/src/lib/toe-stepping.js +2 -1
  55. package/src/migrations/006-remove-uid-companion-filenames.js +181 -0
  56. package/src/migrations/007-natural-entity-companion-filenames.js +165 -0
  57. package/src/migrations/008-metadata-uid-in-suffix.js +70 -0
@@ -1,18 +1,20 @@
1
1
  import { Command } from 'commander';
2
- import { readFile, writeFile, appendFile, mkdir, access, readdir, rename } from 'fs/promises';
2
+ import { readFile, writeFile, appendFile, mkdir, access, readdir, rename, stat } from 'fs/promises';
3
3
  import { join, basename, extname, dirname } from 'path';
4
4
  import { fileURLToPath } from 'url';
5
5
  import { DboClient } from '../lib/client.js';
6
6
  import { loadConfig, updateConfigWithApp, loadClonePlacement, saveClonePlacement, ensureGitignore, saveEntityDirPreference, loadEntityDirPreference, saveEntityContentExtractions, loadEntityContentExtractions, saveAppJsonBaseline, addDeleteEntry, loadCollisionResolutions, saveCollisionResolutions, loadSynchronize, saveSynchronize, saveAppModifyKey, loadTransactionKeyPreset, saveTransactionKeyPreset, loadOutputFilenamePreference, saveOutputFilenamePreference, saveCloneSource, loadCloneSource, saveDescriptorFilenamePreference, loadDescriptorFilenamePreference, saveDescriptorContentExtractions, loadDescriptorContentExtractions, saveExtensionDocumentationMDPlacement, loadExtensionDocumentationMDPlacement } from '../lib/config.js';
7
7
  import { buildBinHierarchy, resolveBinPath, createDirectories, saveStructureFile, findBinByPath, BINS_DIR, DEFAULT_PROJECT_DIRS, SCAFFOLD_DIRS, ENTITY_DIR_NAMES, OUTPUT_ENTITY_MAP, OUTPUT_HIERARCHY_ENTITIES, EXTENSION_DESCRIPTORS_DIR, EXTENSION_UNSUPPORTED_DIR, DOCUMENTATION_DIR, buildDescriptorMapping, saveDescriptorMapping, loadDescriptorMapping, resolveExtensionSubDir, resolveEntityDirPath } from '../lib/structure.js';
8
8
  import { log } from '../lib/logger.js';
9
- import { buildUidFilename, detectLegacyDotUid } from '../lib/filenames.js';
9
+ import { buildUidFilename, buildContentFileName, buildMetaFilename, isMetadataFile, parseMetaFilename, stripUidFromFilename, hasUidInFilename } from '../lib/filenames.js';
10
10
  import { setFileTimestamps, parseServerDate } from '../lib/timestamps.js';
11
- import { getLocalSyncTime, isServerNewer, hasLocalModifications, promptChangeDetection, inlineDiffAndMerge, isDiffable, loadBaselineForComparison, resetBaselineCache } from '../lib/diff.js';
11
+ import { getLocalSyncTime, isServerNewer, hasLocalModifications, promptChangeDetection, inlineDiffAndMerge, isDiffable, loadBaselineForComparison, resetBaselineCache, findMetadataFiles } from '../lib/diff.js';
12
+ import { loadIgnore } from '../lib/ignore.js';
12
13
  import { checkDomainChange } from '../lib/domain-guard.js';
13
- import { applyTrashIcon, ensureTrashIcon } from '../lib/folder-icon.js';
14
+ import { applyTrashIcon, ensureTrashIcon, tagProjectFiles } from '../lib/tagging.js';
14
15
  import { loadMetadataTemplates, saveMetadataTemplates, getTemplateCols, setTemplateCols, buildTemplateFromCloneRecord } from '../lib/metadata-templates.js';
15
16
  import { runPendingMigrations } from '../lib/migrations.js';
17
+ import { upsertDeployEntry } from '../lib/deploy-config.js';
16
18
 
17
19
  /**
18
20
  * Resolve a column value that may be base64-encoded.
@@ -43,6 +45,143 @@ function isWillDeleteFile(filename) {
43
45
  return basename(filename).startsWith(WILL_DELETE_PREFIX);
44
46
  }
45
47
 
48
+ /**
49
+ * Collect all UIDs from the freshly fetched app JSON.
50
+ * Traverses all entity types in appJson.children.
51
+ *
52
+ * @param {object} appJson - Parsed app JSON with children map
53
+ * @returns {Set<string>} Set of all UIDs present on the server
54
+ */
55
+ export function collectServerUids(appJson) {
56
+ const uids = new Set();
57
+ if (!appJson?.children || typeof appJson.children !== 'object') return uids;
58
+
59
+ for (const [entityName, entries] of Object.entries(appJson.children)) {
60
+ if (!Array.isArray(entries)) continue;
61
+ for (const entry of entries) {
62
+ if (entry && typeof entry === 'object' && entry.UID) {
63
+ uids.add(String(entry.UID));
64
+ }
65
+ // Output hierarchy: collect nested child UIDs too
66
+ if (entry && typeof entry === 'object' && entry.children) {
67
+ for (const childArr of Object.values(entry.children)) {
68
+ if (!Array.isArray(childArr)) continue;
69
+ for (const child of childArr) {
70
+ if (child && typeof child === 'object' && child.UID) {
71
+ uids.add(String(child.UID));
72
+ }
73
+ }
74
+ }
75
+ }
76
+ }
77
+ }
78
+
79
+ return uids;
80
+ }
81
+
82
+ /**
83
+ * Detect local metadata files whose UIDs are absent from the server response,
84
+ * then move them and their companion files to trash/.
85
+ *
86
+ * @param {object} appJson - Fresh app JSON from server
87
+ * @param {import('ignore').Ignore} ig - Ignore instance for findMetadataFiles
88
+ * @param {object} sync - Parsed synchronize.json { delete, edit, add }
89
+ * @param {object} options - Clone options ({ entityFilter?, verbose? })
90
+ */
91
+ export async function detectAndTrashOrphans(appJson, ig, sync, options) {
92
+ if (options.entityFilter) return;
93
+ if (!appJson?.children) return;
94
+
95
+ const serverUids = collectServerUids(appJson);
96
+ if (serverUids.size === 0) return;
97
+
98
+ // UIDs already queued for deletion in synchronize.json
99
+ const stagedDeleteUids = new Set(
100
+ (sync.delete || []).map(e => e.UID).filter(Boolean).map(String)
101
+ );
102
+
103
+ const metaFiles = await findMetadataFiles(process.cwd(), ig);
104
+ if (metaFiles.length === 0) return;
105
+
106
+ const trashDir = join(process.cwd(), 'trash');
107
+ const orphans = [];
108
+
109
+ for (const metaPath of metaFiles) {
110
+ let meta;
111
+ try {
112
+ meta = JSON.parse(await readFile(metaPath, 'utf8'));
113
+ } catch {
114
+ continue;
115
+ }
116
+
117
+ if (!meta.UID) continue;
118
+
119
+ const uid = String(meta.UID);
120
+ if (stagedDeleteUids.has(uid)) continue;
121
+ if (serverUids.has(uid)) continue;
122
+
123
+ // Orphan — collect files to move
124
+ const metaDir = dirname(metaPath);
125
+ const filesToMove = [metaPath];
126
+
127
+ for (const col of (meta._contentColumns || [])) {
128
+ const ref = meta[col];
129
+ if (ref && String(ref).startsWith('@')) {
130
+ const refName = String(ref).substring(1);
131
+ const companionPath = refName.startsWith('/')
132
+ ? join(process.cwd(), refName)
133
+ : join(metaDir, refName);
134
+ if (await fileExists(companionPath)) {
135
+ filesToMove.push(companionPath);
136
+ }
137
+ }
138
+ }
139
+
140
+ if (meta._mediaFile && String(meta._mediaFile).startsWith('@')) {
141
+ const refName = String(meta._mediaFile).substring(1);
142
+ const mediaPath = refName.startsWith('/')
143
+ ? join(process.cwd(), refName)
144
+ : join(metaDir, refName);
145
+ if (await fileExists(mediaPath)) {
146
+ filesToMove.push(mediaPath);
147
+ }
148
+ }
149
+
150
+ orphans.push({ metaPath, uid, entity: meta._entity || 'unknown', filesToMove });
151
+ }
152
+
153
+ if (orphans.length === 0) return;
154
+
155
+ await mkdir(trashDir, { recursive: true });
156
+
157
+ let trashed = 0;
158
+
159
+ for (const { metaPath, uid, entity, filesToMove } of orphans) {
160
+ log.dim(` Trashed: ${basename(metaPath)} (${entity}:${uid})`);
161
+
162
+ for (const filePath of filesToMove) {
163
+ const destBase = basename(filePath);
164
+ let destPath = join(trashDir, destBase);
165
+
166
+ // Collision: append timestamp suffix (same pattern as moveWillDeleteToTrash in push.js)
167
+ try { await stat(destPath); destPath = `${destPath}.${Date.now()}`; } catch {}
168
+
169
+ try {
170
+ await rename(filePath, destPath);
171
+ trashed++;
172
+ } catch (err) {
173
+ log.warn(` Could not trash: ${filePath} — ${err.message}`);
174
+ }
175
+ }
176
+ }
177
+
178
+ if (trashed > 0) {
179
+ await ensureTrashIcon(trashDir);
180
+ log.plain('');
181
+ log.warn(`Moved ${orphans.length} orphaned record(s) to trash (deleted on server)`);
182
+ }
183
+ }
184
+
46
185
  /**
47
186
  * Resolve a content Path to a directory under Bins/.
48
187
  *
@@ -144,9 +283,10 @@ export function resolveRecordPaths(entityName, record, structure, placementPref)
144
283
  }
145
284
 
146
285
  const uid = String(record.UID || record._id || 'untitled');
147
- const base = buildUidFilename(name, uid);
148
- const filename = ext ? `${base}.${ext}` : base;
149
- const metaPath = join(dir, `${base}.metadata.json`);
286
+ // Companion: natural name, no UID
287
+ const filename = sanitizeFilename(buildContentFileName(record, uid));
288
+ // Metadata: name.metadata~uid.json
289
+ const metaPath = join(dir, buildMetaFilename(name, uid));
150
290
 
151
291
  return { dir, filename, metaPath };
152
292
  }
@@ -156,9 +296,15 @@ export function resolveRecordPaths(entityName, record, structure, placementPref)
156
296
  * Replicates logic from processMediaEntries() for collision detection.
157
297
  */
158
298
  export function resolveMediaPaths(record, structure) {
159
- const filename = record.Filename || `${record.Name || record.UID}.${(record.Extension || 'bin').toLowerCase()}`;
160
- const name = sanitizeFilename(filename.replace(/\.[^.]+$/, ''));
161
- const ext = (record.Extension || 'bin').toLowerCase();
299
+ // Companion: use Filename column directly (natural name, no UID)
300
+ const companionFilename = sanitizeFilename(
301
+ record.Filename
302
+ || `${record.Name || record.UID}.${(record.Extension || 'bin').toLowerCase()}`
303
+ );
304
+
305
+ // Base name and ext for metadata naming
306
+ const name = sanitizeFilename(companionFilename.replace(/\.[^.]+$/, ''));
307
+ const ext = (record.Extension || extname(companionFilename).substring(1) || 'bin').toLowerCase();
162
308
 
163
309
  // Always place media by BinID; fall back to bins/ root
164
310
  let dir = BINS_DIR;
@@ -169,17 +315,20 @@ export function resolveMediaPaths(record, structure) {
169
315
  dir = dir.replace(/^\/+|\/+$/g, '');
170
316
  if (!dir) dir = BINS_DIR;
171
317
 
318
+ // Metadata: name.ext.metadata~uid.json
172
319
  const uid = String(record.UID || record._id || 'untitled');
173
- const base = buildUidFilename(name, uid);
174
- const finalFilename = `${base}.${ext}`;
175
- const metaPath = join(dir, `${finalFilename}.metadata.json`);
320
+ const naturalMediaBase = `${name}.${ext}`;
321
+ const metaPath = join(dir, buildMetaFilename(naturalMediaBase, uid));
176
322
 
177
- return { dir, filename: finalFilename, metaPath };
323
+ return { dir, filename: companionFilename, metaPath };
178
324
  }
179
325
 
180
326
  /**
181
327
  * Extract path components for entity-dir records.
182
328
  * Simplified from processEntityDirEntries() for collision detection.
329
+ *
330
+ * Returns `name` (natural, no ~UID) for companion files and
331
+ * `metaPath` (with ~UID) for the metadata file.
183
332
  */
184
333
  export function resolveEntityDirPaths(entityName, record, dirName) {
185
334
  let name;
@@ -192,9 +341,226 @@ export function resolveEntityDirPaths(entityName, record, dirName) {
192
341
  }
193
342
 
194
343
  const uid = record.UID || 'untitled';
195
- const finalName = buildUidFilename(name, uid);
196
- const metaPath = join(dirName, `${finalName}.metadata.json`);
197
- return { dir: dirName, filename: finalName, metaPath };
344
+ const metaPath = join(dirName, buildMetaFilename(name, uid));
345
+ return { dir: dirName, name, metaPath };
346
+ }
347
+
348
+ /**
349
+ * Resolve companion filename collisions within a shared BinID directory.
350
+ *
351
+ * Mutates the `filename` field in each entry to apply collision suffixes:
352
+ * - Content vs media same name → media gets "(media)" before extension
353
+ * - Same entity type duplicates → 2nd+ get "-1", "-2", ... suffix
354
+ *
355
+ * Companion files never contain ~UID. The metadata @reference stores the
356
+ * collision-suffixed name, so dbo rm/push/diff find metadata via @reference
357
+ * scan rather than filename derivation.
358
+ *
359
+ * @param {Array<{entity: string, uid: string, filename: string, dir: string}>} entries
360
+ */
361
+ export function resolveFilenameCollisions(entries) {
362
+ // Group by dir + filename
363
+ const byPath = new Map();
364
+ for (const entry of entries) {
365
+ const key = `${entry.dir}/${entry.filename}`;
366
+ if (!byPath.has(key)) byPath.set(key, []);
367
+ byPath.get(key).push(entry);
368
+ }
369
+
370
+ for (const group of byPath.values()) {
371
+ if (group.length <= 1) continue;
372
+
373
+ const contentGroup = group.filter(e => e.entity === 'content');
374
+ const mediaGroup = group.filter(e => e.entity === 'media');
375
+
376
+ // Content wins: media gets (media) suffix
377
+ if (contentGroup.length > 0 && mediaGroup.length > 0) {
378
+ for (const m of mediaGroup) {
379
+ const ext = extname(m.filename);
380
+ const base = basename(m.filename, ext);
381
+ m.filename = `${base}(media)${ext}`;
382
+ }
383
+ }
384
+
385
+ // Same-entity duplicates: -1, -2, ... suffix (by insertion order)
386
+ for (const sameType of [contentGroup, mediaGroup]) {
387
+ if (sameType.length <= 1) continue;
388
+ for (let i = 1; i < sameType.length; i++) {
389
+ const ext = extname(sameType[i].filename);
390
+ const base = basename(sameType[i].filename, ext);
391
+ sameType[i].filename = `${base}-${i}${ext}`;
392
+ }
393
+ }
394
+ }
395
+ }
396
+
397
+ /**
398
+ * If companion files on disk still use legacy ~UID naming,
399
+ * rename them to the natural filename and update @references in metadata.
400
+ *
401
+ * Handles two cases:
402
+ * A) @reference itself contains ~UID (e.g. "@colors~uid.css") → strip ~UID,
403
+ * rename file, update @reference in metadata, rewrite metadata file.
404
+ * B) @reference is already natural but file on disk has ~UID → just rename file.
405
+ *
406
+ * Returns true if any metadata @references were updated (caller should NOT
407
+ * overwrite metaPath after this — it's already been rewritten).
408
+ */
409
+ async function detectAndRenameLegacyCompanions(metaPath, meta) {
410
+ const uid = meta.UID;
411
+ if (!uid) return false;
412
+
413
+ const metaDir = dirname(metaPath);
414
+ const contentCols = [...(meta._contentColumns || [])];
415
+ if (meta._mediaFile) contentCols.push('_mediaFile');
416
+ let metaChanged = false;
417
+
418
+ for (const col of contentCols) {
419
+ const ref = meta[col];
420
+ if (!ref || !String(ref).startsWith('@')) continue;
421
+
422
+ const refName = String(ref).substring(1);
423
+ // Resolve paths: @/ references are root-relative, others are metaDir-relative
424
+ const resolveRef = (name) => name.startsWith('/')
425
+ ? join(process.cwd(), name)
426
+ : join(metaDir, name);
427
+
428
+ // Case A: @reference itself contains ~UID — strip it
429
+ if (hasUidInFilename(refName, uid)) {
430
+ const naturalName = stripUidFromFilename(refName, uid);
431
+ const legacyPath = resolveRef(refName);
432
+ const naturalPath = resolveRef(naturalName);
433
+ const legacyExists = await fileExists(legacyPath);
434
+ const naturalExists = await fileExists(naturalPath);
435
+
436
+ if (legacyExists && !naturalExists) {
437
+ // Rename legacy → natural
438
+ try {
439
+ await mkdir(dirname(naturalPath), { recursive: true });
440
+ await rename(legacyPath, naturalPath);
441
+ log.dim(` Legacy companion renamed: ${basename(legacyPath)} → ${basename(naturalPath)}`);
442
+ } catch { /* rename failed */ }
443
+ } else if (legacyExists && naturalExists) {
444
+ // Both exist (clone downloaded fresh copy) — move orphaned legacy file to trash
445
+ try {
446
+ const trashDir = join(process.cwd(), 'trash');
447
+ await mkdir(trashDir, { recursive: true });
448
+ await rename(legacyPath, join(trashDir, basename(legacyPath)));
449
+ await ensureTrashIcon(trashDir);
450
+ log.dim(` Trashed orphan legacy file: ${basename(legacyPath)}`);
451
+ } catch { /* non-critical */ }
452
+ }
453
+
454
+ // Update @reference regardless (even if file rename failed, fix the reference)
455
+ meta[col] = `@${naturalName}`;
456
+ metaChanged = true;
457
+ continue;
458
+ }
459
+
460
+ // Case B: @reference is natural but file on disk might still have ~UID
461
+ const naturalPath = resolveRef(refName);
462
+ if (await fileExists(naturalPath)) continue;
463
+
464
+ const ext = extname(refName);
465
+ const base = basename(refName, ext);
466
+ const legacyName = ext ? `${base}~${uid}${ext}` : `${base}~${uid}`;
467
+ const legacyPath = resolveRef(legacyName);
468
+
469
+ if (await fileExists(legacyPath)) {
470
+ try {
471
+ await mkdir(dirname(naturalPath), { recursive: true });
472
+ await rename(legacyPath, naturalPath);
473
+ log.dim(` Legacy companion renamed: ${basename(legacyPath)} → ${basename(naturalPath)}`);
474
+ } catch { /* rename failed */ }
475
+ }
476
+ }
477
+
478
+ // Rewrite metadata file if @references were updated
479
+ if (metaChanged) {
480
+ try {
481
+ await writeFile(metaPath, JSON.stringify(meta, null, 2) + '\n');
482
+ } catch { /* non-critical */ }
483
+ }
484
+
485
+ return metaChanged;
486
+ }
487
+
488
+ /**
489
+ * Scan all directories under Bins/ for orphaned legacy ~UID companion files
490
+ * that no metadata @reference points to, and move them to trash/.
491
+ *
492
+ * A file is considered an orphan if:
493
+ * - It contains ~ in its name (potential legacy ~UID naming)
494
+ * - It's NOT a .metadata.json file
495
+ * - No .metadata.json in the same directory has an @reference pointing to it
496
+ */
497
+ async function trashOrphanedLegacyCompanions() {
498
+ const binsDir = join(process.cwd(), BINS_DIR);
499
+ if (!await fileExists(binsDir)) return;
500
+
501
+ // Collect all @references from all metadata files, and all non-metadata files with ~
502
+ const referencedFiles = new Set(); // Set of absolute paths referenced by metadata
503
+ const tildeFiles = []; // non-metadata files containing ~
504
+
505
+ async function scan(dir) {
506
+ let entries;
507
+ try { entries = await readdir(dir, { withFileTypes: true }); } catch { return; }
508
+
509
+ for (const entry of entries) {
510
+ if (entry.name.startsWith('.')) continue;
511
+ const full = join(dir, entry.name);
512
+
513
+ if (entry.isDirectory()) {
514
+ await scan(full);
515
+ continue;
516
+ }
517
+
518
+ if (isMetadataFile(entry.name)) {
519
+ // Read metadata and collect all @references
520
+ try {
521
+ const meta = JSON.parse(await readFile(full, 'utf8'));
522
+ const cols = [...(meta._contentColumns || [])];
523
+ if (meta._mediaFile) cols.push('_mediaFile');
524
+ for (const col of cols) {
525
+ const ref = meta[col];
526
+ if (ref && String(ref).startsWith('@')) {
527
+ const refName = String(ref).substring(1);
528
+ // Handle both relative and @/ (root-relative) references
529
+ if (refName.startsWith('/')) {
530
+ referencedFiles.add(join(process.cwd(), refName));
531
+ } else {
532
+ referencedFiles.add(join(dir, refName));
533
+ }
534
+ }
535
+ }
536
+ } catch { /* skip unreadable metadata */ }
537
+ } else if (entry.name.includes('~')) {
538
+ tildeFiles.push(full);
539
+ }
540
+ }
541
+ }
542
+
543
+ await scan(binsDir);
544
+
545
+ // Filter to orphans: ~ files not referenced by any metadata
546
+ const orphans = tildeFiles.filter(f => !referencedFiles.has(f));
547
+ if (orphans.length === 0) return;
548
+
549
+ const trashDir = join(process.cwd(), 'trash');
550
+ await mkdir(trashDir, { recursive: true });
551
+ let trashed = 0;
552
+
553
+ for (const orphan of orphans) {
554
+ try {
555
+ await rename(orphan, join(trashDir, basename(orphan)));
556
+ trashed++;
557
+ } catch { /* non-critical */ }
558
+ }
559
+
560
+ if (trashed > 0) {
561
+ await ensureTrashIcon(trashDir);
562
+ log.dim(` Trashed ${trashed} orphaned legacy ~UID companion file(s)`);
563
+ }
198
564
  }
199
565
 
200
566
  /**
@@ -204,30 +570,39 @@ export function resolveEntityDirPaths(entityName, record, dirName) {
204
570
  async function buildFileRegistry(appJson, structure, placementPrefs) {
205
571
  const registry = new Map();
206
572
 
207
- function addToRegistry(filePath, entity, record, dir, filename, metaPath) {
208
- if (!registry.has(filePath)) {
209
- registry.set(filePath, []);
210
- }
211
- registry.get(filePath).push({ entity, record, dir, filename, metaPath });
212
- }
573
+ // Collect all entries first for collision resolution
574
+ const allEntries = [];
213
575
 
214
576
  // Process content records
215
577
  for (const record of (appJson.children.content || [])) {
216
578
  const { dir, filename, metaPath } = resolveRecordPaths(
217
579
  'content', record, structure, placementPrefs.contentPlacement
218
580
  );
219
- addToRegistry(join(dir, filename), 'content', record, dir, filename, metaPath);
581
+ allEntries.push({ entity: 'content', uid: record.UID, record, dir, filename, metaPath });
220
582
  }
221
583
 
222
584
  // Process media records
223
585
  for (const record of (appJson.children.media || [])) {
224
586
  const { dir, filename, metaPath } = resolveMediaPaths(record, structure);
225
- addToRegistry(join(dir, filename), 'media', record, dir, filename, metaPath);
587
+ allEntries.push({ entity: 'media', uid: record.UID, record, dir, filename, metaPath });
226
588
  }
227
589
 
228
- // Note: entity-dir records (Extensions/, Data Sources/, etc.) and generic entities
229
- // are excluded from collision detection they use UID-based naming to handle duplicates.
230
- // Only content and media records in Bins/ are checked for cross-entity collisions.
590
+ // Auto-resolve content vs media collisions and same-entity duplicates
591
+ // (content wins, media gets "(media)" suffix; same-entity duplicates get "-N" suffix)
592
+ resolveFilenameCollisions(allEntries);
593
+
594
+ // Build registry with resolved filenames
595
+ for (const entry of allEntries) {
596
+ const filePath = join(entry.dir, entry.filename);
597
+ if (!registry.has(filePath)) {
598
+ registry.set(filePath, []);
599
+ }
600
+ registry.get(filePath).push(entry);
601
+ }
602
+
603
+ // Note: entity-dir records (Extensions/, Data Sources/, etc.) use natural names for
604
+ // companion files with -N suffix collision resolution handled inline during processing.
605
+ // Only content and media records in Bins/ are checked for cross-entity collisions here.
231
606
 
232
607
  return registry;
233
608
  }
@@ -803,6 +1178,7 @@ export async function performClone(source, options = {}) {
803
1178
 
804
1179
  // Step 4c: Detect and resolve file path collisions (skip in pull mode and entity-filter mode)
805
1180
  let toDeleteUIDs = new Set();
1181
+ let resolvedFilenames = new Map(); // UID → resolved filename (after collision resolution)
806
1182
  if (!options.pullMode && !entityFilter) {
807
1183
  log.info('Scanning for file path collisions...');
808
1184
  const fileRegistry = await buildFileRegistry(appJson, structure, placementPrefs);
@@ -811,6 +1187,15 @@ export async function performClone(source, options = {}) {
811
1187
  if (toDeleteUIDs.size > 0) {
812
1188
  await stageCollisionDeletions(toDeleteUIDs, appJson, options);
813
1189
  }
1190
+
1191
+ // Build UID → filename map from the collision-resolved registry
1192
+ for (const entries of fileRegistry.values()) {
1193
+ for (const entry of entries) {
1194
+ if (entry.record.UID) {
1195
+ resolvedFilenames.set(entry.record.UID, entry.filename);
1196
+ }
1197
+ }
1198
+ }
814
1199
  }
815
1200
 
816
1201
  // Pre-load previous baseline for fast _LastUpdated string comparison in isServerNewer.
@@ -827,6 +1212,7 @@ export async function performClone(source, options = {}) {
827
1212
  placementPrefs.contentPlacement,
828
1213
  serverTz,
829
1214
  toDeleteUIDs,
1215
+ resolvedFilenames,
830
1216
  );
831
1217
  }
832
1218
 
@@ -840,7 +1226,7 @@ export async function performClone(source, options = {}) {
840
1226
  if (!entityFilter || entityFilter.has('media')) {
841
1227
  const mediaEntries = appJson.children.media || [];
842
1228
  if (mediaEntries.length > 0) {
843
- mediaRefs = await processMediaEntries(mediaEntries, structure, options, config, appJson.ShortName, serverTz, toDeleteUIDs);
1229
+ mediaRefs = await processMediaEntries(mediaEntries, structure, options, config, appJson.ShortName, serverTz, toDeleteUIDs, resolvedFilenames);
844
1230
  }
845
1231
  }
846
1232
 
@@ -894,6 +1280,19 @@ export async function performClone(source, options = {}) {
894
1280
  resetBaselineCache(); // invalidate so next operation reloads the fresh baseline
895
1281
  }
896
1282
 
1283
+ // Step 8.5: Detect and trash orphaned local records (deleted on server)
1284
+ if (!entityFilter) {
1285
+ const ig = await loadIgnore();
1286
+ const sync = await loadSynchronize();
1287
+ await detectAndTrashOrphans(appJson, ig, sync, { ...options, entityFilter });
1288
+ }
1289
+
1290
+ // Step 9: Trash orphaned legacy ~UID companion files that no metadata references
1291
+ await trashOrphanedLegacyCompanions();
1292
+
1293
+ // Step 10: Tag project files with sync status (best-effort, non-blocking)
1294
+ tagProjectFiles({ verbose: false }).catch(() => {});
1295
+
897
1296
  log.plain('');
898
1297
  const verb = options.pullMode ? 'Pull' : 'Clone';
899
1298
  log.success(entityFilter ? `${verb} complete! (filtered: ${options.entity})` : `${verb} complete!`);
@@ -1133,7 +1532,7 @@ async function updatePackageJson(appJson, config) {
1133
1532
  * Process content entries: write files + metadata, return reference map.
1134
1533
  * Returns array of { uid, metaPath } for app.json reference replacement.
1135
1534
  */
1136
- async function processContentEntries(contents, structure, options, contentPlacement, serverTz, skipUIDs = new Set()) {
1535
+ async function processContentEntries(contents, structure, options, contentPlacement, serverTz, skipUIDs = new Set(), resolvedFilenames = new Map()) {
1137
1536
  if (!contents || contents.length === 0) return [];
1138
1537
 
1139
1538
  const refs = [];
@@ -1151,7 +1550,8 @@ async function processContentEntries(contents, structure, options, contentPlacem
1151
1550
  log.dim(` Skipped ${record.Name || record.UID} (collision rejection)`);
1152
1551
  continue;
1153
1552
  }
1154
- const ref = await processRecord('content', record, structure, options, usedNames, placementPreference, serverTz, bulkAction);
1553
+ const filenameOverride = resolvedFilenames.get(record.UID) || null;
1554
+ const ref = await processRecord('content', record, structure, options, usedNames, placementPreference, serverTz, bulkAction, filenameOverride);
1155
1555
  if (ref) refs.push(ref);
1156
1556
  }
1157
1557
 
@@ -1175,6 +1575,7 @@ async function processEntityDirEntries(entityName, entries, options, serverTz) {
1175
1575
  const refs = [];
1176
1576
  const bulkAction = { value: null };
1177
1577
  const legacyRenameAction = { value: null }; // 'rename_all' | 'skip_all' | null
1578
+ const usedNames = new Map(); // name → count, for collision resolution
1178
1579
  const config = await loadConfig();
1179
1580
 
1180
1581
  // Determine filename column: saved preference, or prompt, or default
@@ -1322,19 +1723,27 @@ async function processEntityDirEntries(entityName, entries, options, serverTz) {
1322
1723
  name = sanitizeFilename(String(record.UID || 'untitled'));
1323
1724
  }
1324
1725
 
1325
- // Include UID in filename via tilde convention to ensure uniqueness
1726
+ // Resolve name collisions: second+ record with same name gets -1, -2, etc.
1326
1727
  const uid = record.UID || 'untitled';
1327
- const finalName = buildUidFilename(name, uid);
1328
-
1329
- const metaPath = join(dirName, `${finalName}.metadata.json`);
1330
-
1331
- // Legacy dot-separator detection: rename <name>.<uid>.metadata.json → <name>~<uid>.metadata.json
1332
- const legacyMetaPath = join(dirName, `${name}.${uid}.metadata.json`);
1333
- if (!await fileExists(metaPath) && await fileExists(legacyMetaPath)) {
1728
+ const nameKey = name;
1729
+ const count = usedNames.get(nameKey) || 0;
1730
+ usedNames.set(nameKey, count + 1);
1731
+ if (count > 0) name = `${name}-${count}`;
1732
+
1733
+ // Metadata: name.metadata~uid.json; companion files use natural name
1734
+ const metaPath = join(dirName, buildMetaFilename(name, uid));
1735
+
1736
+ // Legacy detection: rename old-format metadata files to new convention
1737
+ const legacyDotMetaPath = join(dirName, `${name}.${uid}.metadata.json`);
1738
+ const legacyTildeMetaPath = join(dirName, `${buildUidFilename(name, uid)}.metadata.json`);
1739
+ const legacyPath = !await fileExists(metaPath) && await fileExists(legacyDotMetaPath) ? legacyDotMetaPath
1740
+ : !await fileExists(metaPath) && await fileExists(legacyTildeMetaPath) ? legacyTildeMetaPath
1741
+ : null;
1742
+ if (legacyPath) {
1334
1743
  if (options.yes || legacyRenameAction.value === 'rename_all') {
1335
1744
  const { rename: fsRename } = await import('fs/promises');
1336
- await fsRename(legacyMetaPath, metaPath);
1337
- log.dim(` Auto-renamed: ${basename(legacyMetaPath)} → ${basename(metaPath)}`);
1745
+ await fsRename(legacyPath, metaPath);
1746
+ log.dim(` Auto-renamed: ${basename(legacyPath)} → ${basename(metaPath)}`);
1338
1747
  } else if (legacyRenameAction.value === 'skip_all') {
1339
1748
  // skip silently
1340
1749
  } else {
@@ -1342,7 +1751,7 @@ async function processEntityDirEntries(entityName, entries, options, serverTz) {
1342
1751
  const { action } = await inquirer.prompt([{
1343
1752
  type: 'list',
1344
1753
  name: 'action',
1345
- message: `Found legacy filename "${basename(legacyMetaPath)}" — rename to "${basename(metaPath)}"?`,
1754
+ message: `Found legacy filename "${basename(legacyPath)}" — rename to "${basename(metaPath)}"?`,
1346
1755
  choices: [
1347
1756
  { name: 'Yes', value: 'rename' },
1348
1757
  { name: 'Rename all remaining', value: 'rename_all' },
@@ -1352,8 +1761,8 @@ async function processEntityDirEntries(entityName, entries, options, serverTz) {
1352
1761
  }]);
1353
1762
  if (action === 'rename' || action === 'rename_all') {
1354
1763
  const { rename: fsRename } = await import('fs/promises');
1355
- await fsRename(legacyMetaPath, metaPath);
1356
- log.success(` Renamed: ${basename(legacyMetaPath)} → ${basename(metaPath)}`);
1764
+ await fsRename(legacyPath, metaPath);
1765
+ log.success(` Renamed: ${basename(legacyPath)} → ${basename(metaPath)}`);
1357
1766
  if (action === 'rename_all') legacyRenameAction.value = 'rename_all';
1358
1767
  } else {
1359
1768
  if (action === 'skip_all') legacyRenameAction.value = 'skip_all';
@@ -1370,7 +1779,7 @@ async function processEntityDirEntries(entityName, entries, options, serverTz) {
1370
1779
  const entityMetaExists = await fileExists(metaPath) && !await fileExists(willDeleteEntityMeta);
1371
1780
  if (entityMetaExists && !options.yes && !hasNewExtractions) {
1372
1781
  if (bulkAction.value === 'skip_all') {
1373
- log.dim(` Skipped ${finalName}`);
1782
+ log.dim(` Skipped ${name}`);
1374
1783
  refs.push({ uid: record.UID, metaPath });
1375
1784
  continue;
1376
1785
  }
@@ -1378,51 +1787,65 @@ async function processEntityDirEntries(entityName, entries, options, serverTz) {
1378
1787
  if (bulkAction.value !== 'overwrite_all') {
1379
1788
  const configWithTz = { ...config, ServerTimezone: serverTz };
1380
1789
  const localSyncTime = await getLocalSyncTime(metaPath);
1381
- const serverNewer = isServerNewer(localSyncTime, record._LastUpdated, configWithTz, 'content', record.UID);
1790
+
1791
+ // If local metadata has no _LastUpdated (e.g. from dbo add), treat as server-newer
1792
+ let localMissingLastUpdated = false;
1793
+ try {
1794
+ const localMeta = JSON.parse(await readFile(metaPath, 'utf8'));
1795
+ if (!localMeta._LastUpdated) localMissingLastUpdated = true;
1796
+ } catch { /* unreadable */ }
1797
+
1798
+ const serverNewer = localMissingLastUpdated || isServerNewer(localSyncTime, record._LastUpdated, configWithTz, 'content', record.UID);
1382
1799
  const serverDate = parseServerDate(record._LastUpdated, serverTz);
1383
1800
 
1384
1801
  if (serverNewer) {
1385
- const action = await promptChangeDetection(finalName, record, configWithTz, {
1386
- serverDate,
1387
- localDate: localSyncTime,
1388
- });
1802
+ // Incomplete metadata (no _LastUpdated) from dbo add — auto-accept without prompting
1803
+ if (localMissingLastUpdated) {
1804
+ log.dim(` Completing metadata: ${name}`);
1805
+ // Fall through to write
1806
+ } else {
1807
+ const action = await promptChangeDetection(name, record, configWithTz, {
1808
+ serverDate,
1809
+ localDate: localSyncTime,
1810
+ });
1389
1811
 
1390
- if (action === 'skip') {
1391
- log.dim(` Skipped ${finalName}`);
1392
- refs.push({ uid: record.UID, metaPath });
1393
- continue;
1394
- }
1395
- if (action === 'skip_all') {
1396
- bulkAction.value = 'skip_all';
1397
- log.dim(` Skipped ${finalName}`);
1398
- refs.push({ uid: record.UID, metaPath });
1399
- continue;
1400
- }
1401
- if (action === 'overwrite_all') {
1402
- bulkAction.value = 'overwrite_all';
1403
- }
1404
- if (action === 'compare') {
1405
- await inlineDiffAndMerge(record, metaPath, configWithTz);
1406
- refs.push({ uid: record.UID, metaPath });
1407
- continue;
1812
+ if (action === 'skip') {
1813
+ log.dim(` Skipped ${name}`);
1814
+ refs.push({ uid: record.UID, metaPath });
1815
+ continue;
1816
+ }
1817
+ if (action === 'skip_all') {
1818
+ bulkAction.value = 'skip_all';
1819
+ log.dim(` Skipped ${name}`);
1820
+ refs.push({ uid: record.UID, metaPath });
1821
+ continue;
1822
+ }
1823
+ if (action === 'overwrite_all') {
1824
+ bulkAction.value = 'overwrite_all';
1825
+ }
1826
+ if (action === 'compare') {
1827
+ await inlineDiffAndMerge(record, metaPath, configWithTz);
1828
+ refs.push({ uid: record.UID, metaPath });
1829
+ continue;
1830
+ }
1408
1831
  }
1409
1832
  } else {
1410
1833
  const locallyModified = await hasLocalModifications(metaPath, configWithTz);
1411
1834
  if (locallyModified) {
1412
- const action = await promptChangeDetection(finalName, record, configWithTz, {
1835
+ const action = await promptChangeDetection(name, record, configWithTz, {
1413
1836
  localIsNewer: true,
1414
1837
  serverDate,
1415
1838
  localDate: localSyncTime,
1416
1839
  });
1417
1840
 
1418
1841
  if (action === 'skip') {
1419
- log.dim(` Kept local: ${finalName}`);
1842
+ log.dim(` Kept local: ${name}`);
1420
1843
  refs.push({ uid: record.UID, metaPath });
1421
1844
  continue;
1422
1845
  }
1423
1846
  if (action === 'skip_all') {
1424
1847
  bulkAction.value = 'skip_all';
1425
- log.dim(` Kept local: ${finalName}`);
1848
+ log.dim(` Kept local: ${name}`);
1426
1849
  refs.push({ uid: record.UID, metaPath });
1427
1850
  continue;
1428
1851
  }
@@ -1435,7 +1858,7 @@ async function processEntityDirEntries(entityName, entries, options, serverTz) {
1435
1858
  continue;
1436
1859
  }
1437
1860
  } else {
1438
- log.dim(` Up to date: ${finalName}`);
1861
+ log.dim(` Up to date: ${name}`);
1439
1862
  refs.push({ uid: record.UID, metaPath });
1440
1863
  continue;
1441
1864
  }
@@ -1452,25 +1875,25 @@ async function processEntityDirEntries(entityName, entries, options, serverTz) {
1452
1875
 
1453
1876
  // Check if this column should be extracted as a companion file
1454
1877
  const extractInfo = contentColsToExtract.find(c => c.col === key);
1455
- if (extractInfo && value && typeof value === 'object' && value.encoding === 'base64' && value.value !== null) {
1456
- const decoded = resolveContentValue(value);
1457
- if (decoded) {
1458
- const colFileName = `${finalName}.${key}.${extractInfo.ext}`;
1459
- const colFilePath = join(dirName, colFileName);
1460
- await writeFile(colFilePath, decoded);
1461
- meta[key] = `@${colFileName}`;
1462
- extractedContentCols.push(key);
1463
-
1464
- // Set timestamps on companion file
1465
- if (serverTz && (record._CreatedOn || record._LastUpdated)) {
1466
- try {
1467
- await setFileTimestamps(colFilePath, record._CreatedOn, record._LastUpdated, serverTz);
1468
- } catch { /* non-critical */ }
1469
- }
1878
+ if (extractInfo) {
1879
+ const isBase64 = value && typeof value === 'object' && !Array.isArray(value) && value.encoding === 'base64';
1880
+ const decoded = isBase64 ? (resolveContentValue(value) ?? '') : (value ?? '');
1881
+ const colFileName = `${name}.${key}.${extractInfo.ext}`;
1882
+ const colFilePath = join(dirName, colFileName);
1883
+ await writeFile(colFilePath, decoded);
1884
+ await upsertDeployEntry(colFilePath, record.UID, entityName, key);
1885
+ meta[key] = `@${colFileName}`;
1886
+ extractedContentCols.push(key);
1470
1887
 
1471
- log.dim(` → ${colFilePath}`);
1472
- continue;
1888
+ // Set timestamps on companion file
1889
+ if (serverTz && (record._CreatedOn || record._LastUpdated)) {
1890
+ try {
1891
+ await setFileTimestamps(colFilePath, record._CreatedOn, record._LastUpdated, serverTz);
1892
+ } catch { /* non-critical */ }
1473
1893
  }
1894
+
1895
+ log.dim(` → ${colFilePath}`);
1896
+ continue;
1474
1897
  }
1475
1898
 
1476
1899
  // Other base64 columns not selected for extraction — decode inline
@@ -1790,6 +2213,7 @@ async function processExtensionEntries(entries, structure, options, serverTz) {
1790
2213
  const { filenameCol, contentColsToExtract } = descriptorPrefs.get(descriptor);
1791
2214
  const useRootDoc = (descriptor === 'documentation' && docPlacement === 'root');
1792
2215
  const mdColInfo = useRootDoc ? contentColsToExtract.find(c => c.ext === 'md') : null;
2216
+ const usedNames = new Map(); // name → count, for collision resolution within this descriptor group
1793
2217
 
1794
2218
  log.info(`Processing ${records.length} "${descriptor}" extension(s) → ${dir}/`);
1795
2219
 
@@ -1804,17 +2228,27 @@ async function processExtensionEntries(entries, structure, options, serverTz) {
1804
2228
  name = sanitizeFilename(String(record.UID || 'untitled'));
1805
2229
  }
1806
2230
 
2231
+ // Resolve name collisions: second+ record with same name gets -1, -2, etc.
1807
2232
  const uid = record.UID || 'untitled';
1808
- const finalName = buildUidFilename(name, uid);
1809
- const metaPath = join(dir, `${finalName}.metadata.json`);
1810
-
1811
- // Legacy dot-separator detection: rename <name>.<uid>.metadata.json <name>~<uid>.metadata.json
1812
- const legacyExtMetaPath = join(dir, `${name}.${uid}.metadata.json`);
1813
- if (!await fileExists(metaPath) && await fileExists(legacyExtMetaPath)) {
2233
+ const nameKey = name;
2234
+ const nameCount = usedNames.get(nameKey) || 0;
2235
+ usedNames.set(nameKey, nameCount + 1);
2236
+ if (nameCount > 0) name = `${name}-${nameCount}`;
2237
+
2238
+ // Metadata: name.metadata~uid.json; companion files use natural name
2239
+ const metaPath = join(dir, buildMetaFilename(name, uid));
2240
+
2241
+ // Legacy detection: rename old-format metadata files to new convention
2242
+ const legacyDotExtMetaPath = join(dir, `${name}.${uid}.metadata.json`);
2243
+ const legacyTildeExtMetaPath = join(dir, `${buildUidFilename(name, uid)}.metadata.json`);
2244
+ const legacyExtPath = !await fileExists(metaPath) && await fileExists(legacyDotExtMetaPath) ? legacyDotExtMetaPath
2245
+ : !await fileExists(metaPath) && await fileExists(legacyTildeExtMetaPath) ? legacyTildeExtMetaPath
2246
+ : null;
2247
+ if (legacyExtPath) {
1814
2248
  if (options.yes || legacyRenameAction.value === 'rename_all') {
1815
2249
  const { rename: fsRename } = await import('fs/promises');
1816
- await fsRename(legacyExtMetaPath, metaPath);
1817
- log.dim(` Auto-renamed: ${basename(legacyExtMetaPath)} → ${basename(metaPath)}`);
2250
+ await fsRename(legacyExtPath, metaPath);
2251
+ log.dim(` Auto-renamed: ${basename(legacyExtPath)} → ${basename(metaPath)}`);
1818
2252
  } else if (legacyRenameAction.value === 'skip_all') {
1819
2253
  // skip silently
1820
2254
  } else {
@@ -1822,7 +2256,7 @@ async function processExtensionEntries(entries, structure, options, serverTz) {
1822
2256
  const { action } = await inquirer.prompt([{
1823
2257
  type: 'list',
1824
2258
  name: 'action',
1825
- message: `Found legacy filename "${basename(legacyExtMetaPath)}" — rename to "${basename(metaPath)}"?`,
2259
+ message: `Found legacy filename "${basename(legacyExtPath)}" — rename to "${basename(metaPath)}"?`,
1826
2260
  choices: [
1827
2261
  { name: 'Yes', value: 'rename' },
1828
2262
  { name: 'Rename all remaining', value: 'rename_all' },
@@ -1832,8 +2266,8 @@ async function processExtensionEntries(entries, structure, options, serverTz) {
1832
2266
  }]);
1833
2267
  if (action === 'rename' || action === 'rename_all') {
1834
2268
  const { rename: fsRename } = await import('fs/promises');
1835
- await fsRename(legacyExtMetaPath, metaPath);
1836
- log.success(` Renamed: ${basename(legacyExtMetaPath)} → ${basename(metaPath)}`);
2269
+ await fsRename(legacyExtPath, metaPath);
2270
+ log.success(` Renamed: ${basename(legacyExtPath)} → ${basename(metaPath)}`);
1837
2271
  if (action === 'rename_all') legacyRenameAction.value = 'rename_all';
1838
2272
  } else {
1839
2273
  if (action === 'skip_all') legacyRenameAction.value = 'skip_all';
@@ -1841,14 +2275,40 @@ async function processExtensionEntries(entries, structure, options, serverTz) {
1841
2275
  }
1842
2276
  }
1843
2277
 
2278
+ // Rename legacy ~UID companion files and update @references in extension metadata
2279
+ if (await fileExists(metaPath)) {
2280
+ try {
2281
+ const existingMeta = JSON.parse(await readFile(metaPath, 'utf8'));
2282
+ await detectAndRenameLegacyCompanions(metaPath, existingMeta);
2283
+ } catch { /* non-critical */ }
2284
+ }
2285
+
2286
+ // Check if any @reference content files are missing — force re-extraction if so
2287
+ let hasNewExtractions = contentColsToExtract.length > 0;
2288
+ if (!hasNewExtractions && await fileExists(metaPath)) {
2289
+ try {
2290
+ const existingMeta = JSON.parse(await readFile(metaPath, 'utf8'));
2291
+ for (const col of (existingMeta._contentColumns || [])) {
2292
+ const ref = existingMeta[col];
2293
+ if (ref && String(ref).startsWith('@')) {
2294
+ const refName = String(ref).substring(1);
2295
+ const refPath = refName.startsWith('/') ? join(process.cwd(), refName) : join(dir, refName);
2296
+ if (!await fileExists(refPath)) {
2297
+ hasNewExtractions = true; // Force re-extraction
2298
+ break;
2299
+ }
2300
+ }
2301
+ }
2302
+ } catch { /* non-critical */ }
2303
+ }
2304
+
1844
2305
  // Change detection — same pattern as processEntityDirEntries()
1845
- const hasNewExtractions = contentColsToExtract.length > 0;
1846
2306
  // Skip __WILL_DELETE__-prefixed files — treat as "no existing file"
1847
2307
  const willDeleteExtMeta = join(dir, `${WILL_DELETE_PREFIX}${basename(metaPath)}`);
1848
2308
  const extMetaExists = await fileExists(metaPath) && !await fileExists(willDeleteExtMeta);
1849
2309
  if (extMetaExists && !options.yes && !hasNewExtractions) {
1850
2310
  if (bulkAction.value === 'skip_all') {
1851
- log.dim(` Skipped ${finalName}`);
2311
+ log.dim(` Skipped ${name}`);
1852
2312
  refs.push({ uid: record.UID, metaPath });
1853
2313
  continue;
1854
2314
  }
@@ -1859,7 +2319,7 @@ async function processExtensionEntries(entries, structure, options, serverTz) {
1859
2319
  const serverDate = parseServerDate(record._LastUpdated, serverTz);
1860
2320
 
1861
2321
  if (serverNewer) {
1862
- const action = await promptChangeDetection(finalName, record, cfgWithTz, { serverDate, localDate: localSyncTime });
2322
+ const action = await promptChangeDetection(name, record, cfgWithTz, { serverDate, localDate: localSyncTime });
1863
2323
  if (action === 'skip') { refs.push({ uid: record.UID, metaPath }); continue; }
1864
2324
  if (action === 'skip_all') { bulkAction.value = 'skip_all'; refs.push({ uid: record.UID, metaPath }); continue; }
1865
2325
  if (action === 'overwrite_all') { bulkAction.value = 'overwrite_all'; }
@@ -1867,13 +2327,13 @@ async function processExtensionEntries(entries, structure, options, serverTz) {
1867
2327
  } else {
1868
2328
  const localModified = await hasLocalModifications(metaPath, cfgWithTz);
1869
2329
  if (localModified) {
1870
- const action = await promptChangeDetection(finalName, record, cfgWithTz, { localIsNewer: true, serverDate, localDate: localSyncTime });
2330
+ const action = await promptChangeDetection(name, record, cfgWithTz, { localIsNewer: true, serverDate, localDate: localSyncTime });
1871
2331
  if (action === 'skip') { refs.push({ uid: record.UID, metaPath }); continue; }
1872
2332
  if (action === 'skip_all') { bulkAction.value = 'skip_all'; refs.push({ uid: record.UID, metaPath }); continue; }
1873
2333
  if (action === 'overwrite_all') { bulkAction.value = 'overwrite_all'; }
1874
2334
  if (action === 'compare') { await inlineDiffAndMerge(record, metaPath, cfgWithTz, { localIsNewer: true }); refs.push({ uid: record.UID, metaPath }); continue; }
1875
2335
  } else {
1876
- log.dim(` Up to date: ${finalName}`);
2336
+ log.dim(` Up to date: ${name}`);
1877
2337
  refs.push({ uid: record.UID, metaPath });
1878
2338
  continue;
1879
2339
  }
@@ -1889,32 +2349,31 @@ async function processExtensionEntries(entries, structure, options, serverTz) {
1889
2349
  if (key === 'children') continue;
1890
2350
 
1891
2351
  const extractInfo = contentColsToExtract.find(c => c.col === key);
1892
- if (extractInfo && value && typeof value === 'object'
1893
- && value.encoding === 'base64' && value.value !== null) {
1894
- const decoded = resolveContentValue(value);
1895
- if (decoded) {
1896
- let colFilePath, refValue;
1897
-
1898
- if (mdColInfo && extractInfo.col === mdColInfo.col) {
1899
- // Root placement: docs/<name>~<uid>.md
1900
- const docFileName = `${finalName}.md`;
1901
- colFilePath = join(DOCUMENTATION_DIR, docFileName);
1902
- refValue = `@/${DOCUMENTATION_DIR}/${docFileName}`;
1903
- } else {
1904
- const colFileName = `${finalName}.${key}.${extractInfo.ext}`;
1905
- colFilePath = join(dir, colFileName);
1906
- refValue = `@${colFileName}`;
1907
- }
2352
+ if (extractInfo) {
2353
+ const isBase64 = value && typeof value === 'object' && !Array.isArray(value) && value.encoding === 'base64';
2354
+ const decoded = isBase64 ? (resolveContentValue(value) ?? '') : (value ?? '');
2355
+ let colFilePath, refValue;
2356
+
2357
+ if (mdColInfo && extractInfo.col === mdColInfo.col) {
2358
+ // Root placement: docs/<name>.md (natural name, no ~UID)
2359
+ const docFileName = `${name}.md`;
2360
+ colFilePath = join(DOCUMENTATION_DIR, docFileName);
2361
+ refValue = `@/${DOCUMENTATION_DIR}/${docFileName}`;
2362
+ } else {
2363
+ const colFileName = `${name}.${key}.${extractInfo.ext}`;
2364
+ colFilePath = join(dir, colFileName);
2365
+ refValue = `@${colFileName}`;
2366
+ }
1908
2367
 
1909
- meta[key] = refValue;
1910
- await writeFile(colFilePath, decoded);
1911
- extractedCols.push(key);
1912
- if (serverTz) {
1913
- try { await setFileTimestamps(colFilePath, record._CreatedOn, record._LastUpdated, serverTz); } catch {}
1914
- }
1915
- log.dim(` → ${colFilePath}`);
1916
- continue;
2368
+ meta[key] = refValue;
2369
+ await writeFile(colFilePath, decoded);
2370
+ await upsertDeployEntry(colFilePath, record.UID, 'extension', key);
2371
+ extractedCols.push(key);
2372
+ if (serverTz) {
2373
+ try { await setFileTimestamps(colFilePath, record._CreatedOn, record._LastUpdated, serverTz); } catch {}
1917
2374
  }
2375
+ log.dim(` → ${colFilePath}`);
2376
+ continue;
1918
2377
  }
1919
2378
 
1920
2379
  // Inline or non-extraction columns
@@ -1959,7 +2418,7 @@ async function processExtensionEntries(entries, structure, options, serverTz) {
1959
2418
  * Process media entries: download binary files from server + create metadata.
1960
2419
  * Media uses Filename (not Name) and files are fetched via /api/media/{uid}.
1961
2420
  */
1962
- async function processMediaEntries(mediaRecords, structure, options, config, appShortName, serverTz, skipUIDs = new Set()) {
2421
+ async function processMediaEntries(mediaRecords, structure, options, config, appShortName, serverTz, skipUIDs = new Set(), resolvedFilenames = new Map()) {
1963
2422
  if (!mediaRecords || mediaRecords.length === 0) return [];
1964
2423
 
1965
2424
  // Track stale records (404s) for cleanup prompt
@@ -2062,16 +2521,25 @@ async function processMediaEntries(mediaRecords, structure, options, config, app
2062
2521
  if (!dir) dir = BINS_DIR;
2063
2522
  await mkdir(dir, { recursive: true });
2064
2523
 
2065
- // Always include UID in filename via tilde convention
2524
+ // Companion: natural name, no UID (use collision-resolved override if available)
2066
2525
  const uid = String(record.UID || record._id || 'untitled');
2067
- const base = buildUidFilename(name, uid);
2068
- const finalFilename = `${base}.${ext}`;
2526
+ const finalFilename = resolvedFilenames.get(record.UID) || sanitizeFilename(filename);
2069
2527
  const filePath = join(dir, finalFilename);
2070
- const metaPath = join(dir, `${finalFilename}.metadata.json`);
2528
+ // Metadata: name.ext.metadata~uid.json
2529
+ const naturalMediaBase = `${name}.${ext}`;
2530
+ const metaPath = join(dir, buildMetaFilename(naturalMediaBase, uid));
2071
2531
  // usedNames retained for tracking
2072
2532
  const fileKey = `${dir}/${name}.${ext}`;
2073
2533
  usedNames.set(fileKey, (usedNames.get(fileKey) || 0) + 1);
2074
2534
 
2535
+ // Rename legacy ~UID companion files to natural names if needed
2536
+ if (await fileExists(metaPath)) {
2537
+ try {
2538
+ const existingMeta = JSON.parse(await readFile(metaPath, 'utf8'));
2539
+ await detectAndRenameLegacyCompanions(metaPath, existingMeta);
2540
+ } catch { /* non-critical */ }
2541
+ }
2542
+
2075
2543
  // Change detection for existing media files
2076
2544
  // Skip __WILL_DELETE__-prefixed files — treat as "no existing file"
2077
2545
  const willDeleteMediaMeta = join(dir, `${WILL_DELETE_PREFIX}${basename(metaPath)}`);
@@ -2093,7 +2561,7 @@ async function processMediaEntries(mediaRecords, structure, options, config, app
2093
2561
  const diffable = isDiffable(ext);
2094
2562
 
2095
2563
  if (serverNewer) {
2096
- const action = await promptChangeDetection(dedupName, record, configWithTz, {
2564
+ const action = await promptChangeDetection(finalFilename, record, configWithTz, {
2097
2565
  diffable,
2098
2566
  serverDate,
2099
2567
  localDate: localSyncTime,
@@ -2124,7 +2592,7 @@ async function processMediaEntries(mediaRecords, structure, options, config, app
2124
2592
  const locallyModified = await hasLocalModifications(metaPath, configWithTz);
2125
2593
  if (locallyModified) {
2126
2594
  const localDate = localSyncTime; // mtime already fetched above
2127
- const action = await promptChangeDetection(dedupName, record, configWithTz, {
2595
+ const action = await promptChangeDetection(finalFilename, record, configWithTz, {
2128
2596
  localIsNewer: true,
2129
2597
  diffable,
2130
2598
  serverDate,
@@ -2230,6 +2698,7 @@ async function processMediaEntries(mediaRecords, structure, options, config, app
2230
2698
  meta._mediaFile = `@${finalFilename}`;
2231
2699
 
2232
2700
  await writeFile(metaPath, JSON.stringify(meta, null, 2) + '\n');
2701
+ await upsertDeployEntry(filePath, record.UID, 'media', 'File');
2233
2702
  log.dim(` → ${metaPath}`);
2234
2703
 
2235
2704
  // Set file timestamps from server dates (independent try-catch so one failure
@@ -2273,7 +2742,7 @@ async function processMediaEntries(mediaRecords, structure, options, config, app
2273
2742
  * Process a single record: determine directory, write content file + metadata.
2274
2743
  * Returns { uid, metaPath } or null.
2275
2744
  */
2276
- async function processRecord(entityName, record, structure, options, usedNames, _placementPreference, serverTz, bulkAction = { value: null }) {
2745
+ async function processRecord(entityName, record, structure, options, usedNames, _placementPreference, serverTz, bulkAction = { value: null }, filenameOverride = null) {
2277
2746
  let name = sanitizeFilename(String(record.Name || record.UID || 'untitled'));
2278
2747
 
2279
2748
  // Determine file extension (priority: Extension field > Name field > Path field > empty)
@@ -2296,12 +2765,11 @@ async function processRecord(entityName, record, structure, options, usedNames,
2296
2765
  }
2297
2766
  // If still no extension, check existing local metadata for a previously chosen extension.
2298
2767
  // On re-clone, the Content @reference in the local metadata already has the extension
2299
- // the user picked on the first clone (e.g. "@CurrentTask~uid.html" → "html").
2768
+ // the user picked on the first clone (e.g. "@CurrentTask.html" → "html").
2300
2769
  if (!ext && record.UID) {
2301
2770
  try {
2302
2771
  const uid = String(record.UID);
2303
2772
  const sanitized = sanitizeFilename(String(record.Name || uid || 'untitled'));
2304
- const probe = buildUidFilename(sanitized, uid);
2305
2773
  // Resolve the directory the same way the main code does below
2306
2774
  let probeDir = BINS_DIR;
2307
2775
  if (record.BinID && structure[record.BinID]) {
@@ -2312,7 +2780,7 @@ async function processRecord(entityName, record, structure, options, usedNames,
2312
2780
  probeDir = probeDir.replace(/^\/+|\/+$/g, '') || BINS_DIR;
2313
2781
  if (extname(probeDir)) probeDir = probeDir.substring(0, probeDir.lastIndexOf('/')) || BINS_DIR;
2314
2782
 
2315
- const probeMeta = join(probeDir, `${probe}.metadata.json`);
2783
+ const probeMeta = join(probeDir, buildMetaFilename(sanitized, uid));
2316
2784
  const raw = await readFile(probeMeta, 'utf8');
2317
2785
  const localMeta = JSON.parse(raw);
2318
2786
  // Extract extension from Content @reference (e.g. "@Name~uid.html")
@@ -2398,23 +2866,31 @@ async function processRecord(entityName, record, structure, options, usedNames,
2398
2866
 
2399
2867
  await mkdir(dir, { recursive: true });
2400
2868
 
2401
- // Always include UID in filename via tilde convention
2402
2869
  const uid = String(record.UID || record._id || 'untitled');
2403
- const finalName = buildUidFilename(name, uid);
2870
+ // Companion: natural name, no UID (use collision-resolved override if available)
2871
+ const fileName = filenameOverride || sanitizeFilename(buildContentFileName(record, uid));
2872
+ // Metadata: name.metadata~uid.json
2404
2873
  // usedNames retained for non-UID edge case tracking
2405
2874
  const nameKey = `${dir}/${name}`;
2406
2875
  usedNames.set(nameKey, (usedNames.get(nameKey) || 0) + 1);
2407
2876
 
2408
- // Write content file if Content column has data
2877
+ // Write content file always create companion for content entities even if empty
2409
2878
  const contentValue = record.Content;
2410
- const hasContent = contentValue && (
2411
- (typeof contentValue === 'object' && contentValue.value) ||
2412
- (typeof contentValue === 'string' && contentValue.length > 0)
2879
+ const hasContent = contentValue !== null && contentValue !== undefined && (
2880
+ (typeof contentValue === 'object' && contentValue.encoding === 'base64') ||
2881
+ (typeof contentValue === 'string')
2413
2882
  );
2414
2883
 
2415
- const fileName = ext ? `${finalName}.${ext}` : finalName;
2416
2884
  const filePath = join(dir, fileName);
2417
- const metaPath = join(dir, `${finalName}.metadata.json`);
2885
+ const metaPath = join(dir, buildMetaFilename(name, uid));
2886
+
2887
+ // Rename legacy ~UID companion files to natural names if needed
2888
+ if (await fileExists(metaPath)) {
2889
+ try {
2890
+ const existingMeta = JSON.parse(await readFile(metaPath, 'utf8'));
2891
+ await detectAndRenameLegacyCompanions(metaPath, existingMeta);
2892
+ } catch { /* non-critical */ }
2893
+ }
2418
2894
 
2419
2895
  // Change detection: check if file already exists locally
2420
2896
  // Skip __WILL_DELETE__-prefixed files — treat as "no existing file"
@@ -2422,7 +2898,7 @@ async function processRecord(entityName, record, structure, options, usedNames,
2422
2898
  const metaExistsForChangeDetect = await fileExists(metaPath) && !await fileExists(willDeleteMeta);
2423
2899
  if (metaExistsForChangeDetect && !options.yes) {
2424
2900
  if (bulkAction.value === 'skip_all') {
2425
- log.dim(` Skipped ${finalName}.${ext}`);
2901
+ log.dim(` Skipped ${fileName}`);
2426
2902
  return { uid: record.UID, metaPath };
2427
2903
  }
2428
2904
 
@@ -2430,51 +2906,66 @@ async function processRecord(entityName, record, structure, options, usedNames,
2430
2906
  const config = await loadConfig();
2431
2907
  const configWithTz = { ...config, ServerTimezone: serverTz };
2432
2908
  const localSyncTime = await getLocalSyncTime(metaPath);
2433
- const serverNewer = isServerNewer(localSyncTime, record._LastUpdated, configWithTz, entityName, record.UID);
2909
+
2910
+ // If local metadata has no _LastUpdated (e.g. from dbo add with incomplete fields),
2911
+ // always treat as server-newer so pull populates missing columns.
2912
+ let localMissingLastUpdated = false;
2913
+ try {
2914
+ const localMeta = JSON.parse(await readFile(metaPath, 'utf8'));
2915
+ if (!localMeta._LastUpdated) localMissingLastUpdated = true;
2916
+ } catch { /* unreadable — will be overwritten */ }
2917
+
2918
+ const serverNewer = localMissingLastUpdated || isServerNewer(localSyncTime, record._LastUpdated, configWithTz, entityName, record.UID);
2434
2919
  const serverDate = parseServerDate(record._LastUpdated, serverTz);
2435
2920
 
2436
2921
  if (serverNewer) {
2437
- const action = await promptChangeDetection(finalName, record, configWithTz, {
2438
- serverDate,
2439
- localDate: localSyncTime,
2440
- });
2441
-
2442
- if (action === 'skip') {
2443
- log.dim(` Skipped ${finalName}.${ext}`);
2444
- return { uid: record.UID, metaPath };
2445
- }
2446
- if (action === 'skip_all') {
2447
- bulkAction.value = 'skip_all';
2448
- log.dim(` Skipped ${finalName}.${ext}`);
2449
- return { uid: record.UID, metaPath };
2450
- }
2451
- if (action === 'overwrite_all') {
2452
- bulkAction.value = 'overwrite_all';
2922
+ // Incomplete metadata (no _LastUpdated) from dbo add — auto-accept without prompting
2923
+ if (localMissingLastUpdated) {
2924
+ log.dim(` Completing metadata: ${fileName}`);
2453
2925
  // Fall through to write
2926
+ } else {
2927
+ const action = await promptChangeDetection(fileName, record, configWithTz, {
2928
+ serverDate,
2929
+ localDate: localSyncTime,
2930
+ });
2931
+
2932
+ if (action === 'skip') {
2933
+ log.dim(` Skipped ${fileName}`);
2934
+ return { uid: record.UID, metaPath };
2935
+ }
2936
+ if (action === 'skip_all') {
2937
+ bulkAction.value = 'skip_all';
2938
+ log.dim(` Skipped ${fileName}`);
2939
+ return { uid: record.UID, metaPath };
2940
+ }
2941
+ if (action === 'overwrite_all') {
2942
+ bulkAction.value = 'overwrite_all';
2943
+ // Fall through to write
2944
+ }
2945
+ if (action === 'compare') {
2946
+ await inlineDiffAndMerge(record, metaPath, configWithTz);
2947
+ return { uid: record.UID, metaPath };
2948
+ }
2949
+ // 'overwrite' falls through to normal write
2454
2950
  }
2455
- if (action === 'compare') {
2456
- await inlineDiffAndMerge(record, metaPath, configWithTz);
2457
- return { uid: record.UID, metaPath };
2458
- }
2459
- // 'overwrite' falls through to normal write
2460
2951
  } else {
2461
2952
  // Server _LastUpdated hasn't changed since last sync.
2462
2953
  // Check if local content files were modified (user edits).
2463
2954
  const locallyModified = await hasLocalModifications(metaPath, configWithTz);
2464
2955
  if (locallyModified) {
2465
- const action = await promptChangeDetection(finalName, record, configWithTz, {
2956
+ const action = await promptChangeDetection(fileName, record, configWithTz, {
2466
2957
  localIsNewer: true,
2467
2958
  serverDate,
2468
2959
  localDate: localSyncTime,
2469
2960
  });
2470
2961
 
2471
2962
  if (action === 'skip') {
2472
- log.dim(` Kept local: ${finalName}.${ext}`);
2963
+ log.dim(` Kept local: ${fileName}`);
2473
2964
  return { uid: record.UID, metaPath };
2474
2965
  }
2475
2966
  if (action === 'skip_all') {
2476
2967
  bulkAction.value = 'skip_all';
2477
- log.dim(` Kept local: ${finalName}.${ext}`);
2968
+ log.dim(` Kept local: ${fileName}`);
2478
2969
  return { uid: record.UID, metaPath };
2479
2970
  }
2480
2971
  if (action === 'overwrite_all') {
@@ -2486,7 +2977,7 @@ async function processRecord(entityName, record, structure, options, usedNames,
2486
2977
  }
2487
2978
  // 'overwrite' falls through to normal write
2488
2979
  } else {
2489
- log.dim(` Up to date: ${finalName}.${ext}`);
2980
+ log.dim(` Up to date: ${fileName}`);
2490
2981
  return { uid: record.UID, metaPath };
2491
2982
  }
2492
2983
  }
@@ -2494,11 +2985,10 @@ async function processRecord(entityName, record, structure, options, usedNames,
2494
2985
  }
2495
2986
 
2496
2987
  if (hasContent) {
2497
- const decoded = resolveContentValue(contentValue);
2498
- if (decoded) {
2499
- await writeFile(filePath, decoded);
2500
- log.success(`Saved ${filePath}`);
2501
- }
2988
+ const decoded = resolveContentValue(contentValue) ?? '';
2989
+ await writeFile(filePath, decoded);
2990
+ await upsertDeployEntry(filePath, record.UID, entityName, 'Content');
2991
+ log.success(`Saved ${filePath}`);
2502
2992
  }
2503
2993
 
2504
2994
  // Build metadata
@@ -2513,11 +3003,12 @@ async function processRecord(entityName, record, structure, options, usedNames,
2513
3003
  // Other base64 columns — decode and store inline or as reference
2514
3004
  const decoded = resolveContentValue(value);
2515
3005
  if (decoded && decoded.length > 200) {
2516
- // Large value: save as separate file
3006
+ // Large value: save as separate file (natural name, no ~UID)
2517
3007
  const colExt = guessExtensionForColumn(key);
2518
- const colFileName = `${finalName}-${key.toLowerCase()}.${colExt}`;
3008
+ const colFileName = `${name}-${key.toLowerCase()}.${colExt}`;
2519
3009
  const colFilePath = join(dir, colFileName);
2520
3010
  await writeFile(colFilePath, decoded);
3011
+ await upsertDeployEntry(colFilePath, record.UID, entityName, key);
2521
3012
  meta[key] = `@${colFileName}`;
2522
3013
  if (!meta._contentColumns) meta._contentColumns = [];
2523
3014
  meta._contentColumns.push(key);
@@ -2770,17 +3261,11 @@ async function resolveOutputFilenameColumns(appJson, options) {
2770
3261
  export function buildOutputFilename(entityType, node, filenameCol, parentChain = []) {
2771
3262
  const uid = node.UID || '';
2772
3263
  const rawName = node[filenameCol];
2773
- const name = rawName ? sanitizeFilename(String(rawName)) : '';
3264
+ const name = rawName ? sanitizeFilename(String(rawName)) : uid;
2774
3265
 
2775
- // Build this entity's segment
2776
- let segment;
2777
- if (entityType === 'output') {
2778
- // Root output: use Name~UID directly (no type prefix)
2779
- segment = (!name || name === uid) ? uid : `${name}~${uid}`;
2780
- } else {
2781
- // Child entities: keep type prefix (column~, join~, filter~)
2782
- segment = (!name || name === uid) ? `${entityType}~${uid}` : `${entityType}~${name}~${uid}`;
2783
- }
3266
+ // Root output: use natural name only (UID goes to .metadata~uid.json, not the stem)
3267
+ // Child entities: docName only, no ~uid (index determines uniqueness)
3268
+ const segment = entityType === 'output' ? name : entityType;
2784
3269
 
2785
3270
  const allSegments = [...parentChain, segment];
2786
3271
  return allSegments.join('.');
@@ -2794,18 +3279,21 @@ const INLINE_DOC_KEYS = ['column', 'join', 'filter'];
2794
3279
 
2795
3280
  /**
2796
3281
  * Build the companion file stem for a child entity within a root output file.
2797
- * e.g. root stem "Sales~abc", entity "output_value", uid "col1"
2798
- * → "Sales~abc.column~col1"
3282
+ * Uses array index for uniqueness (index 0 = no suffix, index N = "-N" suffix).
3283
+ *
3284
+ * e.g. rootStem "Sales", entity "output_value", index 0 → "Sales.column"
3285
+ * rootStem "Sales", entity "output_value", index 1 → "Sales.column-1"
2799
3286
  *
2800
- * @param {string} rootStem - Root output file stem (no extension)
3287
+ * @param {string} rootStem - Root output natural name (e.g. "Sales")
2801
3288
  * @param {string} physicalEntity - Physical entity name ('output_value', etc.)
2802
- * @param {string} uid - Child entity UID
3289
+ * @param {number} index - Zero-based position of this child in its array
2803
3290
  * @param {string} [parentChainStem] - Already-built ancestor stem (for nested children)
2804
3291
  * @returns {string}
2805
3292
  */
2806
- export function getChildCompanionStem(rootStem, physicalEntity, uid, parentChainStem = rootStem) {
3293
+ export function getChildCompanionStem(rootStem, physicalEntity, index, parentChainStem = rootStem) {
2807
3294
  const docName = INLINE_DOC_NAMES[physicalEntity] || physicalEntity;
2808
- return `${parentChainStem}.${docName}~${uid}`;
3295
+ const suffix = index === 0 ? '' : `-${index}`;
3296
+ return `${parentChainStem}.${docName}${suffix}`;
2809
3297
  }
2810
3298
 
2811
3299
  /**
@@ -2842,6 +3330,7 @@ async function extractCustomSqlIfNeeded(entityObj, companionStem, outputDir, ser
2842
3330
  const companionName = `${companionStem}.CustomSQL.sql`;
2843
3331
  const companionPath = join(outputDir, companionName);
2844
3332
  await writeFile(companionPath, hasContent ? decoded : '', 'utf8');
3333
+ await upsertDeployEntry(companionPath, entityObj.UID || entityObj._uid, 'output', 'CustomSQL');
2845
3334
  entityObj.CustomSQL = `@${companionName}`;
2846
3335
  entityObj._contentColumns = entityObj._contentColumns || [];
2847
3336
  if (!entityObj._contentColumns.includes('CustomSQL')) {
@@ -2887,7 +3376,8 @@ async function buildInlineOutputChildren(parentObj, node, rootStem, outputDir, s
2887
3376
 
2888
3377
  if (!Array.isArray(entityArray) || entityArray.length === 0) continue;
2889
3378
 
2890
- for (const child of entityArray) {
3379
+ for (let childIdx = 0; childIdx < entityArray.length; childIdx++) {
3380
+ const child = entityArray[childIdx];
2891
3381
  // Build a clean copy without tree-internal fields
2892
3382
  const childObj = { ...child };
2893
3383
  delete childObj._children;
@@ -2903,8 +3393,8 @@ async function buildInlineOutputChildren(parentObj, node, rootStem, outputDir, s
2903
3393
  // Ensure _entity is set to physical entity name (for push routing)
2904
3394
  childObj._entity = physicalKey;
2905
3395
 
2906
- // Compute companion stem for this child
2907
- const childStem = getChildCompanionStem(rootStem, physicalKey, child.UID, parentStem);
3396
+ // Compute companion stem for this child (index-based, not UID-based)
3397
+ const childStem = getChildCompanionStem(rootStem, physicalKey, childIdx, parentStem);
2908
3398
 
2909
3399
  // Extract CustomSQL if needed
2910
3400
  const companionFile = await extractCustomSqlIfNeeded(childObj, childStem, outputDir, serverTz);
@@ -2956,8 +3446,8 @@ async function trashOrphanedChildFiles(outputDir, rootStem) {
2956
3446
  log.dim(` Trashed orphaned child file: ${f}`);
2957
3447
  } catch { /* non-critical */ }
2958
3448
  }
2959
- // Also trash the legacy root file itself (_output~Name~UID.json or .metadata.json) if new format exists
2960
- if (matchesLegacy === false && (f === `${legacyStem}.json` || f === `${legacyStem}.metadata.json`)) {
3449
+ // Also trash the legacy root file itself (_output~Name~UID.json or .metadata.json)
3450
+ if (!matchesCurrent && (f === `${legacyStem}.json` || f === `${legacyStem}.metadata.json`)) {
2961
3451
  if (!trashCreated) {
2962
3452
  await mkdir(trashDir, { recursive: true });
2963
3453
  trashCreated = true;
@@ -2984,10 +3474,16 @@ async function trashOrphanedChildFiles(outputDir, rootStem) {
2984
3474
  * @returns {Object} - { segments: [{entity, name, uid}], rootOutputUid, entityType, uid }
2985
3475
  */
2986
3476
  export function parseOutputHierarchyFile(filename) {
2987
- // Strip .metadata.json or legacy .json extension
3477
+ // Strip metadata or .json extension
2988
3478
  let base = filename;
2989
- if (base.endsWith('.metadata.json')) base = base.substring(0, base.length - 14);
2990
- else if (base.endsWith('.json')) base = base.substring(0, base.length - 5);
3479
+ const metaParsed = parseMetaFilename(filename);
3480
+ if (metaParsed) {
3481
+ base = metaParsed.naturalBase;
3482
+ } else if (base.endsWith('.metadata.json')) {
3483
+ base = base.substring(0, base.length - 14);
3484
+ } else if (base.endsWith('.json')) {
3485
+ base = base.substring(0, base.length - 5);
3486
+ }
2991
3487
 
2992
3488
  // Split into segments by finding entity type boundaries
2993
3489
  // Entity types are: output~ (or legacy _output~), column~, join~, filter~
@@ -3108,15 +3604,26 @@ async function processOutputHierarchy(appJson, structure, options, serverTz) {
3108
3604
 
3109
3605
  await mkdir(binDir, { recursive: true });
3110
3606
 
3111
- // Build root output filename
3607
+ // Build root output filename (natural name, no UID in stem)
3112
3608
  const rootBasename = buildOutputFilename('output', output, filenameCols.output);
3113
- let rootMetaPath = join(binDir, `${rootBasename}.metadata.json`);
3609
+ const rootUid = output.UID || '';
3610
+ let rootMetaPath = join(binDir, buildMetaFilename(rootBasename, rootUid));
3114
3611
 
3115
- // Legacy fallback: if old .json exists but new .metadata.json doesn't, rename in-place
3612
+ // Legacy fallback: rename old-format metadata to new convention
3613
+ const legacyTildeOutputMeta = join(binDir, `${rootBasename}~${rootUid}.metadata.json`);
3116
3614
  const legacyJsonPath = join(binDir, `${rootBasename}.json`);
3117
- if (!await fileExists(rootMetaPath) && await fileExists(legacyJsonPath)) {
3118
- await rename(legacyJsonPath, rootMetaPath);
3119
- log.dim(` Renamed ${rootBasename}.json → ${rootBasename}.metadata.json`);
3615
+ const legacyOutputMeta = join(binDir, `${rootBasename}.metadata.json`);
3616
+ if (!await fileExists(rootMetaPath)) {
3617
+ if (await fileExists(legacyTildeOutputMeta)) {
3618
+ await rename(legacyTildeOutputMeta, rootMetaPath);
3619
+ log.dim(` Renamed ${basename(legacyTildeOutputMeta)} → ${basename(rootMetaPath)}`);
3620
+ } else if (await fileExists(legacyOutputMeta)) {
3621
+ await rename(legacyOutputMeta, rootMetaPath);
3622
+ log.dim(` Renamed ${basename(legacyOutputMeta)} → ${basename(rootMetaPath)}`);
3623
+ } else if (await fileExists(legacyJsonPath)) {
3624
+ await rename(legacyJsonPath, rootMetaPath);
3625
+ log.dim(` Renamed ${rootBasename}.json → ${basename(rootMetaPath)}`);
3626
+ }
3120
3627
  }
3121
3628
 
3122
3629
  // Detect old-format files that need migration to inline children format.