get-tbd 0.1.23 → 0.1.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -2
- package/dist/bin.mjs +247 -52
- package/dist/bin.mjs.map +1 -1
- package/dist/cli.mjs +87 -50
- package/dist/cli.mjs.map +1 -1
- package/dist/docs/README.md +2 -2
- package/dist/docs/templates/research-brief.md +46 -5
- package/dist/{id-mapping-JGow6Jk4.mjs → id-mapping-BSNsaOCC.mjs} +165 -7
- package/dist/id-mapping-BSNsaOCC.mjs.map +1 -0
- package/dist/{id-mapping-0-R0X8zb.mjs → id-mapping-DMMKwXZv.mjs} +2 -2
- package/dist/index.mjs +1 -1
- package/dist/{src-7qUDeWJf.mjs → src-DuXy2Uyd.mjs} +2 -2
- package/dist/{src-7qUDeWJf.mjs.map → src-DuXy2Uyd.mjs.map} +1 -1
- package/dist/tbd +247 -52
- package/package.json +1 -1
- package/dist/id-mapping-JGow6Jk4.mjs.map +0 -1
package/README.md
CHANGED
|
@@ -279,7 +279,7 @@ npm install -g get-tbd@latest
|
|
|
279
279
|
### Setup
|
|
280
280
|
|
|
281
281
|
```bash
|
|
282
|
-
# Fresh project (--prefix is REQUIRED—
|
|
282
|
+
# Fresh project (--prefix is REQUIRED—a short alphabetic name used as an issue ID prefix, e.g. myapp → issues like myapp-a1b2)
|
|
283
283
|
tbd setup --auto --prefix=myapp
|
|
284
284
|
|
|
285
285
|
# Joining an existing tbd project (no prefix needed—reads existing config)
|
|
@@ -299,7 +299,7 @@ tbd setup --from-beads
|
|
|
299
299
|
**First contributor:**
|
|
300
300
|
```bash
|
|
301
301
|
npm install -g get-tbd@latest
|
|
302
|
-
tbd setup --auto --prefix=
|
|
302
|
+
tbd setup --auto --prefix=proj # Short alphabetic prefix for issue IDs
|
|
303
303
|
git add .tbd/ .claude/ && git commit -m "Initialize tbd"
|
|
304
304
|
git push
|
|
305
305
|
```
|
package/dist/bin.mjs
CHANGED
|
@@ -8,7 +8,7 @@ import process$1 from "node:process";
|
|
|
8
8
|
import matter from "gray-matter";
|
|
9
9
|
import os, { homedir } from "node:os";
|
|
10
10
|
import tty from "node:tty";
|
|
11
|
-
import { access, chmod, cp, mkdir, readFile, readdir, rename, rm, stat, unlink } from "node:fs/promises";
|
|
11
|
+
import { access, chmod, cp, mkdir, readFile, readdir, rename, rm, rmdir, stat, unlink } from "node:fs/promises";
|
|
12
12
|
import { Readable } from "node:stream";
|
|
13
13
|
import { promisify } from "node:util";
|
|
14
14
|
import crypto, { randomBytes } from "node:crypto";
|
|
@@ -14033,7 +14033,7 @@ function serializeIssue(issue) {
|
|
|
14033
14033
|
* Package version, derived from git at build time.
|
|
14034
14034
|
* Format: X.Y.Z for releases, X.Y.Z-dev.N.hash for dev builds.
|
|
14035
14035
|
*/
|
|
14036
|
-
const VERSION$1 = "0.1.23";
|
|
14036
|
+
const VERSION$1 = "0.1.25";
|
|
14037
14037
|
|
|
14038
14038
|
//#endregion
|
|
14039
14039
|
//#region src/cli/lib/version.ts
|
|
@@ -99273,7 +99273,11 @@ async function migrateDataToWorktree(baseDir, removeSource = false) {
|
|
|
99273
99273
|
await mkdir(correctIssuesPath, { recursive: true });
|
|
99274
99274
|
await mkdir(correctMappingsPath, { recursive: true });
|
|
99275
99275
|
for (const file of issueFiles) await cp(join(wrongIssuesPath, file), join(correctIssuesPath, file));
|
|
99276
|
-
for (const file of mappingFiles)
|
|
99276
|
+
for (const file of mappingFiles) if (file === "ids.yml") {
|
|
99277
|
+
const { loadIdMapping, mergeIdMappings, saveIdMapping } = await Promise.resolve().then(() => id_mapping_exports);
|
|
99278
|
+
const sourceMapping = await loadIdMapping(wrongPath);
|
|
99279
|
+
await saveIdMapping(correctPath, mergeIdMappings(await loadIdMapping(correctPath), sourceMapping));
|
|
99280
|
+
} else await cp(join(wrongMappingsPath, file), join(correctMappingsPath, file));
|
|
99277
99281
|
const totalFiles = issueFiles.length + mappingFiles.length;
|
|
99278
99282
|
await git("-C", worktreePath, "add", "-A");
|
|
99279
99283
|
if (await git("-C", worktreePath, "diff", "--cached", "--quiet").then(() => false).catch(() => true)) await git("-C", worktreePath, "commit", "--no-verify", "-m", `tbd: migrate ${totalFiles} file(s) from incorrect location`);
|
|
@@ -99747,31 +99751,154 @@ async function listIssues(baseDir) {
|
|
|
99747
99751
|
return [];
|
|
99748
99752
|
}
|
|
99749
99753
|
const mdFiles = files.filter((f) => f.endsWith(".md"));
|
|
99750
|
-
const
|
|
99751
|
-
|
|
99754
|
+
const BATCH_SIZE = 200;
|
|
99755
|
+
const issues = [];
|
|
99756
|
+
for (let i = 0; i < mdFiles.length; i += BATCH_SIZE) {
|
|
99757
|
+
const batch = mdFiles.slice(i, i + BATCH_SIZE);
|
|
99758
|
+
const fileContents = await Promise.all(batch.map(async (file) => {
|
|
99759
|
+
const filePath = join(issuesDir, file);
|
|
99760
|
+
try {
|
|
99761
|
+
return {
|
|
99762
|
+
file,
|
|
99763
|
+
content: await readFile(filePath, "utf-8")
|
|
99764
|
+
};
|
|
99765
|
+
} catch {
|
|
99766
|
+
return {
|
|
99767
|
+
file,
|
|
99768
|
+
content: null
|
|
99769
|
+
};
|
|
99770
|
+
}
|
|
99771
|
+
}));
|
|
99772
|
+
for (const { file, content } of fileContents) {
|
|
99773
|
+
if (content === null) continue;
|
|
99774
|
+
try {
|
|
99775
|
+
const issue = parseIssue(content);
|
|
99776
|
+
issues.push(issue);
|
|
99777
|
+
} catch (error) {
|
|
99778
|
+
console.warn(`Skipping invalid issue file: ${file}`, error);
|
|
99779
|
+
}
|
|
99780
|
+
}
|
|
99781
|
+
}
|
|
99782
|
+
return issues;
|
|
99783
|
+
}
|
|
99784
|
+
|
|
99785
|
+
//#endregion
|
|
99786
|
+
//#region src/utils/lockfile.ts
|
|
99787
|
+
/**
|
|
99788
|
+
* Directory-based mutual exclusion for concurrent file access.
|
|
99789
|
+
*
|
|
99790
|
+
* Note: Despite the name "lockfile", this is NOT a POSIX file lock (flock/fcntl).
|
|
99791
|
+
* It uses mkdir to create a lock *directory* as a coordination convention — no
|
|
99792
|
+
* OS-level file locking syscalls are involved. This makes it portable across all
|
|
99793
|
+
* filesystems, including NFS and other network mounts where flock/fcntl locks
|
|
99794
|
+
* are unreliable or unsupported.
|
|
99795
|
+
*
|
|
99796
|
+
* This is the same strategy used by:
|
|
99797
|
+
*
|
|
99798
|
+
* - **Git** for ref updates (e.g., `.git/refs/heads/main.lock`)
|
|
99799
|
+
* See: https://git-scm.com/docs/gitrepository-layout ("lockfile protocol")
|
|
99800
|
+
* - **npm** for package-lock.json concurrent access
|
|
99801
|
+
*
|
|
99802
|
+
* ## Why mkdir?
|
|
99803
|
+
*
|
|
99804
|
+
* `mkdir(2)` is atomic on all common filesystems (local and network): it either
|
|
99805
|
+
* creates the directory or returns EEXIST. Unlike `open(O_CREAT|O_EXCL)`,
|
|
99806
|
+
* a directory lock is trivially distinguishable from normal files.
|
|
99807
|
+
*
|
|
99808
|
+
* Node.js `fs.mkdir` maps directly to the mkdir(2) syscall, preserving
|
|
99809
|
+
* the atomicity guarantee:
|
|
99810
|
+
* https://nodejs.org/api/fs.html#fsmkdirpath-options-callback
|
|
99811
|
+
*
|
|
99812
|
+
* ## Lock lifecycle
|
|
99813
|
+
*
|
|
99814
|
+
* 1. **Acquire**: `mkdir(lockDir)` — fails with EEXIST if held by another process
|
|
99815
|
+
* 2. **Hold**: Execute the critical section
|
|
99816
|
+
* 3. **Release**: `rmdir(lockDir)` — in a finally block
|
|
99817
|
+
* 4. **Stale detection**: If lock mtime exceeds a threshold, assume the holder
|
|
99818
|
+
* crashed and break the lock. This is a heuristic — safe when the critical
|
|
99819
|
+
* section is short-lived (sub-second for file I/O).
|
|
99820
|
+
*
|
|
99821
|
+
* ## Failure on timeout
|
|
99822
|
+
*
|
|
99823
|
+
* If the lock cannot be acquired within the timeout, a LockAcquisitionError is
|
|
99824
|
+
* thrown. This prevents the dangerous "degraded mode" where the critical section
|
|
99825
|
+
* runs without mutual exclusion, which can cause data loss (e.g., lost ID
|
|
99826
|
+
* mappings during concurrent `tbd create`).
|
|
99827
|
+
*
|
|
99828
|
+
* IMPORTANT: `timeoutMs` must be greater than `staleMs` so stale locks from
|
|
99829
|
+
* crashed processes are always detected and broken before the timeout expires.
|
|
99830
|
+
*/
|
|
99831
|
+
const DEFAULT_TIMEOUT_MS = 1e4;
|
|
99832
|
+
const DEFAULT_POLL_MS = 50;
|
|
99833
|
+
const DEFAULT_STALE_MS = 5e3;
|
|
99834
|
+
/**
|
|
99835
|
+
* Error thrown when the lock cannot be acquired within the timeout.
|
|
99836
|
+
*/
|
|
99837
|
+
var LockAcquisitionError = class extends Error {
|
|
99838
|
+
constructor(lockPath, timeoutMs) {
|
|
99839
|
+
super(`Failed to acquire lock at ${lockPath} within ${timeoutMs}ms. Another process may be holding the lock. If this persists, delete the lock directory manually and retry.`);
|
|
99840
|
+
this.name = "LockAcquisitionError";
|
|
99841
|
+
}
|
|
99842
|
+
};
|
|
99843
|
+
/**
|
|
99844
|
+
* Execute `fn` while holding a lockfile.
|
|
99845
|
+
*
|
|
99846
|
+
* The lock is a directory at `lockPath` (typically `<target-file>.lock`).
|
|
99847
|
+
* Concurrent callers will wait up to `timeoutMs` for the lock, polling
|
|
99848
|
+
* every `pollMs`. Stale locks older than `staleMs` are broken automatically.
|
|
99849
|
+
*
|
|
99850
|
+
* If the lock cannot be acquired within the timeout, a LockAcquisitionError
|
|
99851
|
+
* is thrown. This ensures mutual exclusion is never silently bypassed, which
|
|
99852
|
+
* prevents data loss from concurrent writes.
|
|
99853
|
+
*
|
|
99854
|
+
* @param lockPath - Path to use as the lock directory (e.g., "/path/to/ids.yml.lock")
|
|
99855
|
+
* @param fn - Critical section to execute under the lock
|
|
99856
|
+
* @param options - Timing parameters for lock acquisition
|
|
99857
|
+
* @returns The return value of `fn`
|
|
99858
|
+
* @throws LockAcquisitionError if the lock cannot be acquired within the timeout
|
|
99859
|
+
*
|
|
99860
|
+
* @example
|
|
99861
|
+
* ```ts
|
|
99862
|
+
* await withLockfile('/path/to/ids.yml.lock', async () => {
|
|
99863
|
+
* const data = await readFile('/path/to/ids.yml', 'utf-8');
|
|
99864
|
+
* const updated = mergeEntries(data, newEntries);
|
|
99865
|
+
* await writeFile('/path/to/ids.yml', updated);
|
|
99866
|
+
* });
|
|
99867
|
+
* ```
|
|
99868
|
+
*/
|
|
99869
|
+
async function withLockfile(lockPath, fn, options) {
|
|
99870
|
+
const timeoutMs = options?.timeoutMs ?? DEFAULT_TIMEOUT_MS;
|
|
99871
|
+
const pollMs = options?.pollMs ?? DEFAULT_POLL_MS;
|
|
99872
|
+
const staleMs = options?.staleMs ?? DEFAULT_STALE_MS;
|
|
99873
|
+
const deadline = Date.now() + timeoutMs;
|
|
99874
|
+
let acquired = false;
|
|
99875
|
+
while (Date.now() < deadline) try {
|
|
99876
|
+
await mkdir(lockPath);
|
|
99877
|
+
acquired = true;
|
|
99878
|
+
break;
|
|
99879
|
+
} catch (error) {
|
|
99880
|
+
if (error.code !== "EEXIST") break;
|
|
99752
99881
|
try {
|
|
99753
|
-
|
|
99754
|
-
|
|
99755
|
-
|
|
99756
|
-
|
|
99882
|
+
const lockStat = await stat(lockPath);
|
|
99883
|
+
if (Date.now() - lockStat.mtimeMs > staleMs) {
|
|
99884
|
+
try {
|
|
99885
|
+
await rmdir(lockPath);
|
|
99886
|
+
} catch {}
|
|
99887
|
+
continue;
|
|
99888
|
+
}
|
|
99757
99889
|
} catch {
|
|
99758
|
-
|
|
99759
|
-
file,
|
|
99760
|
-
content: null
|
|
99761
|
-
};
|
|
99890
|
+
continue;
|
|
99762
99891
|
}
|
|
99763
|
-
|
|
99764
|
-
|
|
99765
|
-
|
|
99766
|
-
|
|
99892
|
+
await new Promise((resolve) => setTimeout(resolve, pollMs));
|
|
99893
|
+
}
|
|
99894
|
+
if (!acquired) throw new LockAcquisitionError(lockPath, timeoutMs);
|
|
99895
|
+
try {
|
|
99896
|
+
return await fn();
|
|
99897
|
+
} finally {
|
|
99767
99898
|
try {
|
|
99768
|
-
|
|
99769
|
-
|
|
99770
|
-
} catch (error) {
|
|
99771
|
-
console.warn(`Skipping invalid issue file: ${file}`, error);
|
|
99772
|
-
}
|
|
99899
|
+
await rmdir(lockPath);
|
|
99900
|
+
} catch {}
|
|
99773
99901
|
}
|
|
99774
|
-
return issues;
|
|
99775
99902
|
}
|
|
99776
99903
|
|
|
99777
99904
|
//#endregion
|
|
@@ -99918,15 +100045,54 @@ async function loadIdMapping(baseDir) {
|
|
|
99918
100045
|
};
|
|
99919
100046
|
}
|
|
99920
100047
|
/**
|
|
99921
|
-
* Save the ID mapping to disk.
|
|
100048
|
+
* Save the ID mapping to disk with mutual exclusion.
|
|
100049
|
+
*
|
|
100050
|
+
* Uses a lockfile to serialize concurrent writers, then performs read-merge-write
|
|
100051
|
+
* inside the lock. This prevents the lost-update problem when multiple `tbd create`
|
|
100052
|
+
* commands run in parallel.
|
|
100053
|
+
*
|
|
100054
|
+
* The merge is safe because ID mappings are append-only — entries are never
|
|
100055
|
+
* intentionally removed. If the lock cannot be acquired within the timeout,
|
|
100056
|
+
* a LockAcquisitionError is thrown rather than proceeding without protection.
|
|
99922
100057
|
*/
|
|
99923
100058
|
async function saveIdMapping(baseDir, mapping) {
|
|
99924
100059
|
const filePath = getMappingPath(baseDir);
|
|
99925
100060
|
await mkdir(dirname(filePath), { recursive: true });
|
|
99926
|
-
|
|
99927
|
-
|
|
99928
|
-
|
|
99929
|
-
|
|
100061
|
+
await withLockfile(filePath + ".lock", async () => {
|
|
100062
|
+
let merged = mapping;
|
|
100063
|
+
let onDiskSize = 0;
|
|
100064
|
+
try {
|
|
100065
|
+
const onDisk = await loadIdMappingRaw(filePath);
|
|
100066
|
+
onDiskSize = onDisk.shortToUlid.size;
|
|
100067
|
+
if (onDiskSize > 0) merged = mergeIdMappings(mapping, onDisk);
|
|
100068
|
+
} catch {}
|
|
100069
|
+
if (merged.shortToUlid.size < onDiskSize) throw new Error(`Refusing to save ID mapping: would lose ${onDiskSize - merged.shortToUlid.size} entries (on-disk: ${onDiskSize}, proposed: ${merged.shortToUlid.size}). ID mappings are append-only — this indicates a bug.`);
|
|
100070
|
+
const data = {};
|
|
100071
|
+
const sortedKeys = naturalSort(Array.from(merged.shortToUlid.keys()));
|
|
100072
|
+
for (const key of sortedKeys) data[key] = merged.shortToUlid.get(key);
|
|
100073
|
+
await writeFile(filePath, stringifyYaml(data));
|
|
100074
|
+
});
|
|
100075
|
+
}
|
|
100076
|
+
/**
|
|
100077
|
+
* Load an ID mapping directly from a file path (internal helper for save merging).
|
|
100078
|
+
* Separated from loadIdMapping to avoid coupling the save path to baseDir resolution.
|
|
100079
|
+
*/
|
|
100080
|
+
async function loadIdMappingRaw(filePath) {
|
|
100081
|
+
const { data: rawData } = parseYamlToleratingDuplicateKeys(await readFile(filePath, "utf-8"), filePath);
|
|
100082
|
+
const data = rawData ?? {};
|
|
100083
|
+
const parseResult = IdMappingYamlSchema.safeParse(data);
|
|
100084
|
+
if (!parseResult.success) throw new Error(`Invalid ID mapping format in ${filePath}: ${parseResult.error.message}`);
|
|
100085
|
+
const validData = parseResult.data;
|
|
100086
|
+
const shortToUlid = /* @__PURE__ */ new Map();
|
|
100087
|
+
const ulidToShort = /* @__PURE__ */ new Map();
|
|
100088
|
+
for (const [shortId, ulid] of Object.entries(validData)) {
|
|
100089
|
+
shortToUlid.set(shortId, ulid);
|
|
100090
|
+
ulidToShort.set(ulid, shortId);
|
|
100091
|
+
}
|
|
100092
|
+
return {
|
|
100093
|
+
shortToUlid,
|
|
100094
|
+
ulidToShort
|
|
100095
|
+
};
|
|
99930
100096
|
}
|
|
99931
100097
|
/**
|
|
99932
100098
|
* Calculate the optimal short ID length based on existing ID count.
|
|
@@ -104456,9 +104622,18 @@ var DoctorHandler = class extends BaseCommand {
|
|
|
104456
104622
|
healthChecks.push(await this.checkIdMappingDuplicates(options.fix));
|
|
104457
104623
|
healthChecks.push(await this.checkTempFiles(options.fix));
|
|
104458
104624
|
healthChecks.push(this.checkIssueValidity(this.issues));
|
|
104459
|
-
healthChecks.push(await this.checkMissingMappings(options.fix));
|
|
104460
104625
|
healthChecks.push(await this.checkWorktree(options.fix));
|
|
104461
|
-
|
|
104626
|
+
const dataLocationResult = await this.checkDataLocation(options.fix);
|
|
104627
|
+
healthChecks.push(dataLocationResult);
|
|
104628
|
+
if (dataLocationResult.status === "ok" && dataLocationResult.message?.includes("migrated")) {
|
|
104629
|
+
this.dataSyncDir = await resolveDataSyncDir(this.cwd);
|
|
104630
|
+
try {
|
|
104631
|
+
this.issues = await listIssues(this.dataSyncDir);
|
|
104632
|
+
} catch {}
|
|
104633
|
+
}
|
|
104634
|
+
const parsedMaxHistory = options.maxHistory ? parseInt(options.maxHistory, 10) : 50;
|
|
104635
|
+
const maxHistory = Number.isNaN(parsedMaxHistory) || parsedMaxHistory < 0 ? 50 : parsedMaxHistory;
|
|
104636
|
+
healthChecks.push(await this.checkMissingMappings(options.fix, maxHistory));
|
|
104462
104637
|
healthChecks.push(await this.checkLocalSyncBranch());
|
|
104463
104638
|
healthChecks.push(await this.checkRemoteSyncBranch());
|
|
104464
104639
|
healthChecks.push(await this.checkLocalVsRemoteData());
|
|
@@ -104810,7 +104985,7 @@ var DoctorHandler = class extends BaseCommand {
|
|
|
104810
104985
|
*
|
|
104811
104986
|
* With --fix, creates missing mappings automatically.
|
|
104812
104987
|
*/
|
|
104813
|
-
async checkMissingMappings(fix) {
|
|
104988
|
+
async checkMissingMappings(fix, maxHistory = 50) {
|
|
104814
104989
|
if (this.issues.length === 0) return {
|
|
104815
104990
|
name: "ID mapping coverage",
|
|
104816
104991
|
status: "ok"
|
|
@@ -104827,25 +105002,41 @@ var DoctorHandler = class extends BaseCommand {
|
|
|
104827
105002
|
status: "ok"
|
|
104828
105003
|
};
|
|
104829
105004
|
if (fix && !this.checkDryRun("Create missing ID mappings")) {
|
|
104830
|
-
const { parseIdMappingFromYaml } = await Promise.resolve().then(() => id_mapping_exports);
|
|
105005
|
+
const { parseIdMappingFromYaml, mergeIdMappings } = await Promise.resolve().then(() => id_mapping_exports);
|
|
104831
105006
|
let historicalMapping;
|
|
104832
105007
|
try {
|
|
104833
105008
|
const syncBranch = (await Promise.resolve().then(() => config_exports).then((m) => m.readConfig(this.cwd))).sync.branch;
|
|
104834
|
-
const
|
|
104835
|
-
if (
|
|
104836
|
-
|
|
104837
|
-
|
|
104838
|
-
|
|
105009
|
+
const logArgs = ["log", "--format=%H"];
|
|
105010
|
+
if (maxHistory > 0) logArgs.push(`-${maxHistory}`);
|
|
105011
|
+
logArgs.push(syncBranch, "--", `${DATA_SYNC_DIR}/mappings/ids.yml`);
|
|
105012
|
+
const commitHashes = (await git(...logArgs)).trim().split("\n").filter(Boolean);
|
|
105013
|
+
for (const commitHash of commitHashes) try {
|
|
105014
|
+
const idsContent = await git("show", `${commitHash}:${DATA_SYNC_DIR}/mappings/ids.yml`);
|
|
105015
|
+
if (idsContent) {
|
|
105016
|
+
const versionMapping = parseIdMappingFromYaml(idsContent);
|
|
105017
|
+
if (!historicalMapping) historicalMapping = versionMapping;
|
|
105018
|
+
else historicalMapping = mergeIdMappings(historicalMapping, versionMapping);
|
|
105019
|
+
}
|
|
105020
|
+
} catch {}
|
|
104839
105021
|
} catch {}
|
|
105022
|
+
const historicalCount = historicalMapping?.shortToUlid.size ?? 0;
|
|
104840
105023
|
const result = reconcileMappings(missingIds, mapping, historicalMapping);
|
|
104841
105024
|
await saveIdMapping(this.dataSyncDir, mapping);
|
|
104842
105025
|
const parts = [];
|
|
104843
105026
|
if (result.recovered.length > 0) parts.push(`recovered ${result.recovered.length} from git history`);
|
|
104844
105027
|
if (result.created.length > 0) parts.push(`created ${result.created.length} new`);
|
|
105028
|
+
const details = [
|
|
105029
|
+
`Scanned ${maxHistory > 0 ? `up to ${maxHistory}` : "all"} git commits for ids.yml history`,
|
|
105030
|
+
`Found ${historicalCount} historical mapping(s) to use for recovery`,
|
|
105031
|
+
`${missingIds.length} issue(s) were missing short ID mappings`
|
|
105032
|
+
];
|
|
105033
|
+
if (result.recovered.length > 0) details.push(`Recovered ${result.recovered.length} original short ID(s) from git history`);
|
|
105034
|
+
if (result.created.length > 0) details.push(`Generated ${result.created.length} new short ID(s) (originals not found in history)`);
|
|
104845
105035
|
return {
|
|
104846
105036
|
name: "ID mapping coverage",
|
|
104847
105037
|
status: "ok",
|
|
104848
|
-
message: parts.join(", ")
|
|
105038
|
+
message: parts.join(", "),
|
|
105039
|
+
details
|
|
104849
105040
|
};
|
|
104850
105041
|
}
|
|
104851
105042
|
return {
|
|
@@ -105004,13 +105195,19 @@ var DoctorHandler = class extends BaseCommand {
|
|
|
105004
105195
|
path: wrongIssuesPath,
|
|
105005
105196
|
details: ["Cannot migrate: worktree must be repaired first.", "The worktree repair should have run before this check."]
|
|
105006
105197
|
};
|
|
105007
|
-
const result = await migrateDataToWorktree(this.cwd);
|
|
105008
|
-
if (result.success)
|
|
105009
|
-
|
|
105010
|
-
|
|
105011
|
-
|
|
105012
|
-
|
|
105013
|
-
|
|
105198
|
+
const result = await migrateDataToWorktree(this.cwd, true);
|
|
105199
|
+
if (result.success) {
|
|
105200
|
+
const details = [];
|
|
105201
|
+
if (result.backupPath) details.push(`Backed up to ${result.backupPath}`);
|
|
105202
|
+
details.push(`Migrated ${result.migratedCount} file(s) from .tbd/data-sync/ to worktree`, "Source files removed after successful migration");
|
|
105203
|
+
return {
|
|
105204
|
+
name: "Data location",
|
|
105205
|
+
status: "ok",
|
|
105206
|
+
message: result.backupPath ? `migrated ${result.migratedCount} file(s), backed up to ${result.backupPath}` : `migrated ${result.migratedCount} file(s)`,
|
|
105207
|
+
path: wrongIssuesPath,
|
|
105208
|
+
details
|
|
105209
|
+
};
|
|
105210
|
+
}
|
|
105014
105211
|
return {
|
|
105015
105212
|
name: "Data location",
|
|
105016
105213
|
status: "error",
|
|
@@ -105207,15 +105404,13 @@ var DoctorHandler = class extends BaseCommand {
|
|
|
105207
105404
|
};
|
|
105208
105405
|
if (consistency.localAhead > 0) return {
|
|
105209
105406
|
name: "Sync consistency",
|
|
105210
|
-
status: "
|
|
105211
|
-
message: `${consistency.localAhead} commit(s)
|
|
105212
|
-
suggestion: "Run: tbd sync to push changes"
|
|
105407
|
+
status: "ok",
|
|
105408
|
+
message: `${consistency.localAhead} local commit(s) not yet pushed — run \`tbd sync\` to push`
|
|
105213
105409
|
};
|
|
105214
105410
|
if (consistency.localBehind > 0) return {
|
|
105215
105411
|
name: "Sync consistency",
|
|
105216
|
-
status: "
|
|
105217
|
-
message: `${consistency.localBehind} commit(s)
|
|
105218
|
-
suggestion: "Run: tbd sync to pull changes"
|
|
105412
|
+
status: "ok",
|
|
105413
|
+
message: `${consistency.localBehind} remote commit(s) not yet pulled — run \`tbd sync\` to pull`
|
|
105219
105414
|
};
|
|
105220
105415
|
return {
|
|
105221
105416
|
name: "Sync consistency",
|
|
@@ -105236,7 +105431,7 @@ var DoctorHandler = class extends BaseCommand {
|
|
|
105236
105431
|
}
|
|
105237
105432
|
}
|
|
105238
105433
|
};
|
|
105239
|
-
const doctorCommand = new Command("doctor").description("Diagnose and repair repository").option("--fix", "Attempt to fix issues").action(async (options, command) => {
|
|
105434
|
+
const doctorCommand = new Command("doctor").description("Diagnose and repair repository").option("--fix", "Attempt to fix issues").option("--max-history <n>", "Max git commits to scan for ID mapping recovery (0 = full history)", "50").action(async (options, command) => {
|
|
105240
105435
|
await new DoctorHandler(command).run(options);
|
|
105241
105436
|
});
|
|
105242
105437
|
|