@cleocode/cleo 2026.4.44 → 2026.4.46
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cli/index.js +124 -105
- package/dist/cli/index.js.map +3 -3
- package/package.json +8 -8
package/dist/cli/index.js
CHANGED
@@ -11467,16 +11467,27 @@ function reconcileJournal(nativeDb, migrationsFolder, existenceTable, logSubsyst
  const localMigrations = readMigrationFiles({ migrationsFolder });
  const localHashes = new Set(localMigrations.map((m2) => m2.hash));
  const dbEntries = nativeDb.prepare('SELECT hash FROM "__drizzle_migrations"').all();
- const
+ const orphanedEntries = dbEntries.filter((e) => !localHashes.has(e.hash));
+ const hasOrphanedEntries = orphanedEntries.length > 0;
  if (hasOrphanedEntries) {
- const
-
-
-
-
-
-
-
+ const dbHashes = new Set(dbEntries.map((e) => e.hash));
+ const allLocalHashesPresentInDb = localMigrations.every((m2) => dbHashes.has(m2.hash));
+ if (allLocalHashesPresentInDb) {
+ const log13 = getLogger(logSubsystem);
+ log13.debug(
+ { extra: orphanedEntries.length },
+ `Migration journal has ${orphanedEntries.length} entries for migrations not known to this install (DB is ahead). Skipping reconciliation.`
+ );
+ } else {
+ const log13 = getLogger(logSubsystem);
+ log13.warn(
+ { orphaned: orphanedEntries.length },
+ `Detected stale migration journal entries from a previous CLEO version. Reconciling.`
+ );
+ nativeDb.exec('DELETE FROM "__drizzle_migrations"');
+ for (const m2 of localMigrations) {
+ insertJournalEntry(nativeDb, m2.hash, m2.folderMillis, m2.name ?? "");
+ }
  }
  }
  }
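Note: the hunk above splits the orphaned-journal case in two. A minimal sketch of that decision follows; the helper name classifyJournal is illustrative and not part of the package.

// Illustrative sketch of the reconciliation decision added above (helper name is hypothetical).
function classifyJournal(localMigrations, dbEntries) {
  const localHashes = new Set(localMigrations.map((m) => m.hash));
  const dbHashes = new Set(dbEntries.map((e) => e.hash));
  const orphaned = dbEntries.filter((e) => !localHashes.has(e.hash));
  if (orphaned.length === 0) return "in-sync"; // nothing to reconcile
  const dbIsAhead = localMigrations.every((m) => dbHashes.has(m.hash));
  // "db-ahead": every local migration is already journaled plus extras (e.g. after a downgrade) -> leave the journal alone
  // "stale-journal": the journal no longer matches the local files -> rebuild it from readMigrationFiles()
  return dbIsAhead ? "db-ahead" : "stale-journal";
}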
@@ -11500,8 +11511,8 @@ function reconcileJournal(nativeDb, migrationsFolder, existenceTable, logSubsyst
  return cols.some((c) => c.name === column);
  });
  if (allColumnsExist) {
- const
-
+ const log13 = getLogger(logSubsystem);
+ log13.warn(
  { migration: migration.name, columns: alterMatches },
  `Detected partially-applied migration ${migration.name} \u2014 columns exist but journal entry missing. Auto-reconciling.`
  );
@@ -11519,8 +11530,8 @@ function reconcileJournal(nativeDb, migrationsFolder, existenceTable, logSubsyst
  for (const entry of unnamedEntries) {
  const migrationName = hashToName.get(entry.hash);
  if (!migrationName) continue;
- const
-
+ const log13 = getLogger(logSubsystem);
+ log13.warn(
  { id: entry.id, hash: entry.hash, name: migrationName },
  `Backfilling missing name on journal entry id=${entry.id} \u2014 Drizzle v1 beta requires name for applied-migration detection.`
  );
@@ -11564,8 +11575,8 @@ function ensureColumns(nativeDb, tableName, requiredColumns, logSubsystem) {
  const existingCols = new Set(columns.map((c) => c.name));
  for (const req of requiredColumns) {
  if (!existingCols.has(req.name)) {
- const
-
+ const log13 = getLogger(logSubsystem);
+ log13.warn(
  { column: req.name },
  `Adding missing column ${tableName}.${req.name} via ALTER TABLE`
  );
@@ -13963,7 +13974,7 @@ function getDbPath(cwd) {
  return join10(getCleoDirAbsolute(cwd), DB_FILENAME2);
  }
  async function autoRecoverFromBackup(nativeDb, dbPath, cwd) {
- const
+ const log13 = getLogger("sqlite");
  try {
  const countResult = nativeDb.prepare("SELECT COUNT(*) as cnt FROM tasks").get();
  const taskCount = countResult?.cnt ?? 0;
@@ -13984,7 +13995,7 @@ async function autoRecoverFromBackup(nativeDb, dbPath, cwd) {
  if (backupTaskCount < MIN_BACKUP_TASK_COUNT) {
  return;
  }
-
+ log13.warn(
  { dbPath, backupPath: newestBackup.path, backupTasks: backupTaskCount },
  `Empty database detected with ${backupTaskCount}-task backup available. Auto-recovering from backup. This likely happened because git-tracked WAL/SHM files were overwritten during a branch switch (T5188).`
  );
@@ -14002,7 +14013,7 @@ async function autoRecoverFromBackup(nativeDb, dbPath, cwd) {
  const tempPath = dbPath + ".recovery-tmp";
  copyFileSync4(newestBackup.path, tempPath);
  renameSync(tempPath, dbPath);
-
+ log13.info(
  { dbPath, backupPath: newestBackup.path, restoredTasks: backupTaskCount },
  "Database auto-recovered from backup successfully."
  );
@@ -14012,7 +14023,7 @@ async function autoRecoverFromBackup(nativeDb, dbPath, cwd) {
  runMigrations(restoredNativeDb, restoredDb);
  _db = restoredDb;
  } catch (err) {
-
+ log13.error({ err, dbPath }, "Auto-recovery from backup failed. Continuing with empty database.");
  }
  }
  async function getDb(cwd) {
@@ -14046,7 +14057,7 @@ async function getDb(cwd) {
  const { execFileSync: execFileSync19 } = await import("node:child_process");
  const gitCwd = resolve3(dbPath, "..", "..");
  const filesToCheck = [dbPath, dbPath + "-wal", dbPath + "-shm"];
- const
+ const log13 = getLogger("sqlite");
  for (const fileToCheck of filesToCheck) {
  try {
  execFileSync19("git", ["ls-files", "--error-unmatch", fileToCheck], {
@@ -14055,7 +14066,7 @@ async function getDb(cwd) {
  });
  const basename19 = fileToCheck.split(/[\\/]/).pop();
  const relPath = fileToCheck.replace(gitCwd + sep, "");
-
+ log13.warn(
  { path: fileToCheck },
  `${basename19} is tracked by project git \u2014 this risks data loss on branch switch. Resolution (ADR-013 \xA79): \`git rm --cached ${relPath}\` and rely on \`.cleo/backups/sqlite/\` snapshots + \`cleo backup add\` for recovery.`
  );
@@ -15249,11 +15260,18 @@ var init_cross_db_cleanup = __esm({

  // packages/core/src/store/db-helpers.ts
  import { eq as eq4, inArray as inArray2 } from "drizzle-orm";
- async function upsertTask(db, row, archiveFields) {
+ async function upsertTask(db, row, archiveFields, allowOrphanParent = false) {
  if (row.parentId) {
  const parent = await db.select({ id: tasks.id }).from(tasks).where(eq4(tasks.id, row.parentId)).limit(1).all();
  if (parent.length === 0) {
-
+ if (allowOrphanParent) {
+ row = { ...row, parentId: null };
+ } else {
+ log2.warn(
+ { taskId: row.id, parentId: row.parentId },
+ "upsertTask: parentId references a non-existent task \u2014 parent relationship may be lost"
+ );
+ }
  }
  }
  const values = archiveFields ? { ...row, ...archiveFields, status: "archived" } : row;
@@ -15397,10 +15415,13 @@ async function loadRelationsForTasks(db, tasks2) {
  }
  }
  }
+ var log2;
  var init_db_helpers = __esm({
  "packages/core/src/store/db-helpers.ts"() {
  "use strict";
+ init_logger();
  init_tasks_schema();
+ log2 = getLogger("db-helpers");
  }
  });

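This is the first of several hunks that follow one pattern: the bundled module gains a shared `var logN` declaration and assigns it once inside its `__esm` init block, right after `init_logger()`, instead of resolving a logger at each call site. A hedged sketch of the assumed source-level shape (the import path is illustrative, not taken from the diff):

// Assumed source-level equivalent of the bundled `var log2; ... log2 = getLogger("db-helpers")` pattern above.
import { getLogger } from "../logger.js"; // illustrative path

const log = getLogger("db-helpers"); // module-scoped, created once when the module is first initialized
// functions in the module then log via log.warn(...) rather than re-resolving the logger per call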
@@ -15734,7 +15755,7 @@ async function createSqliteDataAccessor(cwd) {
  archiveReason: taskAny.archiveReason ?? "completed",
  cycleTimeDays: taskAny.cycleTimeDays ?? null
  };
- await upsertTask(db, row, archiveFields);
+ await upsertTask(db, row, archiveFields, true);
  depBatch.push({ taskId: task.id, deps: task.depends ?? [] });
  }
  await batchUpdateDependencies(db, depBatch, validDepIds);
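How the new fourth argument behaves at this archive-import call site, as a short sketch (the task id below is made up):

// allowOrphanParent = false (default): parentId is kept and a warning is logged when the parent row is missing.
// allowOrphanParent = true (archive import above): a missing parent causes parentId to be written as null instead.
await upsertTask(db, { ...row, parentId: "T-9999" }, archiveFields, true);
// if no task "T-9999" exists, the row is upserted with parentId === null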
@@ -16747,7 +16768,7 @@ async function ensureSequenceValid(cwd, options) {
  if (!options?.validateSequence) return;
  const check2 = await checkSequence(cwd);
  if (!check2.valid) {
-
+ log3.warn({ counter: check2.counter, maxId: check2.maxIdInData }, "Sequence behind, repairing");
  const repair = await repairSequence(cwd);
  if (!repair.repaired && options.strict) {
  throw new DataSafetyError(`Sequence repair failed: ${repair.message}`, "SEQUENCE_INVALID", {
@@ -16764,7 +16785,7 @@ async function checkpoint(context, cwd, options) {
  stats.checkpoints++;
  stats.lastCheckpoint = /* @__PURE__ */ new Date();
  } catch (err) {
-
+ log3.warn({ err }, "Checkpoint failed (non-fatal)");
  }
  vacuumIntoBackup({ cwd }).catch(() => {
  });
@@ -16826,7 +16847,7 @@ async function safeAppendLog(accessor, entry, cwd, options) {
  stats.writes++;
  await checkpoint("log entry", cwd, opts);
  }
- var
+ var log3, DataSafetyError, DEFAULT_SAFETY, stats;
  var init_data_safety_central = __esm({
  "packages/core/src/store/data-safety-central.ts"() {
  "use strict";
@@ -16834,7 +16855,7 @@ var init_data_safety_central = __esm({
  init_sequence();
  init_git_checkpoint();
  init_sqlite_backup();
-
+ log3 = getLogger("data-safety");
  DataSafetyError = class extends Error {
  constructor(message, code, context) {
  super(message);
@@ -16874,7 +16895,7 @@ function isSafetyDisabled() {
  }
  function wrapWithSafety(accessor, cwd) {
  if (isSafetyDisabled()) {
-
+ log4.warn(
  "Safety disabled - emergency mode (CLEO_DISABLE_SAFETY=true). Data integrity checks bypassed."
  );
  return accessor;
@@ -16895,13 +16916,13 @@ function getSafetyStatus() {
  enabled: true
  };
  }
- var
+ var log4, SafetyDataAccessor;
  var init_safety_data_accessor = __esm({
  "packages/core/src/store/safety-data-accessor.ts"() {
  "use strict";
  init_logger();
  init_data_safety_central();
-
+ log4 = getLogger("data-safety");
  SafetyDataAccessor = class {
  /** The underlying accessor being wrapped. */
  inner;
@@ -16925,7 +16946,7 @@ var init_safety_data_accessor = __esm({
  ...config2
  };
  if (this.config.verbose) {
-
+ log4.debug({ engine: inner.engine }, "SafetyDataAccessor initialized");
  }
  }
  /** The storage engine backing this accessor. */
@@ -16937,7 +16958,7 @@ var init_safety_data_accessor = __esm({
  */
  logVerbose(message) {
  if (this.config.verbose) {
-
+ log4.debug(message);
  }
  }
  /**
@@ -24058,16 +24079,16 @@ async function queryAudit(options) {
  error: row.errorMessage ?? void 0
  }));
  } catch (err) {
-
+ log5.warn({ err }, "Failed to query audit entries from SQLite");
  return [];
  }
  }
- var
+ var log5;
  var init_audit = __esm({
  "packages/core/src/audit.ts"() {
  "use strict";
  init_logger();
-
+ log5 = getLogger("audit");
  }
  });

@@ -57096,7 +57117,7 @@ async function recordEvidence(epicId, stage, uri, type, options) {
  description: options?.description ?? null
  }).run();
  } catch (err) {
-
+ log6.warn({ err }, "Failed to write evidence to SQLite");
  }
  return record2;
  }
@@ -57109,14 +57130,14 @@ async function linkProvenance(epicId, stage, filePath, cwd) {
  cwd
  });
  }
- var
+ var log6;
  var init_evidence = __esm({
  "packages/core/src/lifecycle/evidence.ts"() {
  "use strict";
  init_logger();
  init_paths();
  init_tasks_schema();
-
+ log6 = getLogger("lifecycle:evidence");
  }
  });

@@ -65570,7 +65591,7 @@ async function ensureExternalTaskLinksTable(cwd) {
  )
  );
  } catch (err) {
-
+ log7.warn({ err }, "Failed to ensure external_task_links table exists");
  throw err;
  }
  }
@@ -65638,14 +65659,14 @@ function rowToLink(row) {
  lastSyncAt: row.lastSyncAt
  };
  }
- var
+ var log7;
  var init_link_store = __esm({
  "packages/core/src/reconciliation/link-store.ts"() {
  "use strict";
  init_logger();
  init_sqlite2();
  init_tasks_schema();
-
+ log7 = getLogger("link-store");
  }
  });

@@ -65791,7 +65812,7 @@ async function executeTransferInternal(params) {
  }
  }
  } catch (err) {
-
+ log8.warn(
  { err, linksCreated },
  "Failed to create external_task_links during transfer \u2014 tasks were transferred successfully but provenance links could not be written"
  );
@@ -65819,7 +65840,7 @@ async function executeTransferInternal(params) {
  })
  });
  } catch (err) {
-
+ log8.warn({ err }, "nexus transfer audit write failed");
  }
  if (mode === "move") {
  let archived = 0;
@@ -65832,7 +65853,7 @@ async function executeTransferInternal(params) {
  });
  archived++;
  } catch (err) {
-
+ log8.warn({ err, taskId: entry.sourceId }, "failed to archive source task after transfer");
  }
  }
  }
@@ -65870,14 +65891,14 @@ async function executeTransferInternal(params) {
  }
  }
  } catch (err) {
-
+ log8.warn({ err }, "brain observation transfer failed");
  }
  result.brainObservationsTransferred = brainTransferred;
  result.manifest.brainObservationsTransferred = brainTransferred;
  }
  return result;
  }
- var
+ var log8;
  var init_transfer = __esm({
  "packages/core/src/nexus/transfer.ts"() {
  "use strict";
@@ -65890,7 +65911,7 @@ var init_transfer = __esm({
  init_export2();
  init_permissions();
  init_registry3();
-
+ log8 = getLogger("nexus:transfer");
  }
  });

@@ -66171,9 +66192,7 @@ async function completeTask(options, cwd, accessor) {
  const parent = await acc.loadSingleTask(task.parentId);
  if (parent && parent.type === "epic" && !parent.noAutoComplete) {
  const siblings = await acc.getChildren(parent.id);
- const allDone = siblings.every(
- (c) => c.id === task.id || c.status === "done" || c.status === "cancelled"
- );
+ const allDone = siblings.length > 0 && siblings.every((c) => c.id === task.id || c.status === "done" || c.status === "cancelled");
  if (allDone) {
  parent.status = "done";
  parent.completedAt = now2;
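The added siblings.length > 0 guard matters because Array.prototype.every is vacuously true on an empty array, so an epic whose child list came back empty would otherwise satisfy the auto-complete check. A minimal illustration (simplified predicate):

// every() on an empty array returns true, which would have marked the parent epic done:
[].every((c) => c.status === "done" || c.status === "cancelled"); // => true
// with the guard, an empty child list can no longer auto-complete the epic:
const allDone = siblings.length > 0 && siblings.every((c) => c.status === "done" || c.status === "cancelled");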
@@ -66832,8 +66851,8 @@ async function loadProjectACL(projectPath) {
  async function logAclFailure(projectPath) {
  try {
  const { getLogger: getLogger2 } = await Promise.resolve().then(() => (init_logger(), logger_exports));
- const
-
+ const log13 = getLogger2("nexus.acl");
+ log13.warn({ projectPath }, "Failed to load ACL configuration, defaulting to deny-all");
  } catch {
  }
  }
@@ -66978,9 +66997,9 @@ async function executeOperation(operation, taskId, projectPath, accessor, direct
  async function logRouteAudit(directive, projectName, taskId, operation, success2, error48) {
  try {
  const { getLogger: getLogger2 } = await Promise.resolve().then(() => (init_logger(), logger_exports));
- const
+ const log13 = getLogger2("nexus.route");
  const level = success2 ? "info" : "warn";
-
+ log13[level](
  {
  directive: directive.verb,
  agentId: directive.agentId,
@@ -76073,7 +76092,7 @@ async function queryTasks(cwd, since) {
  }).from(tasks2).where(conditions.length > 0 ? and14(...conditions) : void 0).all();
  return rows;
  } catch (err) {
-
+ log9.warn({ err }, "Failed to query tasks for workflow telemetry");
  return [];
  }
  }
@@ -76107,7 +76126,7 @@ async function queryCompletionAuditRows(cwd, since) {
  return isComplete;
  });
  } catch (err) {
-
+ log9.warn({ err }, "Failed to query audit log for workflow telemetry");
  return [];
  }
  }
@@ -76332,12 +76351,12 @@ async function getWorkflowComplianceReport(opts) {
  }
  };
  }
- var
+ var log9;
  var init_workflow_telemetry = __esm({
  "packages/core/src/stats/workflow-telemetry.ts"() {
  "use strict";
  init_logger();
-
+ log9 = getLogger("workflow-telemetry");
  }
  });

@@ -77654,7 +77673,7 @@ import { createGzip } from "node:zlib";
  async function pruneAuditLog(cleoDir, config2) {
  try {
  if (!config2.auditRetentionDays || config2.auditRetentionDays <= 0) {
-
+ log10.debug("auditRetentionDays is 0 or unset; skipping audit prune");
  return { rowsArchived: 0, rowsDeleted: 0 };
  }
  const cutoff = new Date(Date.now() - config2.auditRetentionDays * 864e5).toISOString();
@@ -77665,7 +77684,7 @@ async function pruneAuditLog(cleoDir, config2) {
  const db = await getDb4(projectRoot);
  const oldRows = await db.select().from(auditLog2).where(lt4(auditLog2.timestamp, cutoff));
  if (oldRows.length === 0) {
-
+ log10.debug("No audit_log rows older than cutoff; nothing to prune");
  return { rowsArchived: 0, rowsDeleted: 0 };
  }
  let archivePath;
@@ -77683,17 +77702,17 @@ async function pruneAuditLog(cleoDir, config2) {
  const inStream = Readable.from([jsonlContent]);
  await pipeline(inStream, gzip, outStream);
  rowsArchived = oldRows.length;
-
+ log10.info(
  { archivePath, rowsArchived },
  `Archived ${rowsArchived} audit rows to ${archivePath}`
  );
  } catch (archiveErr) {
-
+ log10.warn({ err: archiveErr }, "Failed to archive audit rows; continuing with deletion");
  archivePath = void 0;
  }
  }
  await db.delete(auditLog2).where(lt4(auditLog2.timestamp, cutoff)).run();
-
+ log10.info(
  { rowsDeleted: oldRows.length, cutoff },
  `Pruned ${oldRows.length} audit_log rows older than ${cutoff}`
  );
@@ -77703,16 +77722,16 @@ async function pruneAuditLog(cleoDir, config2) {
  archivePath
  };
  } catch (err) {
-
+ log10.warn({ err }, "audit log pruning failed");
  return { rowsArchived: 0, rowsDeleted: 0 };
  }
  }
- var
+ var log10;
  var init_audit_prune = __esm({
  "packages/core/src/audit-prune.ts"() {
  "use strict";
  init_logger();
-
+ log10 = getLogger("prune");
  }
  });

@@ -96741,7 +96760,7 @@ var init_backup_pack = __esm({
  import fs5 from "node:fs";
  import path5 from "node:path";
  function detectAndRemoveLegacyGlobalFiles(cleoHomeOverride) {
- const
+ const log13 = getLogger("cleanup-legacy");
  const cleoHome = cleoHomeOverride ?? getCleoHome();
  const removed = [];
  const errors = [];
@@ -96751,30 +96770,30 @@ function detectAndRemoveLegacyGlobalFiles(cleoHomeOverride) {
  if (fs5.existsSync(fullPath)) {
  fs5.unlinkSync(fullPath);
  removed.push(fileName);
-
+ log13.info({ file: fullPath }, "Removed legacy global file");
  }
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
  errors.push({ file: fileName, error: message });
-
+ log13.warn({ file: fullPath, error: message }, "Failed to remove legacy global file");
  }
  }
  return { removed, errors };
  }
  function detectAndRemoveStrayProjectNexus(projectRoot) {
- const
+ const log13 = getLogger("cleanup-legacy");
  const strayPath = path5.join(projectRoot, ".cleo", "nexus.db");
  if (fs5.existsSync(strayPath)) {
  try {
  fs5.unlinkSync(strayPath);
-
+ log13.warn(
  { path: strayPath },
  "Removed stray project-tier nexus.db (violates ADR-036 global-only contract)"
  );
  return { removed: true, path: strayPath };
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.warn(
  { path: strayPath, error: message },
  "Failed to remove stray project-tier nexus.db \u2014 manual deletion may be required"
  );
@@ -96845,7 +96864,7 @@ function brokenTimestamp() {
  return `${now2.getFullYear()}${pad2(now2.getMonth() + 1)}${pad2(now2.getDate())}-${pad2(now2.getHours())}${pad2(now2.getMinutes())}${pad2(now2.getSeconds())}-${pad3(now2.getMilliseconds())}`;
  }
  function migrateSignaldockToConduit(projectRoot) {
- const
+ const log13 = getLogger("migrate-signaldock-to-conduit");
  const legacyPath = join113(projectRoot, ".cleo", "signaldock.db");
  const conduitPath = join113(projectRoot, ".cleo", "conduit.db");
  const globalSignaldockPath = join113(getCleoHome(), "signaldock.db");
@@ -96862,13 +96881,13 @@ function migrateSignaldockToConduit(projectRoot) {
  if (!needsSignaldockToConduitMigration(projectRoot)) {
  return result;
  }
-
+ log13.info({ projectRoot, legacyPath }, "T310 migration: starting signaldock.db \u2192 conduit.db");
  let legacy = null;
  try {
  legacy = new DatabaseSync7(legacyPath, { readOnly: true });
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.error({ legacyPath, error: message }, "T310 migration: cannot open legacy signaldock.db");
  result.errors.push({ step: "step-2-open-legacy", error: message });
  result.status = "failed";
  return result;
@@ -96876,7 +96895,7 @@ function migrateSignaldockToConduit(projectRoot) {
  try {
  if (!integrityCheckPasses(legacy)) {
  const msg = "Legacy signaldock.db failed PRAGMA integrity_check. Migration aborted \u2014 no changes written. Recovery: inspect the database with sqlite3 and attempt manual repair before re-running.";
-
+ log13.error({ legacyPath }, msg);
  result.errors.push({ step: "step-3-legacy-integrity", error: msg });
  result.status = "failed";
  legacy.close();
@@ -96884,7 +96903,7 @@ function migrateSignaldockToConduit(projectRoot) {
  }
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.error({ legacyPath, error: message }, "T310 migration: integrity_check threw on legacy DB");
  result.errors.push({ step: "step-3-legacy-integrity", error: message });
  result.status = "failed";
  legacy.close();
@@ -96902,7 +96921,7 @@ function migrateSignaldockToConduit(projectRoot) {
  getGlobalSalt();
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.error({ error: message }, "T310 migration: getGlobalSalt failed \u2014 migration aborted");
  result.errors.push({ step: "step-6-global-salt", error: message });
  result.status = "failed";
  legacy.close();
@@ -96916,7 +96935,7 @@ function migrateSignaldockToConduit(projectRoot) {
  conduit.exec("PRAGMA foreign_keys = OFF");
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.error({ conduitPath, error: message }, "T310 migration: failed to create conduit.db");
  result.errors.push({ step: "step-7-create-conduit", error: message });
  result.status = "failed";
  legacy.close();
@@ -96962,7 +96981,7 @@ function migrateSignaldockToConduit(projectRoot) {
  result.agentsCopied = agentsCountForConduit;
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.error({ error: message }, "T310 migration: conduit.db write failed \u2014 rolling back");
  result.errors.push({ step: "step-8-conduit-write", error: message });
  result.status = "failed";
  try {
@@ -96983,7 +97002,7 @@ function migrateSignaldockToConduit(projectRoot) {
  try {
  if (!integrityCheckPasses(conduit)) {
  const msg = "conduit.db failed PRAGMA integrity_check after write";
-
+ log13.error({ conduitPath }, msg);
  result.errors.push({ step: "step-10-conduit-integrity", error: msg });
  result.status = "failed";
  conduit.close();
@@ -96998,7 +97017,7 @@ function migrateSignaldockToConduit(projectRoot) {
  }
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.error({ error: message }, "T310 migration: conduit.db integrity_check threw");
  result.errors.push({ step: "step-10-conduit-integrity", error: message });
  result.status = "failed";
  if (conduit) {
@@ -97020,7 +97039,7 @@ function migrateSignaldockToConduit(projectRoot) {
  globalDb.exec("PRAGMA foreign_keys = OFF");
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.error(
  { globalSignaldockPath, error: message },
  "T310 migration: cannot open global signaldock.db"
  );
@@ -97059,7 +97078,7 @@ function migrateSignaldockToConduit(projectRoot) {
  result.agentsCopied = agentsCopiedToGlobal;
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.error(
  { error: message },
  "T310 migration: global signaldock.db write failed \u2014 rolling back"
  );
@@ -97077,7 +97096,7 @@ function migrateSignaldockToConduit(projectRoot) {
  try {
  if (!integrityCheckPasses(globalDb)) {
  const msg = "Global signaldock.db failed PRAGMA integrity_check after write";
-
+ log13.error({ globalSignaldockPath }, msg);
  result.errors.push({ step: "step-14-global-integrity", error: msg });
  result.status = "failed";
  globalDb.close();
@@ -97092,7 +97111,7 @@ function migrateSignaldockToConduit(projectRoot) {
  }
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.error({ error: message }, "T310 migration: global signaldock.db integrity_check threw");
  result.errors.push({ step: "step-14-global-integrity", error: message });
  result.status = "failed";
  if (globalDb) {
@@ -97111,21 +97130,21 @@ function migrateSignaldockToConduit(projectRoot) {
  result.bakPath = bakPath;
  } catch (err) {
  const message = err instanceof Error ? err.message : String(err);
-
+ log13.error(
  { legacyPath, bakPath, error: message },
  "T310 migration: rename to .pre-t310.bak failed \u2014 legacy file left in place (harmless)"
  );
  result.errors.push({ step: "step-16-rename-bak", error: message });
  }
-
+ log13.info(
  { projectRoot, agentsCopied: result.agentsCopied, conduitPath, bakPath: result.bakPath },
  `T310 migration complete: ${result.agentsCopied} agents migrated to global, conduit.db created`
  );
-
+ log13.warn(
  {},
  "T310 migration: API keys have been re-keyed. External systems holding old API keys (CI env vars, remote agent configs) must be updated."
  );
-
+ log13.info(
  { legacyPath, bakPath: result.bakPath, conduitPath },
  "T310 migration recovery: if problems occur, rename .pre-t310.bak to signaldock.db and delete conduit.db to re-run migration."
  );
@@ -97359,7 +97378,7 @@ async function validateAndRepairSequence(cwd, config2 = {}) {
  }
  const repair = await repairSequence(cwd);
  if (repair.repaired) {
-
+ log11.warn(
  { oldCounter: repair.oldCounter, newCounter: repair.newCounter },
  "Sequence repaired"
  );
@@ -97388,7 +97407,7 @@ async function triggerCheckpoint(context, cwd, config2 = {}) {
  try {
  await gitCheckpoint("auto", context, cwd);
  } catch (err) {
-
+ log11.warn({ err }, "Checkpoint failed (non-fatal)");
  }
  vacuumIntoBackup({ cwd }).catch(() => {
  });
@@ -97426,16 +97445,16 @@ async function safeDeleteTask(deleteFn, taskId, cwd, config2 = {}) {
  return result;
  }
  async function forceCheckpointBeforeOperation(operation, cwd) {
-
+ log11.info({ operation }, "Forcing checkpoint before operation");
  try {
  await gitCheckpoint("manual", `pre-${operation}`, cwd);
  } catch (err) {
-
+ log11.error({ err }, "Failed to create pre-operation checkpoint");
  }
  vacuumIntoBackup({ cwd, force: true }).catch(() => {
  });
  }
- var
+ var log11, DEFAULT_CONFIG2, SafetyError;
  var init_data_safety = __esm({
  "packages/core/src/store/data-safety.ts"() {
  "use strict";
@@ -97445,7 +97464,7 @@ var init_data_safety = __esm({
  init_sqlite2();
  init_sqlite_backup();
  init_tasks_schema();
-
+ log11 = getLogger("data-safety");
  DEFAULT_CONFIG2 = {
  verifyWrites: true,
  detectCollisions: true,
@@ -107319,11 +107338,11 @@ var require_core = __commonJS({
  Ajv4.ValidationError = validation_error_1.default;
  Ajv4.MissingRefError = ref_error_1.default;
  exports.default = Ajv4;
- function checkOptions(checkOpts, options, msg,
+ function checkOptions(checkOpts, options, msg, log13 = "error") {
  for (const key in checkOpts) {
  const opt = key;
  if (opt in options)
- this.logger[
+ this.logger[log13](`${msg}: option ${key}. ${checkOpts[opt]}`);
  }
  }
  function getSchEnv(keyRef) {
@@ -130123,7 +130142,7 @@ async function writeToSqlite(entry, requestId) {
  };
  const parsed = AuditLogInsertSchema2.safeParse(payload);
  if (!parsed.success) {
-
+ log12.warn(
  { issues: parsed.error.issues },
  "Audit payload failed Zod validation; skipping insert"
  );
@@ -130132,7 +130151,7 @@ async function writeToSqlite(entry, requestId) {
  const db = await getDb4(process.cwd());
  await db.insert(auditLog2).values(parsed.data).run();
  } catch (err) {
-
+ log12.warn({ err }, "Failed to write audit entry to SQLite");
  }
  }
  function createAudit() {
@@ -130163,7 +130182,7 @@ function createAudit() {
  },
  error: response.error?.message
  };
-
+ log12.info(
  {
  domain: entry.domain,
  operation: entry.operation,
@@ -130180,20 +130199,20 @@ function createAudit() {
  await writeToSqlite(entry, req.requestId);
  } else {
  writeToSqlite(entry, req.requestId).catch((err) => {
-
+ log12.error({ err }, "Failed to persist audit entry to SQLite");
  });
  }
  return response;
  };
  }
- var
+ var log12, cachedProjectHash;
  var init_audit3 = __esm({
  "packages/cleo/src/dispatch/middleware/audit.ts"() {
  "use strict";
  init_internal();
  init_config3();
  init_internal();
-
+ log12 = getLogger("audit");
  }
  });
