@voybio/ace-swarm 0.1.0 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +69 -29
- package/assets/agent-state/EVIDENCE_LOG.md +1 -1
- package/assets/agent-state/STATUS.md +2 -2
- package/assets/scripts/ace-hook-dispatch.mjs +1 -1
- package/dist/ace-autonomy.js +38 -1
- package/dist/ace-context.js +8 -0
- package/dist/ace-server-instructions.js +55 -19
- package/dist/ace-state-resolver.d.ts +18 -0
- package/dist/ace-state-resolver.js +106 -0
- package/dist/cli.js +74 -7
- package/dist/handoff-registry.js +11 -7
- package/dist/helpers.js +75 -9
- package/dist/job-scheduler.js +94 -44
- package/dist/run-ledger.js +3 -4
- package/dist/server.d.ts +1 -1
- package/dist/server.js +1 -1
- package/dist/shared.d.ts +1 -1
- package/dist/status-events.js +12 -14
- package/dist/store/ace-packed-store.d.ts +65 -26
- package/dist/store/ace-packed-store.js +448 -261
- package/dist/store/bootstrap-store.d.ts +1 -1
- package/dist/store/bootstrap-store.js +24 -13
- package/dist/store/catalog-builder.js +3 -3
- package/dist/store/importer.d.ts +2 -2
- package/dist/store/importer.js +2 -2
- package/dist/store/materializers/context-snapshot-materializer.d.ts +10 -0
- package/dist/store/materializers/context-snapshot-materializer.js +51 -0
- package/dist/store/materializers/hook-context-materializer.d.ts +1 -1
- package/dist/store/materializers/hook-context-materializer.js +1 -1
- package/dist/store/materializers/host-file-materializer.d.ts +6 -0
- package/dist/store/materializers/host-file-materializer.js +14 -1
- package/dist/store/materializers/projection-manager.d.ts +14 -0
- package/dist/store/materializers/projection-manager.js +73 -0
- package/dist/store/materializers/scheduler-projection-materializer.d.ts +16 -0
- package/dist/store/materializers/scheduler-projection-materializer.js +48 -0
- package/dist/store/repositories/context-snapshot-repository.d.ts +46 -0
- package/dist/store/repositories/context-snapshot-repository.js +105 -0
- package/dist/store/repositories/local-model-runtime-repository.d.ts +98 -0
- package/dist/store/repositories/local-model-runtime-repository.js +165 -0
- package/dist/store/repositories/scheduler-repository.d.ts +21 -39
- package/dist/store/repositories/scheduler-repository.js +123 -93
- package/dist/store/repositories/todo-repository.d.ts +4 -0
- package/dist/store/repositories/todo-repository.js +50 -0
- package/dist/store/skills-install.d.ts +1 -1
- package/dist/store/skills-install.js +3 -3
- package/dist/store/state-reader.d.ts +8 -1
- package/dist/store/state-reader.js +19 -13
- package/dist/store/store-artifacts.js +105 -41
- package/dist/store/store-authority-audit.d.ts +30 -0
- package/dist/store/store-authority-audit.js +448 -0
- package/dist/store/store-snapshot.js +3 -3
- package/dist/store/types.d.ts +6 -2
- package/dist/store/types.js +5 -2
- package/dist/todo-state.js +179 -11
- package/dist/tools-files.js +2 -1
- package/dist/tools-framework.js +62 -2
- package/dist/tools-memory.js +69 -34
- package/dist/tools-todo.js +1 -1
- package/dist/tui/agent-worker.d.ts +1 -1
- package/dist/tui/agent-worker.js +5 -3
- package/dist/tui/chat.d.ts +19 -0
- package/dist/tui/chat.js +275 -9
- package/dist/tui/commands.d.ts +2 -0
- package/dist/tui/commands.js +62 -0
- package/dist/tui/dashboard.d.ts +6 -1
- package/dist/tui/dashboard.js +44 -3
- package/dist/tui/index.d.ts +5 -0
- package/dist/tui/index.js +154 -0
- package/dist/tui/input.js +5 -0
- package/dist/tui/layout.d.ts +24 -0
- package/dist/tui/layout.js +76 -2
- package/dist/tui/local-model-contract.d.ts +50 -0
- package/dist/tui/local-model-contract.js +272 -0
- package/dist/vericify-bridge.js +3 -4
- package/dist/vericify-context.js +18 -6
- package/package.json +4 -6
package/dist/job-scheduler.js
CHANGED

@@ -1,10 +1,15 @@
 import { randomUUID } from "node:crypto";
 import { readHandoffRegistry } from "./handoff-registry.js";
-import { safeRead, safeWrite, withFileLock, wsPath } from "./helpers.js";
+import { resolveWorkspaceRoot, safeRead, safeWrite, withFileLock, wsPath } from "./helpers.js";
 import { isReadError } from "./shared.js";
 import { appendStatusEventSafe, readStatusEvents } from "./status-events.js";
 import { appendRunLedgerEntrySafe } from "./run-ledger.js";
 import { readTodoState } from "./todo-state.js";
+import { openStore } from "./store/ace-packed-store.js";
+import { ProjectionManager } from "./store/materializers/projection-manager.js";
+import { SchedulerRepository } from "./store/repositories/scheduler-repository.js";
+import { getWorkspaceStorePath, readStoreJsonSync, storeExistsSync, } from "./store/store-snapshot.js";
+import { withStoreWriteQueue } from "./store/write-queue.js";
 // ── Buffered transition-event infrastructure ────────────────────────
 // Events are collected during the scheduler lock and flushed after
 // the lock is released. Emission is failure-tolerant: a broken
@@ -270,33 +275,63 @@ export function getSchedulerLeasePath() {
     return wsPath(SCHEDULER_LEASE_REL);
 }
 export function readJobQueue() {
+    const root = resolveWorkspaceRoot();
+    if (storeExistsSync(root)) {
+        const stored = readStoreJsonSync(root, "state/scheduler/queue");
+        if (stored)
+            return stored;
+    }
     const raw = safeRead(JOB_QUEUE_REL);
     if (isReadError(raw))
         return defaultQueueFile();
     return parseQueueFile(raw) ?? defaultQueueFile();
 }
 export function readJobLockTable() {
+    const root = resolveWorkspaceRoot();
+    if (storeExistsSync(root)) {
+        const stored = readStoreJsonSync(root, "state/scheduler/locks");
+        if (stored)
+            return stored;
+    }
     const raw = safeRead(JOB_LOCK_TABLE_REL);
     if (isReadError(raw))
         return defaultJobLockFile();
     return parseJobLockFile(raw) ?? defaultJobLockFile();
 }
 export function readSchedulerLease() {
+    const root = resolveWorkspaceRoot();
+    if (storeExistsSync(root)) {
+        const stored = readStoreJsonSync(root, "state/scheduler/lease");
+        if (stored)
+            return stored ?? undefined;
+    }
     const raw = safeRead(SCHEDULER_LEASE_REL);
     if (isReadError(raw))
         return undefined;
     return parseLease(raw);
 }
-function
-
-
-}
-
-
-
-}
-
-
+async function persistSchedulerState(input) {
+    const root = resolveWorkspaceRoot();
+    if (!storeExistsSync(root)) {
+        safeWrite(JOB_QUEUE_REL, JSON.stringify({ ...input.queue, updated_at: nowIso() }, null, 2));
+        safeWrite(JOB_LOCK_TABLE_REL, JSON.stringify({ ...input.locks, updated_at: nowIso() }, null, 2));
+        safeWrite(SCHEDULER_LEASE_REL, JSON.stringify(input.lease ?? null, null, 2));
+        return;
+    }
+    const storePath = getWorkspaceStorePath(root);
+    await withStoreWriteQueue(storePath, async () => {
+        const store = await openStore(storePath);
+        try {
+            const repo = new SchedulerRepository(store);
+            await repo.replaceState(input);
+            await store.commit();
+            const projections = new ProjectionManager(store, root);
+            await projections.projectAfterCommit(["scheduler"]);
+        }
+        finally {
+            await store.close();
+        }
+    });
 }
 function compareJobs(a, b) {
     const priorityCmp = PRIORITY_WEIGHT[a.priority] - PRIORITY_WEIGHT[b.priority];
@@ -480,8 +515,7 @@ function defaultLeaseTtlSeconds() {
 function hasActiveLockForJob(locks, jobId) {
     return locks.some((lock) => lock.job_id === jobId && isJobLockActive(lock));
 }
-function acquireLease(owner, ttlSeconds, now) {
-    const existing = readSchedulerLease();
+function acquireLease(existing, owner, ttlSeconds, now) {
     const expiry = new Date(now.getTime() + ttlSeconds * 1000);
     if (!existing) {
         const lease = {
@@ -492,7 +526,6 @@ function acquireLease(owner, ttlSeconds, now) {
             heartbeat_at: nowIso(now),
             expires_at: nowIso(expiry),
         };
-        writeLease(lease);
         return { acquired: true, lease, recovery_mode: false };
     }
     const existingExpiry = parseDate(existing.expires_at);
@@ -506,7 +539,6 @@ function acquireLease(owner, ttlSeconds, now) {
             heartbeat_at: nowIso(now),
             expires_at: nowIso(expiry),
         };
-        writeLease(lease);
         return { acquired: true, lease, recovery_mode: expired };
     }
     return { acquired: false, lease: existing, recovery_mode: false };
@@ -522,7 +554,7 @@ function hasActiveLeaseForOwner(lease, owner, now) {
     return expires.getTime() > now.getTime();
 }
 function ensureLeaseForOwner(owner, now, ttlSeconds = defaultLeaseTtlSeconds()) {
-    const leaseResult = acquireLease(owner, ttlSeconds, now);
+    const leaseResult = acquireLease(readSchedulerLease(), owner, ttlSeconds, now);
     if (!leaseResult.acquired) {
         return leaseResult;
     }
@@ -532,7 +564,6 @@ function ensureLeaseForOwner(owner, now, ttlSeconds = defaultLeaseTtlSeconds())
         heartbeat_at: nowIso(now),
         expires_at: nowIso(new Date(now.getTime() + ttlSeconds * 1000)),
     };
-    writeLease(refreshedLease);
     return {
         ...leaseResult,
         lease: refreshedLease,
@@ -657,7 +688,7 @@ function reconcileRecoveryState(queue, table, now, recoveryMode, buf) {
 }
 export async function enqueueJob(input) {
     const buf = newEventBuffer();
-    const result = await withFileLock(SCHEDULER_LOCK_REL, () => {
+    const result = await withFileLock(SCHEDULER_LOCK_REL, async () => {
         if (!input.kind || input.kind.trim().length === 0) {
             const queue = readJobQueue();
             return {
@@ -715,7 +746,11 @@ export async function enqueueJob(input) {
             accepted_at: created,
         };
         queue.jobs.push(job);
-
+        await persistSchedulerState({
+            queue,
+            locks: readJobLockTable(),
+            lease: readSchedulerLease(),
+        });
         buf.status.push({
             event_type: "SCHEDULER_JOB_ENQUEUED",
             status: "started",
@@ -741,7 +776,7 @@ export async function enqueueJob(input) {
         });
         return {
             ok: true,
-            path,
+            path: getJobQueuePath(),
             queue,
             ack: {
                 job_id: job.job_id,
@@ -754,7 +789,7 @@ export async function enqueueJob(input) {
 }
 export async function acknowledgeJob(input) {
     const buf = newEventBuffer();
-    const result = await withFileLock(SCHEDULER_LOCK_REL, () => {
+    const result = await withFileLock(SCHEDULER_LOCK_REL, async () => {
         const queue = readJobQueue();
         const job = queue.jobs.find((row) => row.job_id === input.job_id);
         if (!job) {
@@ -794,7 +829,11 @@ export async function acknowledgeJob(input) {
                 }
             }
         }
-
+        await persistSchedulerState({
+            queue,
+            locks: readJobLockTable(),
+            lease: readSchedulerLease(),
+        });
         buf.status.push({
             event_type: "SCHEDULER_JOB_ACK",
             status: job.status === "canceled" ? "done" : "pass",
@@ -821,7 +860,7 @@ export async function acknowledgeJob(input) {
         });
         return {
             ok: true,
-            path,
+            path: getJobQueuePath(),
             queue,
             from,
             to: job.status,
@@ -832,14 +871,14 @@ export async function acknowledgeJob(input) {
 }
 export async function dispatchJobs(input = {}) {
     const buf = newEventBuffer();
-    const result = await withFileLock(SCHEDULER_LOCK_REL, () => {
+    const result = await withFileLock(SCHEDULER_LOCK_REL, async () => {
         const owner = deriveOwner(input.owner);
         const now = parseDate(input.now_iso) ?? new Date();
         const table = readJobLockTable();
         const leaseTtl = typeof input.lease_ttl_seconds === "number" && input.lease_ttl_seconds > 0
             ? Math.floor(input.lease_ttl_seconds)
             : defaultLeaseTtlSeconds();
-        const leaseResult = acquireLease(owner, leaseTtl, now);
+        const leaseResult = acquireLease(readSchedulerLease(), owner, leaseTtl, now);
         if (!leaseResult.acquired) {
             const queue = readJobQueue();
             return {
@@ -913,9 +952,11 @@ export async function dispatchJobs(input = {}) {
             heartbeat_at: nowIso(now),
             expires_at: nowIso(new Date(now.getTime() + leaseTtl * 1000)),
         };
-
-
-
+        await persistSchedulerState({
+            queue,
+            locks: table,
+            lease: refreshedLease,
+        });
         const summary = {
             total_jobs: queue.jobs.length,
             blocked_jobs: queue.jobs.filter((job) => job.status === "blocked").length,
@@ -954,8 +995,8 @@ export async function dispatchJobs(input = {}) {
             owner,
             lease_acquired: true,
             lease: refreshedLease,
-            queue_path:
-            lock_path:
+            queue_path: getJobQueuePath(),
+            lock_path: getJobLockTablePath(),
             queue,
             locks: table,
             summary,
@@ -966,7 +1007,7 @@ export async function dispatchJobs(input = {}) {
 }
 export async function dispatchJobNow(input) {
     const buf = newEventBuffer();
-    const result = await withFileLock(SCHEDULER_LOCK_REL, () => {
+    const result = await withFileLock(SCHEDULER_LOCK_REL, async () => {
         const owner = deriveOwner(input.owner);
         const now = parseDate(input.now_iso) ?? new Date();
         const leaseResult = ensureLeaseForOwner(owner, now);
@@ -1022,12 +1063,15 @@ export async function dispatchJobNow(input) {
                 job.started_at ??
                     nowIso(now);
         heartbeatRunningLocks(table, leaseResult.lease.lease_id, now);
-
-
+        await persistSchedulerState({
+            queue,
+            locks: table,
+            lease: leaseResult.lease,
+        });
         return {
             ok: true,
-            queue_path:
-            lock_path:
+            queue_path: getJobQueuePath(),
+            lock_path: getJobLockTablePath(),
             queue,
             locks: table,
             started_lock: {
@@ -1069,12 +1113,15 @@ export async function dispatchJobNow(input) {
             occupancy.add(resource);
         }
         const startedLock = startJobNow(job, table, leaseResult.lease, now, buf, "dispatch_job_now");
-
-
+        await persistSchedulerState({
+            queue,
+            locks: table,
+            lease: leaseResult.lease,
+        });
         return {
             ok: true,
-            queue_path:
-            lock_path:
+            queue_path: getJobQueuePath(),
+            lock_path: getJobLockTablePath(),
             queue,
             locks: table,
             started_lock: startedLock,
@@ -1085,7 +1132,7 @@ export async function dispatchJobNow(input) {
 }
 export async function completeJob(input) {
     const buf = newEventBuffer();
-    const result = await withFileLock(SCHEDULER_LOCK_REL, () => {
+    const result = await withFileLock(SCHEDULER_LOCK_REL, async () => {
         const owner = deriveOwner(input.owner);
         const now = parseDate(input.now_iso) ?? new Date();
         const leaseResult = ensureLeaseForOwner(owner, now);
@@ -1170,8 +1217,11 @@ export async function completeJob(input) {
                 job.started_at = undefined;
             }
         }
-
-
+        await persistSchedulerState({
+            queue,
+            locks: table,
+            lease: leaseResult.lease,
+        });
         const finalStatus = job.status;
         const isSuccess = finalStatus === "done";
         buf.status.push({
@@ -1203,8 +1253,8 @@ export async function completeJob(input) {
         });
         return {
             ok: true,
-            queue_path:
-            lock_path:
+            queue_path: getJobQueuePath(),
+            lock_path: getJobLockTablePath(),
             queue,
             locks: table,
             status: job.status,
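The scheduler changes follow one pattern: `acquireLease` becomes a pure function that receives the current lease instead of reading and writing it itself, and every mutation is persisted through the new `persistSchedulerState`, which falls back to the legacy JSON files only when no packed store exists. A minimal sketch of the new calling convention, using only identifiers visible in the diff above (these are module-internal functions in dist/job-scheduler.js, not exported API; the owner string and TTL are illustrative):

```js
// Sketch only — identifiers from the diff above; owner and TTL values are made up.
const now = new Date();
const result = acquireLease(readSchedulerLease(), "agent-1", 60, now);
if (result.acquired) {
    // acquireLease no longer calls writeLease(); the caller persists explicitly.
    await persistSchedulerState({
        queue: readJobQueue(),
        locks: readJobLockTable(),
        lease: result.lease,
    });
}
```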
package/dist/run-ledger.js
CHANGED

@@ -2,7 +2,7 @@ import { existsSync } from "node:fs";
 import { resolveWorkspaceRoot, safeRead, safeWrite, wsPath, withFileLock, } from "./helpers.js";
 import { isReadError } from "./shared.js";
 import { openStore } from "./store/ace-packed-store.js";
-import {
+import { ProjectionManager } from "./store/materializers/projection-manager.js";
 import { LedgerRepository } from "./store/repositories/ledger-repository.js";
 import { withStoreWriteQueue } from "./store/write-queue.js";
 import { getWorkspaceStorePath, listStoreKeysSync, readStoreJsonSync, storeExistsSync, } from "./store/store-snapshot.js";
@@ -234,9 +234,8 @@ export async function appendRunLedgerEntrySafe(input) {
             },
         });
         await store.commit();
-        const
-        await
-        await store.commit();
+        const projections = new ProjectionManager(store, root);
+        await projections.projectAfterCommit(["ledger"]);
         return {
             entry: toLegacyRunLedgerEntryDirect(record),
             path: operationalArtifactVirtualPath(root, RUN_LEDGER_REL),
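The ledger and scheduler now share the same store-backed write path: serialize writers through `withStoreWriteQueue`, mutate via a repository, `commit()`, then let `ProjectionManager.projectAfterCommit` refresh the on-disk mirrors. Condensed from the hunks above (repository internals are not shown in the diff and are omitted here):

```js
// Shape assembled from the diffs above; the repository call in the middle is elided.
const root = resolveWorkspaceRoot();
const storePath = getWorkspaceStorePath(root);
await withStoreWriteQueue(storePath, async () => {
    const store = await openStore(storePath);
    try {
        // ...append or replace state through a repository (LedgerRepository, SchedulerRepository, ...)
        await store.commit();
        const projections = new ProjectionManager(store, root);
        await projections.projectAfterCommit(["ledger"]);
    }
    finally {
        await store.close();
    }
});
```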
package/dist/server.d.ts
CHANGED

@@ -1,6 +1,6 @@
 import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
 export declare const ACE_MCP_SERVER_NAME = "ace-swarm";
-export declare const ACE_MCP_SERVER_VERSION = "
+export declare const ACE_MCP_SERVER_VERSION = "0.2.1";
 export interface CreateAceServerOptions {
     toolGovernance?: boolean;
     instructions?: string;
package/dist/server.js
CHANGED

@@ -7,7 +7,7 @@ import { registerResources } from "./resources.js";
 import { registerTools } from "./tools.js";
 import { backfillHandoffsIntoScheduler } from "./tools-handoff.js";
 export const ACE_MCP_SERVER_NAME = "ace-swarm";
-export const ACE_MCP_SERVER_VERSION = "
+export const ACE_MCP_SERVER_VERSION = "0.2.1";
 export function createAceServer(options = {}) {
     const workspaceRoot = resolveWorkspaceRoot();
     const server = new McpServer({
package/dist/shared.d.ts
CHANGED

@@ -45,9 +45,9 @@ export declare const ROLE_ENUM: z.ZodEnum<{
     release: "release";
 }>;
 export declare const HANDOFF_VALIDATION_MODE: z.ZodOptional<z.ZodEnum<{
+    "agent-state": "agent-state";
     auto: "auto";
     swarm: "swarm";
-    "agent-state": "agent-state";
 }>>;
 export declare const KERNEL_KEY_ENUM: z.ZodEnum<{
     directive_kernel: "directive_kernel";
package/dist/status-events.js
CHANGED

@@ -4,7 +4,7 @@ import { resolveWorkspaceRoot, resolveWorkspaceArtifactPath as resolveWorkspaceA
 import { isReadError } from "./shared.js";
 import { validateStatusEventPayload, PROVENANCE_CRITICAL_EVENT_TYPES } from "./schemas.js";
 import { openStore } from "./store/ace-packed-store.js";
-import {
+import { ProjectionManager } from "./store/materializers/projection-manager.js";
 import { TrackerRepository } from "./store/repositories/tracker-repository.js";
 import { getWorkspaceStorePath, listStoreKeysSync, readStoreJsonSync, storeExistsSync, } from "./store/store-snapshot.js";
 import { operationalArtifactVirtualPath } from "./store/store-artifacts.js";
@@ -90,8 +90,7 @@ function rotateIfNeeded(content) {
     safeWriteWorkspaceFile(STATUS_EVENTS_ARCHIVE_REL, archiveFull);
     return lines.join("\n") + "\n";
 }
-async function mirrorStatusEventToStore(event) {
-    const root = workspaceRoot();
+async function mirrorStatusEventToStore(root, event) {
     const storePath = getWorkspaceStorePath(root);
     if (!existsSync(storePath))
         return;
@@ -115,9 +114,8 @@ async function mirrorStatusEventToStore(event) {
             },
         });
         await store.commit();
-        const
-        await
-        await store.commit();
+        const projections = new ProjectionManager(store, root);
+        await projections.projectAfterCommit(["status_events"]);
     }
     finally {
         await store.close();
@@ -125,17 +123,17 @@ async function mirrorStatusEventToStore(event) {
     });
 }
 function scheduleStatusEventMirror(event) {
+    const root = workspaceRoot();
     const snapshot = JSON.parse(JSON.stringify(event));
     trackerMirrorChain = trackerMirrorChain
         .catch(() => { })
-        .then(() => mirrorStatusEventToStore(snapshot))
+        .then(() => mirrorStatusEventToStore(root, snapshot))
         .catch(() => { });
 }
 export async function waitForPendingStatusEventMirrors() {
     await trackerMirrorChain.catch(() => { });
 }
-async function appendStatusEventStoreBacked(event) {
-    const root = workspaceRoot();
+async function appendStatusEventStoreBacked(root, event) {
     const storePath = getWorkspaceStorePath(root);
     return withStoreWriteQueue(storePath, async () => {
         const store = await openStore(storePath);
@@ -157,9 +155,8 @@ async function appendStatusEventStoreBacked(event) {
             },
         });
         await store.commit();
-        const
-        await
-        await store.commit();
+        const projections = new ProjectionManager(store, root);
+        await projections.projectAfterCommit(["status_events"]);
         return {
             path: operationalArtifactVirtualPath(root, STATUS_EVENTS_REL_PATH),
             event,
@@ -187,11 +184,12 @@ export function appendStatusEvent(input) {
  * Use this from async tool handlers where parallel subagents may fire.
  */
 export async function appendStatusEventSafe(input) {
+    const root = workspaceRoot();
     return withDynamicFileLock(STATUS_EVENTS_REL_PATH, async () => {
         const event = buildStatusEvent(input);
         validateStatusEvent(event);
-        if (storeExistsSync(
-            return appendStatusEventStoreBacked(event);
+        if (storeExistsSync(root)) {
+            return appendStatusEventStoreBacked(root, event);
         }
         return appendStatusEvent(input);
     });
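The mirror scheduling above keeps a single promise chain and swallows errors on both sides of each link, so one failed store mirror can neither block nor reject the mirrors queued after it. Reduced to its skeleton (a generic illustration of the pattern, not the module's exact code):

```js
// Failure-tolerant serial mirror chain, as used by scheduleStatusEventMirror above.
let chain = Promise.resolve();
function schedule(task) {
    chain = chain
        .catch(() => { })   // a previous failure never propagates forward
        .then(task)
        .catch(() => { });  // this task's own failure is swallowed too
}
```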
package/dist/store/ace-packed-store.d.ts
CHANGED

@@ -1,42 +1,81 @@
 /**
- * AcePackedStore
+ * AcePackedStore — ACEPACK v2
  *
- *
- *
+ * Single-file binary store. One file, no subdirectories, no companion files.
+ * Events grow in the columnar section indefinitely (Option A). compact() only
+ * reclaims dead space in the KV chunk region — it does NOT reset or archive
+ * events. Historical event replay across all past runs is always available.
  *
- * File
- * [ Header 64 bytes ]  magic(8) version(4) flags(4) index_offset(8) index_length(8) reserved(32)
- * [ Chunk region ]     chunk_0: [uint32 length][bytes] ... chunk_N: [uint32 length][bytes]
- * [ Index region ]     UTF-8 JSON: { key → { offset, length } }
+ * File layout:
  *
- *
- *
- *
- *
+ * ┌─────────────────────────────────────────────────────────┐
+ * │ Header (128 bytes, big-endian)                          │
+ * │   magic "ACEPACK\0" · version uint32 · flags uint32     │
+ * │   kv_index_offset uint64 · kv_index_length uint64       │
+ * │   kv_chunk_end uint64                                   │
+ * │   evt_offset uint64 · evt_length uint64                 │
+ * │   evt_count uint32 · evt_base_id uint32                 │
+ * │   reserved (zeros)                                      │
+ * ├─────────────────────────────────────────────────────────┤
+ * │ KV Chunk Region (append-only, random-access)            │
+ * │   knowledge/agents/{name}/{file} → instruction text     │
+ * │   knowledge/skills/{name}/{file} → skill content        │
+ * │   topology/{kind}                → JSON array           │
+ * │   state/{...}                    → runtime state blobs  │
+ * │   meta/{...}                     → schema version etc.  │
+ * │   Each chunk: [uint32 length BE][bytes]                 │
+ * ├─────────────────────────────────────────────────────────┤
+ * │ Columnar Event Section (rewritten on every commit)      │
+ * │   uint32    count                                       │
+ * │   int64[N]  timestamps (epoch ms, big-endian)           │
+ * │   uint8[N]  kinds   (EntityKind enum)                   │
+ * │   uint8[N]  sources (ContentSource enum)                │
+ * │   uint8[N]  flags   (0x01=deleted)                      │
+ * │   [pad to 4-byte alignment]                             │
+ * │   uint32[N] pay_offsets (relative to pool start)        │
+ * │   uint32[N] pay_lengths                                 │
+ * │   uint32    pool_length                                 │
+ * │   bytes[M]  payload pool (UTF-8 JSON: {key,payload,...})│
+ * ├─────────────────────────────────────────────────────────┤
+ * │ KV Index (JSON, rewritten on commit)                    │
+ * │   { "knowledge/agents/ace-ops/AGENT.md": {offset, length}, ...
+ * └─────────────────────────────────────────────────────────┘
  *
- *
- *
+ * Event overhead: ~19 bytes fixed + payload (vs ~150 bytes full JSON).
+ * Timestamp/kind scanning reads only the fixed-width columns, never the pool.
  *
- *
- *
- *
+ * compact() removes dead KV space (overwritten keys) via atomic tmp-rename.
+ * Events are preserved across compact() — they accumulate for the workspace
+ * lifetime. See HOT_COLD_EVENT_TIERING.md for a future branch that adds
+ * BGZF batch archiving for very long-lived workspaces.
+ *
+ * Backward compat: v1 files (64-byte header) are migrated on first commit().
+ * v1 events live in core/log/ KV blobs and are pulled into the columnar section.
  */
 import { IAcePackedStore, UnifiedEntry, EntryFilter, TopologyEntry, ContentSourceCode } from "./types.js";
 export declare class AcePackedStore implements IAcePackedStore {
     private storePath;
-    private lockPath;
     private readOnly;
-    private index;
-    private chunkEnd;
     private fh;
-    private
-    private
+    private kvIndex;
+    private kvChunkEnd;
+    private committed;
+    private pending;
+    private evtBaseId;
     open(path: string, opts?: {
         readOnly?: boolean;
     }): Promise<void>;
-    private
-    private
+    private _initNew;
+    private _loadExisting;
+    /** Read a KV blob directly from a loaded file buffer (used during migration). */
+    private _readKvBlobDirect;
     commit(): Promise<void>;
+    /**
+     * Compacts the KV chunk region — removes dead space left by overwritten keys.
+     * Events are NOT reset or archived; they accumulate in the columnar section
+     * for the workspace lifetime (Option A — grow forever).
+     * See HOT_COLD_EVENT_TIERING.md for the future branch that adds BGZF archiving.
+     */
     compact(): Promise<void>;
     close(): Promise<void>;
     get(key: string): Promise<Uint8Array | undefined>;
@@ -49,7 +88,7 @@ export declare class AcePackedStore implements IAcePackedStore {
     setBlob(key: string, content: string): Promise<void>;
     appendEntry(entry: Omit<UnifiedEntry, "id" | "ts">): Promise<UnifiedEntry>;
     getEntries(filter?: EntryFilter): Promise<UnifiedEntry[]>;
-    getEntry(
+    getEntry(_key: string): Promise<UnifiedEntry | undefined>;
     getTopology(kind: string): Promise<TopologyEntry[]>;
     setTopology(kind: string, entries: TopologyEntry[]): Promise<void>;
     getAgentInstruction(agent: string, file: string): Promise<string | undefined>;
@@ -58,9 +97,9 @@ export declare class AcePackedStore implements IAcePackedStore {
     getSkillContent(skill: string, file: string): Promise<string | undefined>;
     setSkillContent(skill: string, file: string, content: string, source?: ContentSourceCode): Promise<void>;
     listSkills(): Promise<string[]>;
-    /** Dead space ratio
+    /** Dead space ratio in the KV chunk region. Used to decide if compaction is needed. */
     get deadSpaceRatio(): number;
-    /**
+    /** Total events in log (committed + pending). */
     get entryCount(): number;
 }
 export declare function openStore(storePath: string, opts?: {