@virtengine/openfleet 0.25.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +914 -0
- package/LICENSE +190 -0
- package/README.md +500 -0
- package/agent-endpoint.mjs +918 -0
- package/agent-hook-bridge.mjs +230 -0
- package/agent-hooks.mjs +1188 -0
- package/agent-pool.mjs +2403 -0
- package/agent-prompts.mjs +689 -0
- package/agent-sdk.mjs +141 -0
- package/anomaly-detector.mjs +1195 -0
- package/autofix.mjs +1294 -0
- package/claude-shell.mjs +708 -0
- package/cli.mjs +906 -0
- package/codex-config.mjs +1274 -0
- package/codex-model-profiles.mjs +135 -0
- package/codex-shell.mjs +762 -0
- package/config-doctor.mjs +613 -0
- package/config.mjs +1720 -0
- package/conflict-resolver.mjs +248 -0
- package/container-runner.mjs +450 -0
- package/copilot-shell.mjs +827 -0
- package/daemon-restart-policy.mjs +56 -0
- package/diff-stats.mjs +282 -0
- package/error-detector.mjs +829 -0
- package/fetch-runtime.mjs +34 -0
- package/fleet-coordinator.mjs +838 -0
- package/get-telegram-chat-id.mjs +71 -0
- package/git-safety.mjs +170 -0
- package/github-reconciler.mjs +403 -0
- package/hook-profiles.mjs +651 -0
- package/kanban-adapter.mjs +4491 -0
- package/lib/logger.mjs +645 -0
- package/maintenance.mjs +828 -0
- package/merge-strategy.mjs +1171 -0
- package/monitor.mjs +12207 -0
- package/openfleet.config.example.json +115 -0
- package/openfleet.schema.json +465 -0
- package/package.json +203 -0
- package/postinstall.mjs +187 -0
- package/pr-cleanup-daemon.mjs +978 -0
- package/preflight.mjs +408 -0
- package/prepublish-check.mjs +90 -0
- package/presence.mjs +328 -0
- package/primary-agent.mjs +282 -0
- package/publish.mjs +151 -0
- package/repo-root.mjs +29 -0
- package/restart-controller.mjs +100 -0
- package/review-agent.mjs +557 -0
- package/rotate-agent-logs.sh +133 -0
- package/sdk-conflict-resolver.mjs +973 -0
- package/session-tracker.mjs +880 -0
- package/setup.mjs +3937 -0
- package/shared-knowledge.mjs +410 -0
- package/shared-state-manager.mjs +841 -0
- package/shared-workspace-cli.mjs +199 -0
- package/shared-workspace-registry.mjs +537 -0
- package/shared-workspaces.json +18 -0
- package/startup-service.mjs +1070 -0
- package/sync-engine.mjs +1063 -0
- package/task-archiver.mjs +801 -0
- package/task-assessment.mjs +550 -0
- package/task-claims.mjs +924 -0
- package/task-complexity.mjs +581 -0
- package/task-executor.mjs +5111 -0
- package/task-store.mjs +753 -0
- package/telegram-bot.mjs +9281 -0
- package/telegram-sentinel.mjs +2010 -0
- package/ui/app.js +867 -0
- package/ui/app.legacy.js +1464 -0
- package/ui/app.monolith.js +2488 -0
- package/ui/components/charts.js +226 -0
- package/ui/components/chat-view.js +567 -0
- package/ui/components/command-palette.js +587 -0
- package/ui/components/diff-viewer.js +190 -0
- package/ui/components/forms.js +327 -0
- package/ui/components/kanban-board.js +451 -0
- package/ui/components/session-list.js +305 -0
- package/ui/components/shared.js +473 -0
- package/ui/index.html +70 -0
- package/ui/modules/api.js +297 -0
- package/ui/modules/icons.js +461 -0
- package/ui/modules/router.js +81 -0
- package/ui/modules/settings-schema.js +261 -0
- package/ui/modules/state.js +679 -0
- package/ui/modules/telegram.js +331 -0
- package/ui/modules/utils.js +270 -0
- package/ui/styles/animations.css +140 -0
- package/ui/styles/base.css +98 -0
- package/ui/styles/components.css +1915 -0
- package/ui/styles/kanban.css +286 -0
- package/ui/styles/layout.css +809 -0
- package/ui/styles/sessions.css +827 -0
- package/ui/styles/variables.css +188 -0
- package/ui/styles.css +141 -0
- package/ui/styles.monolith.css +1046 -0
- package/ui/tabs/agents.js +1417 -0
- package/ui/tabs/chat.js +74 -0
- package/ui/tabs/control.js +887 -0
- package/ui/tabs/dashboard.js +515 -0
- package/ui/tabs/infra.js +537 -0
- package/ui/tabs/logs.js +783 -0
- package/ui/tabs/settings.js +1487 -0
- package/ui/tabs/tasks.js +1385 -0
- package/ui-server.mjs +4073 -0
- package/update-check.mjs +465 -0
- package/utils.mjs +172 -0
- package/ve-kanban.mjs +654 -0
- package/ve-kanban.ps1 +1365 -0
- package/ve-kanban.sh +18 -0
- package/ve-orchestrator.mjs +340 -0
- package/ve-orchestrator.ps1 +6546 -0
- package/ve-orchestrator.sh +18 -0
- package/vibe-kanban-wrapper.mjs +41 -0
- package/vk-error-resolver.mjs +470 -0
- package/vk-log-stream.mjs +914 -0
- package/whatsapp-channel.mjs +520 -0
- package/workspace-monitor.mjs +581 -0
- package/workspace-reaper.mjs +405 -0
- package/workspace-registry.mjs +238 -0
- package/worktree-manager.mjs +1266 -0
|
@@ -0,0 +1,838 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* fleet-coordinator.mjs — Multi-workstation coordination for openfleet.
|
|
3
|
+
*
|
|
4
|
+
* Provides:
|
|
5
|
+
* - Repo fingerprinting: detect identical repos across workstations
|
|
6
|
+
* - Fleet discovery: enumerate active instances working on the same repo
|
|
7
|
+
* - Coordinator election integration: single leader dispatches tasks
|
|
8
|
+
* - Task slot aggregation: total parallel capacity across fleet
|
|
9
|
+
* - Conflict-aware task dispatch: order + assign tasks to minimize conflicts
|
|
10
|
+
* - Maintenance mode: when backlog is empty, fleet enters maintenance
|
|
11
|
+
*
|
|
12
|
+
* The coordinator (elected via presence.mjs) is the only instance that:
|
|
13
|
+
* 1. Triggers the task planner to generate new backlog items
|
|
14
|
+
* 2. Assigns execution order and workstation routing hints
|
|
15
|
+
* 3. Broadcasts fleet status updates
|
|
16
|
+
*
|
|
17
|
+
* Non-coordinator instances:
|
|
18
|
+
* - Report their presence and capacity
|
|
19
|
+
* - Pull tasks from VK backlog in the assigned order
|
|
20
|
+
* - Contribute shared knowledge entries
|
|
21
|
+
*/
|
|
22
|
+
|
|
23
|
+
import crypto from "node:crypto";
|
|
24
|
+
import { execSync } from "node:child_process";
|
|
25
|
+
import { readFile, writeFile, mkdir } from "node:fs/promises";
|
|
26
|
+
import { existsSync, readFileSync } from "node:fs";
|
|
27
|
+
import { resolve, dirname } from "node:path";
|
|
28
|
+
import { fileURLToPath } from "node:url";
|
|
29
|
+
import {
|
|
30
|
+
initPresence,
|
|
31
|
+
buildLocalPresence,
|
|
32
|
+
listActiveInstances,
|
|
33
|
+
selectCoordinator,
|
|
34
|
+
getPresenceState,
|
|
35
|
+
} from "./presence.mjs";
|
|
36
|
+
|
|
37
|
+
const __dirname = dirname(fileURLToPath(import.meta.url));
|
|
38
|
+
|
|
39
|
+
// ── Repo Fingerprinting ──────────────────────────────────────────────────────
|
|
40
|
+
|
|
41
|
+
/**
 * Build a child-process environment for git commands with inherited git
 * override variables removed, so each command resolves the repository
 * from its `cwd` rather than from ambient GIT_* settings.
 * @returns {object} a sanitized copy of process.env
 */
function buildGitEnv() {
  const sanitized = { ...process.env };
  for (const key of ["GIT_DIR", "GIT_WORK_TREE", "GIT_INDEX_FILE"]) {
    delete sanitized[key];
  }
  return sanitized;
}
|
48
|
+
|
|
49
|
+
/**
 * Generate a stable fingerprint for a git repository.
 * Two workstations with the same repo will produce the same fingerprint.
 *
 * Components (in order of reliability):
 * 1. Remote origin URL (normalized — strips .git suffix, protocol variance)
 * 2. Fallback: first commit hash (immutable root of the repo)
 *
 * @param {string} repoRoot - Path inside the git worktree
 * @returns {{method: string, raw: string, normalized: string, hash: string}|null}
 *   null when repoRoot is missing, not a worktree, or has no remote/commits
 */
export function computeRepoFingerprint(repoRoot) {
  if (!repoRoot) return null;

  // Ensure repoRoot is actually inside a git worktree.
  // FIX: this check now passes the sanitized env like every other git call
  // below; previously an inherited GIT_DIR could redirect it to another repo.
  try {
    const isWorktree = execSync("git rev-parse --is-inside-work-tree", {
      cwd: repoRoot,
      encoding: "utf8",
      env: buildGitEnv(),
      stdio: ["ignore", "pipe", "ignore"],
    }).trim();
    if (isWorktree !== "true") {
      return null;
    }
  } catch {
    return null; // not a git worktree (or git unavailable)
  }

  // Try remote origin URL first (most reliable for same-repo detection).
  let remoteUrl = null;
  try {
    remoteUrl = execSync("git config --get remote.origin.url", {
      cwd: repoRoot,
      encoding: "utf8",
      env: buildGitEnv(),
      stdio: ["ignore", "pipe", "ignore"],
    }).trim();
  } catch {
    // no remote configured — fall through to the root-commit strategy
  }

  if (remoteUrl) {
    const normalized = normalizeGitUrl(remoteUrl);
    return {
      method: "remote-origin",
      raw: remoteUrl,
      normalized,
      hash: hashString(normalized),
    };
  }

  // Fallback: first commit hash (root of the DAG). A repo can have multiple
  // roots (e.g. merged histories) — take the first line deterministically.
  try {
    const rootCommit = execSync("git rev-list --max-parents=0 HEAD", {
      cwd: repoRoot,
      encoding: "utf8",
      env: buildGitEnv(),
      stdio: ["ignore", "pipe", "ignore"],
    }).trim().split("\n")[0];

    if (rootCommit) {
      return {
        method: "root-commit",
        raw: rootCommit,
        normalized: rootCommit,
        hash: hashString(rootCommit),
      };
    }
  } catch {
    // not a git repo or no commits
  }

  return null;
}
|
|
120
|
+
|
|
121
|
+
/**
 * Normalize a git URL to strip protocol/auth/suffix variance.
 * Examples:
 *   https://github.com/acme/widgets.git → github.com/acme/widgets
 *   git@github.com:acme/widgets.git     → github.com/acme/widgets
 *   ssh://git@github.com/acme/widgets   → github.com/acme/widgets
 * @param {string} url - Raw remote URL (any protocol / scp-style syntax)
 * @returns {string} lowercase "host/path" form, or "" for empty input
 */
export function normalizeGitUrl(url) {
  if (!url) return "";
  const canonical = String(url)
    .trim()
    .replace(/^(?:https?|ssh|git):\/\//, "") // drop the protocol scheme
    .replace(/^[^@]+@/, "") // drop user@ credential prefix
    .replace(/^([^/:]+):/, "$1/") // scp syntax: host:path → host/path
    .replace(/\.git$/, "") // drop .git suffix
    .replace(/\/+$/, ""); // drop trailing slashes
  return canonical.toLowerCase();
}
|
|
149
|
+
|
|
150
|
+
/** Short stable digest: the first 16 hex chars of SHA-256 of `s`. */
function hashString(s) {
  const fullDigest = crypto.createHash("sha256").update(s).digest("hex");
  return fullDigest.slice(0, 16);
}
|
|
153
|
+
|
|
154
|
+
// ── Fleet State ──────────────────────────────────────────────────────────────
|
|
155
|
+
|
|
156
|
+
// Filename for persisted fleet state. NOTE(review): not referenced in this
// chunk — presumably used by code later in the file; confirm before removing.
const FLEET_STATE_FILENAME = "fleet-state.json";

// Module-level mutable singleton: this instance's view of the fleet.
// Populated by initFleet() and kept current by refreshFleet().
const fleetState = {
  initialized: false, // true once initFleet() has completed
  repoFingerprint: null, // result of computeRepoFingerprint() ({ method, raw, normalized, hash })
  isCoordinator: false, // true when this instance is the elected (or solo) coordinator
  fleetSize: 0, // active instances sharing this repo, including self
  totalSlots: 0, // aggregate parallel-agent capacity across the fleet
  localSlots: 0, // parallel-agent capacity on this workstation
  mode: "solo", // solo | fleet | maintenance
  peers: [], // instances with same repo fingerprint
  dispatchOrder: [], // task IDs in conflict-minimized order
  lastSyncAt: null, // ISO timestamp of the last fleet refresh
};
|
|
170
|
+
|
|
171
|
+
/**
 * Initialize fleet coordination.
 * Must be called after presence.mjs is initialized.
 *
 * @param {object} opts
 * @param {string} opts.repoRoot - Git repository root
 * @param {number} [opts.localSlots=6] - Max parallel agents on this workstation
 * @param {number} [opts.ttlMs] - Presence TTL (default: 5 min)
 * @param {object} [opts.localWorkspace] - Workspace identity from registry
 * @returns {Promise<object>} the shared fleetState singleton
 */
export async function initFleet(opts = {}) {
  const { repoRoot, localSlots = 6, ttlMs = 5 * 60 * 1000, localWorkspace } = opts;

  if (!repoRoot) {
    // Without a repo there is no fingerprint and no peer matching — degrade
    // gracefully to a standalone configuration.
    console.warn("[fleet] No repo root provided — running in solo mode");
    fleetState.mode = "solo";
    fleetState.localSlots = localSlots;
    fleetState.totalSlots = localSlots;
    fleetState.initialized = true;
    return fleetState;
  }

  // Fingerprint the repo so peers on other workstations can be matched.
  fleetState.repoFingerprint = computeRepoFingerprint(repoRoot);
  fleetState.localSlots = localSlots;

  // Presence must be up before we can enumerate the fleet.
  await initPresence({ repoRoot, localWorkspace });
  await refreshFleet({ ttlMs });

  fleetState.initialized = true;
  const fingerprintLabel = fleetState.repoFingerprint?.hash || "none";
  console.log(
    `[fleet] initialized: mode=${fleetState.mode}, peers=${fleetState.fleetSize}, totalSlots=${fleetState.totalSlots}, fingerprint=${fingerprintLabel}`,
  );

  return fleetState;
}
|
|
211
|
+
|
|
212
|
+
/**
 * Refresh fleet state from presence data.
 * Called periodically by the maintenance loop.
 *
 * @param {object} [opts]
 * @param {number} [opts.ttlMs=300000] - Max presence age to count an instance as active
 * @returns {Promise<object>} the updated fleetState singleton
 */
export async function refreshFleet({ ttlMs = 5 * 60 * 1000 } = {}) {
  const nowMs = Date.now();
  const allInstances = listActiveInstances({ nowMs, ttlMs });
  const localFingerprint = fleetState.repoFingerprint?.hash;

  if (!localFingerprint || allInstances.length <= 1) {
    // Solo mode — only us (or no fingerprint to match peers against).
    fleetState.mode = "solo";
    fleetState.fleetSize = 1;
    fleetState.totalSlots = fleetState.localSlots;
    fleetState.peers = [];
    fleetState.isCoordinator = true; // solo = always coordinator
    fleetState.lastSyncAt = new Date().toISOString();
    return fleetState;
  }

  // Filter to peers with matching repo fingerprint.
  const peers = allInstances.filter((inst) => {
    const peerFingerprint = inst.repo_fingerprint;
    return peerFingerprint && peerFingerprint === localFingerprint;
  });
  fleetState.peers = peers;

  // Aggregate capacity; assume 6 slots for peers that don't report theirs.
  let totalSlots = 0;
  for (const peer of peers) {
    totalSlots += typeof peer.max_parallel === "number" ? peer.max_parallel : 6;
  }

  // FIX: count ourselves exactly once. The previous code set
  // fleetSize = Math.max(1, peers.length) BEFORE adding self, so an empty
  // peer list produced fleetSize 2 (and "fleet" mode) for a lone instance.
  const myId = getPresenceState().instance_id;
  let fleetSize = peers.length;
  if (!peers.some((p) => p.instance_id === myId)) {
    totalSlots += fleetState.localSlots;
    fleetSize += 1;
  }
  fleetState.fleetSize = Math.max(1, fleetSize);
  fleetState.totalSlots = totalSlots || fleetState.localSlots;

  // Coordinator election: a single leader dispatches tasks for the fleet.
  const coordinator = selectCoordinator({ nowMs, ttlMs });
  fleetState.isCoordinator = coordinator?.instance_id === myId;

  fleetState.mode = fleetState.fleetSize > 1 ? "fleet" : "solo";
  fleetState.lastSyncAt = new Date().toISOString();

  return fleetState;
}
|
|
264
|
+
|
|
265
|
+
// ── Fleet-Aware Presence Payload ─────────────────────────────────────────────
|
|
266
|
+
|
|
267
|
+
/**
 * Build a presence payload enriched with fleet coordination data.
 * This is broadcast to other instances so they can match repos and
 * aggregate fleet capacity.
 * @param {object} [extra] - Extra fields forwarded to buildLocalPresence
 * @returns {object} base presence merged with fleet fields
 */
export function buildFleetPresence(extra = {}) {
  const fleetFields = {
    repo_fingerprint: fleetState.repoFingerprint?.hash || null,
    max_parallel: fleetState.localSlots,
    fleet_mode: fleetState.mode,
    is_coordinator: fleetState.isCoordinator,
  };
  return { ...buildLocalPresence(extra), ...fleetFields };
}
|
|
281
|
+
|
|
282
|
+
// ── Conflict-Aware Task Ordering ────────────────────────────────────────────
|
|
283
|
+
|
|
284
|
+
/**
 * File-path based conflict graph for tasks.
 * Tasks touching overlapping file paths (or sharing a scope) should not run
 * in parallel; this groups tasks into "waves" of mutually conflict-free IDs.
 *
 * FIX: removed the `taskById` map the original built — it was never read.
 *
 * @param {Array<{id: string, title: string, scope?: string, filePaths?: string[]}>} tasks
 * @returns {Array<Array<string>>} waves — groups of task IDs safe for parallel execution
 */
export function buildExecutionWaves(tasks) {
  if (!tasks || tasks.length === 0) return [];

  // Conflict sets: tasks sharing a scope or touching the same file conflict.
  const scopeMap = new Map(); // scope → [taskId, ...]
  const fileMap = new Map(); // normalized filePath → [taskId, ...]

  for (const task of tasks) {
    const id = task.id || task.title;

    // Scope-based conflicts (explicit scope, or parsed from the title).
    const scope = task.scope || extractScopeFromTask(task.title);
    if (scope) {
      if (!scopeMap.has(scope)) scopeMap.set(scope, []);
      scopeMap.get(scope).push(id);
    }

    // File-path based conflicts (when available).
    if (Array.isArray(task.filePaths)) {
      for (const fp of task.filePaths) {
        // Normalize separators/case so Windows and POSIX paths compare equal.
        const normalizedPath = fp.replace(/\\/g, "/").toLowerCase();
        if (!fileMap.has(normalizedPath)) fileMap.set(normalizedPath, []);
        fileMap.get(normalizedPath).push(id);
      }
    }
  }

  // Build adjacency list (conflict graph).
  const conflicts = new Map(); // taskId → Set<conflicting taskIds>
  for (const [, taskIds] of [...scopeMap, ...fileMap]) {
    if (taskIds.length > 1) {
      for (let i = 0; i < taskIds.length; i++) {
        for (let j = i + 1; j < taskIds.length; j++) {
          if (!conflicts.has(taskIds[i])) conflicts.set(taskIds[i], new Set());
          if (!conflicts.has(taskIds[j])) conflicts.set(taskIds[j], new Set());
          conflicts.get(taskIds[i]).add(taskIds[j]);
          conflicts.get(taskIds[j]).add(taskIds[i]);
        }
      }
    }
  }

  // Greedy graph coloring (Welsh-Powell): color classes become waves.
  const allIds = tasks.map((t) => t.id || t.title);
  const sortedIds = [...allIds].sort((a, b) => {
    const ca = conflicts.get(a)?.size || 0;
    const cb = conflicts.get(b)?.size || 0;
    return cb - ca; // highest degree first
  });

  const waves = [];
  const assigned = new Set();

  for (const taskId of sortedIds) {
    if (assigned.has(taskId)) continue;

    // Find first wave this task can join (no conflicts with existing members).
    let placed = false;
    for (const wave of waves) {
      const hasConflict = wave.some(
        (wId) => conflicts.get(taskId)?.has(wId) || conflicts.get(wId)?.has(taskId),
      );
      if (!hasConflict) {
        wave.push(taskId);
        assigned.add(taskId);
        placed = true;
        break;
      }
    }

    if (!placed) {
      waves.push([taskId]);
      assigned.add(taskId);
    }
  }

  return waves;
}
|
|
371
|
+
|
|
372
|
+
/**
 * Extract the scope from a task title written in conventional-commit format.
 * E.g., "feat(veid): add flow" → "veid". An optional "[P<n>]" priority
 * prefix is tolerated. Returns null when no scope can be parsed.
 * @param {string} title
 * @returns {string|null} lowercase scope or null
 */
function extractScopeFromTask(title) {
  if (!title) return null;
  const scopePattern =
    /^(?:\[P\d+\]\s*)?(?:feat|fix|docs|style|refactor|perf|test|build|ci|chore|revert)\(([^)]+)\)/i;
  const match = scopePattern.exec(title);
  return match?.[1].toLowerCase() ?? null;
}
|
|
383
|
+
|
|
384
|
+
// ── Workstation Task Assignment ──────────────────────────────────────────────
|
|
385
|
+
|
|
386
|
+
/**
 * Given a set of execution waves and fleet peers, assign tasks to workstations.
 * Tasks within a wave are round-robined across peers; a peer whose declared
 * capabilities mention the task's scope is preferred over the round-robin pick.
 *
 * @param {Array<Array<string>>} waves - Output of buildExecutionWaves
 * @param {Array<{instance_id: string, max_parallel?: number, capabilities?: string[]}>} peers
 * @param {Map<string, object>} [taskMap] - Task metadata keyed by task ID
 * @returns {object} dispatchPlan ({ assignments, totalTasks, totalPeers, ... })
 */
export function assignTasksToWorkstations(waves, peers, taskMap = new Map()) {
  if (!peers || peers.length === 0 || !waves || waves.length === 0) {
    return { assignments: [], totalTasks: 0, totalPeers: 0 };
  }

  // Capability-based routing: prefer a peer advertising the task's scope;
  // fall back to the round-robin choice when nothing matches.
  const routeByCapability = (task, fallback) => {
    if (!task?.scope) return fallback;
    const match = peers.find(
      (p) =>
        Array.isArray(p.capabilities) &&
        p.capabilities.some((c) => c.toLowerCase().includes(task.scope.toLowerCase())),
    );
    return match || fallback;
  };

  const assignments = [];
  waves.forEach((wave, waveIdx) => {
    const waveNumber = waveIdx + 1;
    wave.forEach((taskId, slot) => {
      const task = taskMap.get(taskId);
      const target = routeByCapability(task, peers[slot % peers.length]);
      assignments.push({
        taskId,
        taskTitle: task?.title || taskId,
        wave: waveNumber,
        assignedTo: target.instance_id,
        assignedToLabel: target.instance_label || target.instance_id,
      });
    });
  });

  return {
    assignments,
    totalTasks: assignments.length,
    totalPeers: peers.length,
    waveCount: waves.length,
    createdAt: new Date().toISOString(),
  };
}
|
|
445
|
+
|
|
446
|
+
// ── Backlog Depth Calculator ─────────────────────────────────────────────────
|
|
447
|
+
|
|
448
|
+
/**
 * Calculate how many tasks should be in the backlog based on fleet size.
 * More workstations = deeper backlog to keep everyone busy.
 *
 * @param {object} opts
 * @param {number} [opts.totalSlots=6] - Total parallel agent slots across fleet
 * @param {number} [opts.currentBacklog=0] - Current todo tasks in VK
 * @param {number} [opts.bufferMultiplier=3] - How many rounds of work to buffer
 * @param {number} [opts.minTasks=6] - Minimum backlog depth
 * @param {number} [opts.maxTasks=100] - Cap to prevent over-generation
 * @returns {object} { targetDepth, deficit, shouldGenerate, formula, ... }
 */
export function calculateBacklogDepth(opts = {}) {
  const {
    totalSlots = 6,
    currentBacklog = 0,
    bufferMultiplier = 3,
    minTasks = 6,
    maxTasks = 100,
  } = opts;

  // Target enough tasks for `bufferMultiplier` full rounds of parallel
  // execution, clamped so tiny fleets still get work and huge fleets don't
  // trigger runaway generation.
  const rawTarget = totalSlots * bufferMultiplier;
  const targetDepth = Math.max(minTasks, Math.min(rawTarget, maxTasks));
  const deficit = Math.max(0, targetDepth - currentBacklog);

  const report = {
    totalSlots,
    currentBacklog,
    targetDepth,
    deficit,
    shouldGenerate: deficit > 0,
    formula: `${totalSlots} slots × ${bufferMultiplier} buffer = ${rawTarget} (clamped to ${targetDepth})`,
  };
  return report;
}
|
|
483
|
+
|
|
484
|
+
// ── Maintenance Mode Detection ───────────────────────────────────────────────
|
|
485
|
+
|
|
486
|
+
/**
 * Determine if the fleet should enter maintenance mode.
 * Maintenance mode means: all functional work is done, switch to
 * housekeeping (dependency updates, test coverage, refactoring, docs).
 *
 * @param {object} status - VK project status ({ counts, backlog_remaining })
 * @returns {{isMaintenanceMode: boolean, reason: string}}
 */
export function detectMaintenanceMode(status) {
  if (!status) return { isMaintenanceMode: false, reason: "no status data" };

  const counts = status.counts || {};
  const backlog = status.backlog_remaining ?? 0;
  const running = counts.running ?? 0;
  const review = counts.review ?? 0;
  const todo = counts.todo ?? 0;

  // Everything drained: no queued work and nothing in flight or in review.
  const fullyIdle = backlog === 0 && todo === 0 && running === 0 && review === 0;
  if (fullyIdle) {
    return {
      isMaintenanceMode: true,
      reason: "all tasks completed — no backlog, no active work",
    };
  }

  return {
    isMaintenanceMode: false,
    reason: `active: backlog=${backlog} todo=${todo} running=${running} review=${review}`,
  };
}
|
|
516
|
+
|
|
517
|
+
// ── Task List Sharing ────────────────────────────────────────────────────────
|
|
518
|
+
|
|
519
|
+
/**
 * @typedef {object} SharedTaskList
 * @property {string} instanceId - Which workstation published this list
 * @property {string} instanceLabel - Human-readable label (falls back to instanceId)
 * @property {string} repoFingerprint - Fingerprint hash used to match repos across peers
 * @property {Array<{id: string, title: string, status: string, size?: string, complexity?: string}>} tasks
 * @property {string} publishedAt - ISO timestamp
 */

// Filename under which each workstation publishes its task list for peers
// to read (written by publishTaskList, consumed via readPeerTaskList).
const SHARED_TASKS_FILENAME = "fleet-tasks.json";
|
|
529
|
+
|
|
530
|
+
/**
 * Publish this workstation's current task list so peers can pull it.
 * Called periodically by the fleet sync loop. Persistence failures are
 * logged but non-fatal; the built payload is always returned.
 *
 * @param {object} opts
 * @param {string} opts.repoRoot - Git repository root for persistence
 * @param {Array<object>} opts.tasks - Current tasks (from VK or orchestrator)
 * @returns {Promise<SharedTaskList>}
 */
export async function publishTaskList({ repoRoot, tasks = [] } = {}) {
  const presenceState = getPresenceState();
  const payload = {
    instanceId: presenceState.instance_id,
    instanceLabel: presenceState.instance_label || presenceState.instance_id,
    repoFingerprint: fleetState.repoFingerprint?.hash || null,
    tasks: tasks.map((t) => ({
      id: t.id,
      title: t.title || "",
      status: t.status || "unknown",
      size: t.size || t.metadata?.size || null,
      complexity: t.complexity || null,
    })),
    publishedAt: new Date().toISOString(),
  };

  try {
    // FIX: the original resolved against FLEET_STATE_DIR, which is not
    // defined anywhere in this module — every call threw a ReferenceError
    // (swallowed by the catch below), so the task list was never written.
    // ".openfleet" is presumed to be the package's per-repo state directory —
    // TODO(review): confirm the directory name against presence.mjs.
    const dir = resolve(repoRoot || process.cwd(), ".openfleet");
    await mkdir(dir, { recursive: true });
    const path = resolve(dir, SHARED_TASKS_FILENAME);
    await writeFile(path, JSON.stringify(payload, null, 2), "utf8");
  } catch (err) {
    // Best-effort persistence: peers simply won't see this round's list.
    console.warn(`[fleet] publishTaskList error: ${err.message}`);
  }

  return payload;
}
|
|
566
|
+
|
|
567
|
+
/**
 * Read another workstation's shared task list (from their fleet-tasks.json).
 * In practice, workstations share this via a shared filesystem or git sync.
 * Any failure (missing file, bad JSON, wrong shape) yields null.
 *
 * @param {string} filePath - Path to the fleet-tasks.json file
 * @returns {Promise<SharedTaskList|null>}
 */
export async function readPeerTaskList(filePath) {
  try {
    if (!existsSync(filePath)) return null;
    const parsed = JSON.parse(await readFile(filePath, "utf8"));
    // Minimal shape validation before trusting the payload.
    const looksValid = Boolean(parsed.instanceId) && Array.isArray(parsed.tasks);
    return looksValid ? parsed : null;
  } catch {
    return null;
  }
}
|
|
586
|
+
|
|
587
|
+
/**
 * Bootstrap a new workstation from an existing peer's task list.
 * When a new workstation joins with no local backlog, it can pull tasks
 * from the coordinator or any active peer.
 *
 * @param {object} opts
 * @param {Array<SharedTaskList>} opts.peerLists - Task lists from peers
 * @param {string} [opts.myInstanceId] - This workstation's instance ID (to exclude self)
 * @returns {{ tasks: Array, source: string, sourceLabel: string, totalAvailable: number }|null}
 */
export function bootstrapFromPeer({ peerLists = [], myInstanceId } = {}) {
  if (!peerLists.length) return null;

  // Keep only usable donors: never ourselves, and (when we have a
  // fingerprint) only peers working on the same repo.
  const myFingerprint = fleetState.repoFingerprint?.hash;
  const candidates = peerLists.filter((pl) => {
    if (myInstanceId && pl.instanceId === myInstanceId) return false;
    if (myFingerprint && pl.repoFingerprint !== myFingerprint) return false;
    return true;
  });
  if (!candidates.length) return null;

  // Choose the donor with the deepest todo backlog.
  const countTodo = (pl) => pl.tasks.filter((t) => t.status === "todo").length;
  let best = null;
  let bestTodoCount = 0;
  for (const candidate of candidates) {
    const todos = countTodo(candidate);
    if (todos > bestTodoCount) {
      bestTodoCount = todos;
      best = candidate;
    }
  }
  if (!best || bestTodoCount === 0) return null;

  return {
    tasks: best.tasks.filter((t) => t.status === "todo"),
    source: best.instanceId,
    sourceLabel: best.instanceLabel,
    totalAvailable: bestTodoCount,
  };
}
|
|
636
|
+
|
|
637
|
+
// ── Task Auto-Generation Trigger ─────────────────────────────────────────────
|
|
638
|
+
|
|
639
|
+
/**
 * @typedef {object} AutoGenDecision
 * @property {boolean} shouldGenerate - Whether to trigger task generation
 * @property {string} reason - Why/why not
 * @property {number} deficit - How many tasks are needed
 * @property {boolean} needsApproval - Whether user approval is required first
 * @property {string} mode - "auto" | "confirm" | "skip"
 */

/** @type {number|null} Epoch ms of the last auto-generation trigger (drives the cooldown check); null until the first trigger. */
let lastAutoGenTimestamp = null;
|
|
650
|
+
|
|
651
|
+
/**
 * Decide whether to trigger automatic task generation.
 *
 * Generation is allowed only when ALL of the following hold:
 * 1. The planner is not disabled.
 * 2. In fleet mode, this instance is the coordinator (only the coordinator
 *    generates tasks for the fleet).
 * 3. The cooldown since the last generation has elapsed.
 * 4. The backlog is below the target depth for the fleet's capacity.
 *
 * @param {object} [opts]
 * @param {number} [opts.currentBacklog=0] - Current todo task count
 * @param {string} [opts.plannerMode="kanban"] - "codex-sdk" | "kanban" | "disabled"
 * @param {number} [opts.cooldownMs=3600000] - Min time between generations (default: 1 hour)
 * @param {boolean} [opts.requireApproval=true] - Whether to require user confirmation
 * @returns {AutoGenDecision}
 */
export function shouldAutoGenerateTasks({
  currentBacklog = 0,
  plannerMode = "kanban",
  cooldownMs = 60 * 60 * 1000,
  requireApproval = true,
} = {}) {
  // Disabled planner → skip
  if (plannerMode === "disabled") {
    return {
      shouldGenerate: false,
      reason: "planner disabled",
      deficit: 0,
      needsApproval: false,
      mode: "skip",
    };
  }

  // Not coordinator in fleet mode → skip (only coordinator generates)
  if (fleetState.mode === "fleet" && !fleetState.isCoordinator) {
    return {
      shouldGenerate: false,
      reason: "not fleet coordinator",
      deficit: 0,
      needsApproval: false,
      mode: "skip",
    };
  }

  // Cooldown check. Read the clock once so the gate and the reported
  // remaining time are derived from the same instant (previously Date.now()
  // was called twice, letting the message drift from the comparison).
  if (lastAutoGenTimestamp) {
    const elapsedMs = Date.now() - lastAutoGenTimestamp;
    if (elapsedMs < cooldownMs) {
      const remainingMs = cooldownMs - elapsedMs;
      return {
        shouldGenerate: false,
        reason: `cooldown active (${Math.ceil(remainingMs / 60000)} min remaining)`,
        deficit: 0,
        needsApproval: false,
        mode: "skip",
      };
    }
  }

  // Calculate backlog depth against total fleet capacity; fall back to the
  // local slot count when totalSlots is unset/zero.
  const depth = calculateBacklogDepth({
    totalSlots: fleetState.totalSlots || fleetState.localSlots,
    currentBacklog,
  });

  if (!depth.shouldGenerate) {
    return {
      shouldGenerate: false,
      reason: `backlog sufficient (${currentBacklog}/${depth.targetDepth})`,
      deficit: 0,
      needsApproval: false,
      mode: "skip",
    };
  }

  return {
    shouldGenerate: true,
    reason: `backlog low (${currentBacklog}/${depth.targetDepth}, deficit=${depth.deficit})`,
    deficit: depth.deficit,
    needsApproval: requireApproval,
    mode: requireApproval ? "confirm" : "auto",
  };
}
|
|
731
|
+
|
|
732
|
+
/**
 * Record the current time as the most recent auto-generation trigger so the
 * cooldown checked by shouldAutoGenerateTasks() starts counting from now.
 */
export function markAutoGenTriggered() {
  const now = Date.now();
  lastAutoGenTimestamp = now;
}
|
|
738
|
+
|
|
739
|
+
/**
 * Clear the auto-generation cooldown timer (intended for tests), so the next
 * shouldAutoGenerateTasks() call is not blocked by a previous trigger.
 */
export function resetAutoGenCooldown() {
  lastAutoGenTimestamp = null;
}
|
|
745
|
+
// ── Fleet State Persistence ──────────────────────────────────────────────────
|
|
746
|
+
|
|
747
|
+
// Repo-relative directory where the fleet state snapshot is cached on disk.
const FLEET_STATE_DIR = ".cache/openfleet";
|
|
748
|
+
|
|
749
|
+
/**
 * Resolve the absolute on-disk path of the fleet state file under the given
 * repo root (falling back to the current working directory), creating the
 * cache directory if it does not exist yet.
 *
 * @param {string} [repoRoot] - Repository root; falls back to process.cwd()
 * @returns {Promise<string>} Absolute path to the fleet state file
 */
async function getFleetStatePath(repoRoot) {
  const baseDir = repoRoot || process.cwd();
  const stateDir = resolve(baseDir, FLEET_STATE_DIR);
  await mkdir(stateDir, { recursive: true });
  return resolve(stateDir, FLEET_STATE_FILENAME);
}
|
|
754
|
+
|
|
755
|
+
/**
 * Write a JSON snapshot of the in-memory fleet state to the repo-local cache
 * file. Peers are reduced to a stable subset of fields and the payload is
 * stamped with an updatedAt ISO timestamp. Persistence is best-effort:
 * failures are logged as warnings and swallowed.
 *
 * @param {string} [repoRoot] - Repository root used to locate the state file
 * @returns {Promise<void>}
 */
export async function persistFleetState(repoRoot) {
  try {
    const statePath = await getFleetStatePath(repoRoot);
    // Keep only the durable peer fields; drop anything transient.
    const serializedPeers = fleetState.peers.map((peer) => ({
      instance_id: peer.instance_id,
      instance_label: peer.instance_label,
      max_parallel: peer.max_parallel,
      capabilities: peer.capabilities,
      host: peer.host,
    }));
    const snapshot = {
      ...fleetState,
      peers: serializedPeers,
      updatedAt: new Date().toISOString(),
    };
    await writeFile(statePath, JSON.stringify(snapshot, null, 2), "utf8");
  } catch (err) {
    console.warn(`[fleet] persist error: ${err.message}`);
  }
}
|
|
774
|
+
|
|
775
|
+
/**
 * Read the persisted fleet state snapshot from the repo-local cache.
 *
 * Returns the parsed JSON object, or null when the file does not exist, is
 * unreadable, or contains invalid JSON — callers treat null as "no saved
 * state". The previous implementation pre-checked with the synchronous
 * existsSync(), which was a blocking call inside an async function and a
 * TOCTOU race; letting readFile fail and mapping every error to null is
 * observably equivalent and race-free.
 *
 * @param {string} [repoRoot] - Repository root used to locate the state file
 * @returns {Promise<object|null>} Parsed fleet state, or null
 */
export async function loadFleetState(repoRoot) {
  try {
    const path = await getFleetStatePath(repoRoot);
    const raw = await readFile(path, "utf8");
    return JSON.parse(raw);
  } catch {
    return null;
  }
}
|
|
785
|
+
|
|
786
|
+
// ── Public Getters ───────────────────────────────────────────────────────────
|
|
787
|
+
|
|
788
|
+
/**
 * Snapshot the current fleet state as a new object. The copy is shallow, so
 * nested values (e.g. the peers array) remain shared with the live state.
 *
 * @returns {object} Shallow copy of the fleet state
 */
export function getFleetState() {
  return Object.assign({}, fleetState);
}
|
|
791
|
+
|
|
792
|
+
/** @returns {boolean} Whether this instance is currently the fleet coordinator. */
export function isFleetCoordinator() {
  const { isCoordinator } = fleetState;
  return isCoordinator;
}
|
|
795
|
+
|
|
796
|
+
/** @returns {string} The current fleet mode string (e.g. "fleet"). */
export function getFleetMode() {
  const { mode } = fleetState;
  return mode;
}
|
|
799
|
+
|
|
800
|
+
/** @returns {number} Number of workstations currently counted in the fleet. */
export function getFleetSize() {
  const { fleetSize } = fleetState;
  return fleetSize;
}
|
|
803
|
+
|
|
804
|
+
/** @returns {number} Total agent slots aggregated across the whole fleet. */
export function getTotalFleetSlots() {
  const { totalSlots } = fleetState;
  return totalSlots;
}
|
|
807
|
+
|
|
808
|
+
/**
 * Format a human-readable fleet status summary.
 *
 * Renders mode, repo fingerprint, coordinator role, and slot counts; appends
 * a peer listing (coordinator peers flagged with a star) and the last sync
 * time when present.
 *
 * @returns {string} Multi-line summary text
 */
export function formatFleetSummary() {
  const {
    repoFingerprint: fp,
    mode,
    isCoordinator,
    fleetSize,
    totalSlots,
    localSlots,
    peers,
    lastSyncAt,
  } = fleetState;

  const out = [
    `🛰️ Fleet Status: ${mode.toUpperCase()}`,
    `Repo: ${fp?.normalized || "unknown"} (${fp?.hash?.slice(0, 8) || "no fingerprint"})`,
    `Coordinator: ${isCoordinator ? "THIS INSTANCE" : "remote"}`,
    `Fleet size: ${fleetSize} workstation(s)`,
    `Total slots: ${totalSlots}`,
    `Local slots: ${localSlots}`,
  ];

  if (peers.length > 0) {
    const peerLines = peers.map((peer) => {
      const label = peer.instance_label || peer.instance_id;
      const slots = peer.max_parallel ?? "?";
      const host = peer.host || "unknown";
      const coordTag = peer.is_coordinator ? " ⭐" : "";
      return ` • ${label}${coordTag} — ${host} (${slots} slots)`;
    });
    out.push("", "Peers:", ...peerLines);
  }

  if (lastSyncAt) {
    out.push("", `Last sync: ${lastSyncAt}`);
  }

  return out.join("\n");
}
|