oh-my-codex 0.2.2 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +56 -23
- package/dist/cli/__tests__/doctor-team.test.d.ts +2 -0
- package/dist/cli/__tests__/doctor-team.test.d.ts.map +1 -0
- package/dist/cli/__tests__/doctor-team.test.js +151 -0
- package/dist/cli/__tests__/doctor-team.test.js.map +1 -0
- package/dist/cli/__tests__/index.test.js +31 -1
- package/dist/cli/__tests__/index.test.js.map +1 -1
- package/dist/cli/__tests__/session-scoped-runtime.test.js +2 -0
- package/dist/cli/__tests__/session-scoped-runtime.test.js.map +1 -1
- package/dist/cli/__tests__/setup-gh-star.test.d.ts +2 -0
- package/dist/cli/__tests__/setup-gh-star.test.d.ts.map +1 -0
- package/dist/cli/__tests__/setup-gh-star.test.js +59 -0
- package/dist/cli/__tests__/setup-gh-star.test.js.map +1 -0
- package/dist/cli/doctor.d.ts +1 -0
- package/dist/cli/doctor.d.ts.map +1 -1
- package/dist/cli/doctor.js +172 -1
- package/dist/cli/doctor.js.map +1 -1
- package/dist/cli/index.d.ts +2 -0
- package/dist/cli/index.d.ts.map +1 -1
- package/dist/cli/index.js +140 -2
- package/dist/cli/index.js.map +1 -1
- package/dist/cli/setup.d.ts.map +1 -1
- package/dist/cli/setup.js +34 -0
- package/dist/cli/setup.js.map +1 -1
- package/dist/cli/team.d.ts +6 -0
- package/dist/cli/team.d.ts.map +1 -0
- package/dist/cli/team.js +140 -0
- package/dist/cli/team.js.map +1 -0
- package/dist/cli/tmux-hook.d.ts +1 -0
- package/dist/cli/tmux-hook.d.ts.map +1 -1
- package/dist/cli/tmux-hook.js +131 -24
- package/dist/cli/tmux-hook.js.map +1 -1
- package/dist/config/__tests__/generator-notify.test.js +79 -12
- package/dist/config/__tests__/generator-notify.test.js.map +1 -1
- package/dist/config/generator.d.ts.map +1 -1
- package/dist/config/generator.js +12 -5
- package/dist/config/generator.js.map +1 -1
- package/dist/hooks/__tests__/keyword-detector.test.d.ts +2 -0
- package/dist/hooks/__tests__/keyword-detector.test.d.ts.map +1 -0
- package/dist/hooks/__tests__/keyword-detector.test.js +39 -0
- package/dist/hooks/__tests__/keyword-detector.test.js.map +1 -0
- package/dist/hooks/__tests__/notify-hook-team-leader-nudge.test.d.ts +2 -0
- package/dist/hooks/__tests__/notify-hook-team-leader-nudge.test.d.ts.map +1 -0
- package/dist/hooks/__tests__/notify-hook-team-leader-nudge.test.js +99 -0
- package/dist/hooks/__tests__/notify-hook-team-leader-nudge.test.js.map +1 -0
- package/dist/hooks/__tests__/tmux-hook-engine.test.js +36 -0
- package/dist/hooks/__tests__/tmux-hook-engine.test.js.map +1 -1
- package/dist/hooks/emulator.d.ts.map +1 -1
- package/dist/hooks/emulator.js +3 -0
- package/dist/hooks/emulator.js.map +1 -1
- package/dist/hooks/keyword-detector.d.ts.map +1 -1
- package/dist/hooks/keyword-detector.js +2 -1
- package/dist/hooks/keyword-detector.js.map +1 -1
- package/dist/mcp/__tests__/state-server-team-tools.test.d.ts +2 -0
- package/dist/mcp/__tests__/state-server-team-tools.test.d.ts.map +1 -0
- package/dist/mcp/__tests__/state-server-team-tools.test.js +128 -0
- package/dist/mcp/__tests__/state-server-team-tools.test.js.map +1 -0
- package/dist/mcp/__tests__/state-server.test.d.ts +2 -0
- package/dist/mcp/__tests__/state-server.test.d.ts.map +1 -0
- package/dist/mcp/__tests__/state-server.test.js +52 -0
- package/dist/mcp/__tests__/state-server.test.js.map +1 -0
- package/dist/mcp/state-server.d.ts +18 -1
- package/dist/mcp/state-server.d.ts.map +1 -1
- package/dist/mcp/state-server.js +752 -6
- package/dist/mcp/state-server.js.map +1 -1
- package/dist/team/__tests__/mcp-comm.test.d.ts +2 -0
- package/dist/team/__tests__/mcp-comm.test.d.ts.map +1 -0
- package/dist/team/__tests__/mcp-comm.test.js +93 -0
- package/dist/team/__tests__/mcp-comm.test.js.map +1 -0
- package/dist/team/__tests__/runtime.test.d.ts +2 -0
- package/dist/team/__tests__/runtime.test.d.ts.map +1 -0
- package/dist/team/__tests__/runtime.test.js +271 -0
- package/dist/team/__tests__/runtime.test.js.map +1 -0
- package/dist/team/__tests__/state.test.d.ts +2 -0
- package/dist/team/__tests__/state.test.d.ts.map +1 -0
- package/dist/team/__tests__/state.test.js +556 -0
- package/dist/team/__tests__/state.test.js.map +1 -0
- package/dist/team/__tests__/tmux-session.test.d.ts +2 -0
- package/dist/team/__tests__/tmux-session.test.d.ts.map +1 -0
- package/dist/team/__tests__/tmux-session.test.js +140 -0
- package/dist/team/__tests__/tmux-session.test.js.map +1 -0
- package/dist/team/__tests__/worker-bootstrap.test.d.ts +2 -0
- package/dist/team/__tests__/worker-bootstrap.test.d.ts.map +1 -0
- package/dist/team/__tests__/worker-bootstrap.test.js +174 -0
- package/dist/team/__tests__/worker-bootstrap.test.js.map +1 -0
- package/dist/team/mcp-comm.d.ts +45 -0
- package/dist/team/mcp-comm.d.ts.map +1 -0
- package/dist/team/mcp-comm.js +26 -0
- package/dist/team/mcp-comm.js.map +1 -0
- package/dist/team/runtime.d.ts +72 -0
- package/dist/team/runtime.d.ts.map +1 -0
- package/dist/team/runtime.js +676 -0
- package/dist/team/runtime.js.map +1 -0
- package/dist/team/state.d.ts +217 -0
- package/dist/team/state.d.ts.map +1 -0
- package/dist/team/state.js +1114 -0
- package/dist/team/state.js.map +1 -0
- package/dist/team/team-ops.d.ts +48 -0
- package/dist/team/team-ops.d.ts.map +1 -0
- package/dist/team/team-ops.js +58 -0
- package/dist/team/team-ops.js.map +1 -0
- package/dist/team/tmux-session.d.ts +21 -0
- package/dist/team/tmux-session.d.ts.map +1 -0
- package/dist/team/tmux-session.js +425 -0
- package/dist/team/tmux-session.js.map +1 -0
- package/dist/team/worker-bootstrap.d.ts +39 -0
- package/dist/team/worker-bootstrap.d.ts.map +1 -0
- package/dist/team/worker-bootstrap.js +183 -0
- package/dist/team/worker-bootstrap.js.map +1 -0
- package/package.json +1 -1
- package/scripts/notify-hook.js +240 -78
- package/scripts/tmux-hook-engine.js +11 -2
- package/skills/cancel/SKILL.md +50 -55
- package/skills/hud/SKILL.md +5 -4
- package/skills/team/SKILL.md +171 -773
- package/skills/worker/SKILL.md +65 -0
- package/templates/AGENTS.md +2 -1
|
@@ -0,0 +1,676 @@
|
|
|
1
|
+
import { join } from 'path';
|
|
2
|
+
import { existsSync } from 'fs';
|
|
3
|
+
import { readdir, readFile } from 'fs/promises';
|
|
4
|
+
import { sanitizeTeamName, isTmuxAvailable, createTeamSession, waitForWorkerReady, sendToWorker, notifyLeaderStatus, isWorkerAlive, getWorkerPanePid, killWorker, killWorkerByPaneId, destroyTeamSession, listTeamSessions, } from './tmux-session.js';
|
|
5
|
+
import { teamInit as initTeamState, DEFAULT_MAX_WORKERS, teamReadConfig as readTeamConfig, teamWriteWorkerIdentity as writeWorkerIdentity, teamReadWorkerHeartbeat as readWorkerHeartbeat, teamReadWorkerStatus as readWorkerStatus, teamWriteWorkerInbox as writeWorkerInbox, teamCreateTask as createStateTask, teamReadTask as readTask, teamListTasks as listTasks, teamReadManifest as readTeamManifestV2, teamClaimTask as claimTask, teamReleaseTaskClaim as releaseTaskClaim, teamAppendEvent as appendTeamEvent, teamReadTaskApproval as readTaskApproval, teamListMailbox as listMailboxMessages, teamMarkMessageNotified as markMessageNotified, teamCleanup as cleanupTeamState, teamSaveConfig as saveTeamConfig, teamWriteShutdownRequest as writeShutdownRequest, teamReadShutdownAck as readShutdownAck, teamReadMonitorSnapshot as readMonitorSnapshot, teamWriteMonitorSnapshot as writeMonitorSnapshot, } from './team-ops.js';
|
|
6
|
+
import { queueInboxInstruction, queueDirectMailboxMessage, queueBroadcastMailboxMessage, } from './mcp-comm.js';
|
|
7
|
+
import { generateWorkerOverlay, applyWorkerOverlay, stripWorkerOverlay, generateInitialInbox, generateTaskAssignmentInbox, generateShutdownInbox, generateTriggerMessage, generateMailboxTriggerMessage, } from './worker-bootstrap.js';
|
|
8
|
+
/**
 * Resolve the worker readiness timeout (ms) from the environment.
 * Honors OMX_TEAM_READY_TIMEOUT_MS when it parses to an integer of at
 * least 5000 ms; anything else falls back to the 45s default.
 */
function resolveWorkerReadyTimeoutMs(env) {
    const DEFAULT_TIMEOUT_MS = 45_000;
    const MIN_TIMEOUT_MS = 5_000;
    const candidate = Number.parseInt(String(env.OMX_TEAM_READY_TIMEOUT_MS ?? ''), 10);
    const usable = Number.isFinite(candidate) && candidate >= MIN_TIMEOUT_MS;
    return usable ? candidate : DEFAULT_TIMEOUT_MS;
}
|
|
15
|
+
/**
 * Whether the worker-ready wait should be skipped entirely.
 * Only the exact string '1' in OMX_TEAM_SKIP_READY_WAIT opts out.
 */
function shouldSkipWorkerReadyWait(env) {
    const flag = env.OMX_TEAM_SKIP_READY_WAIT;
    return flag === '1';
}
|
|
18
|
+
/**
 * Parse extra Codex launch flags for worker panes from the environment.
 * Intentionally simple: whitespace split, no quoting/escaping support.
 * The primary use is passing stable flags like `--no-alt-screen`.
 *
 * @returns {string[]} Possibly-empty list of argument tokens.
 */
function resolveWorkerLaunchArgsFromEnv(env) {
    const value = env.OMX_TEAM_WORKER_LAUNCH_ARGS;
    if (value === undefined || value === null || value === '' || value.trim() === '') {
        return [];
    }
    const args = [];
    for (const token of value.split(/\s+/)) {
        const trimmed = token.trim();
        if (trimmed !== '') {
            args.push(trimmed);
        }
    }
    return args;
}
|
|
29
|
+
/**
 * Start a new team: init state, create tmux session, bootstrap workers.
 *
 * Preconditions enforced up front: must not already be a worker process
 * (OMX_TEAM_WORKER), tmux must be installed, the leader must run inside a
 * tmux pane (TMUX set), and no other active team may belong to this leader
 * session (topology guard via findActiveTeams).
 *
 * On any failure after partial setup, the catch block rolls back: kills the
 * created panes (split-pane topology) or the whole session, strips the
 * AGENTS.md overlay, and removes team state — collecting rollback errors so
 * they are reported alongside the original failure.
 *
 * @param {string} teamName    Raw team name; sanitized before use.
 * @param {string} task        Overall team task description (persisted in config).
 * @param {string} agentType   Role recorded for every worker.
 * @param {number} workerCount Number of worker panes to create (1-based worker indices).
 * @param {Array}  tasks       Task specs: { subject, description, owner, blocked_by }.
 * @param {string} cwd         Project root; state lives under .omx/state/team.
 * @returns {Promise<object>}  Handle: { teamName, sanitizedName, sessionName, config, cwd }.
 * @throws {Error} 'nested_team_disallowed', 'leader_session_conflict: …',
 *                 'worker_notify_failed:<name>', readiness/tmux errors.
 */
export async function startTeam(teamName, task, agentType, workerCount, tasks, cwd) {
    if (process.env.OMX_TEAM_WORKER) {
        throw new Error('nested_team_disallowed');
    }
    // tmux-only runtime
    if (!isTmuxAvailable()) {
        throw new Error('Team mode requires tmux. Install with: apt install tmux / brew install tmux');
    }
    const displayMode = 'split_pane';
    if (!process.env.TMUX) {
        throw new Error('Team mode requires running inside tmux current leader pane');
    }
    const leaderSessionId = await resolveLeaderSessionId(cwd);
    // Topology guard: one active team per leader session/process context.
    const activeTeams = await findActiveTeams(cwd, leaderSessionId);
    if (activeTeams.length > 0) {
        throw new Error(`leader_session_conflict: active team exists (${activeTeams.join(', ')})`);
    }
    // 2. Sanitize team name
    const sanitized = sanitizeTeamName(teamName);
    // `sessionName` may be replaced below with the name actually created
    // (split-pane names include a ':' pane suffix — see rollback branch).
    let sessionName = `omx-team-${sanitized}`;
    const agentsMdPath = join(cwd, 'AGENTS.md');
    const overlay = generateWorkerOverlay(sanitized);
    // Rollback bookkeeping: which irreversible steps have completed so far.
    let overlayApplied = false;
    let sessionCreated = false;
    const createdWorkerPaneIds = [];
    const workerLaunchArgs = resolveWorkerLaunchArgsFromEnv(process.env);
    const workerReadyTimeoutMs = resolveWorkerReadyTimeoutMs(process.env);
    const skipWorkerReadyWait = shouldSkipWorkerReadyWait(process.env);
    try {
        // 3. Init state directory + config
        const config = await initTeamState(sanitized, task, agentType, workerCount, cwd, DEFAULT_MAX_WORKERS, { ...process.env, OMX_TEAM_DISPLAY_MODE: displayMode });
        // 4. Create tasks
        for (const t of tasks) {
            await createStateTask(sanitized, {
                subject: t.subject,
                description: t.description,
                status: 'pending',
                owner: t.owner,
                blocked_by: t.blocked_by,
            }, cwd);
        }
        // 5. Apply generic AGENTS.md overlay
        await applyWorkerOverlay(agentsMdPath, overlay);
        overlayApplied = true;
        // 6. Create tmux session with workers
        const createdSession = createTeamSession(sanitized, workerCount, cwd, workerLaunchArgs);
        sessionName = createdSession.name;
        createdWorkerPaneIds.push(...createdSession.workerPaneIds);
        config.tmux_session = sessionName;
        await saveTeamConfig(config, cwd);
        sessionCreated = true;
        // 7. Wait for all workers to be ready, then bootstrap them
        const allTasks = await listTasks(sanitized, cwd);
        for (let i = 1; i <= workerCount; i++) {
            const workerName = `worker-${i}`;
            const paneId = createdSession.workerPaneIds[i - 1];
            // Get tasks assigned to this worker
            const workerTasks = allTasks.filter(t => t.owner === workerName);
            // Write worker identity
            const identity = {
                name: workerName,
                index: i,
                role: agentType,
                assigned_tasks: workerTasks.map(t => t.id),
            };
            // Get pane PID and store it
            const panePid = getWorkerPanePid(sessionName, i);
            if (panePid)
                identity.pid = panePid;
            if (paneId)
                identity.pane_id = paneId;
            if (paneId && config.workers[i - 1])
                config.workers[i - 1].pane_id = paneId;
            await writeWorkerIdentity(sanitized, workerName, identity, cwd);
            // Wait for worker readiness
            // NOTE(review): waitForWorkerReady is not awaited — assumed to be a
            // synchronous blocking poll; confirm against tmux-session.js.
            if (!skipWorkerReadyWait) {
                const ready = waitForWorkerReady(sessionName, i, workerReadyTimeoutMs, paneId);
                if (!ready) {
                    throw new Error(`Worker ${workerName} did not become ready in tmux session ${sessionName}`);
                }
            }
            // Queue inbox via MCP/state then notify worker via tmux transport.
            const inbox = generateInitialInbox(workerName, sanitized, agentType, workerTasks);
            const trigger = generateTriggerMessage(workerName, sanitized);
            const notified = await queueInboxInstruction({
                teamName: sanitized,
                workerName,
                workerIndex: i,
                paneId,
                inbox,
                triggerMessage: trigger,
                cwd,
                notify: (_target, message) => notifyWorker(config, i, message, paneId),
            });
            if (!notified) {
                throw new Error(`worker_notify_failed:${workerName}`);
            }
        }
        // Persist pane-id updates made inside the loop.
        await saveTeamConfig(config, cwd);
        return {
            teamName: sanitized,
            sanitizedName: sanitized,
            sessionName,
            config,
            cwd,
        };
    }
    catch (error) {
        const rollbackErrors = [];
        if (sessionCreated) {
            try {
                // In split-pane topology, we must not kill the entire tmux session; kill only created panes.
                if (sessionName.includes(':')) {
                    for (const paneId of createdWorkerPaneIds) {
                        try {
                            killWorkerByPaneId(paneId);
                        }
                        catch { /* ignore */ }
                    }
                }
                else {
                    destroyTeamSession(sessionName);
                }
            }
            catch (cleanupError) {
                rollbackErrors.push(`destroyTeamSession: ${String(cleanupError)}`);
            }
        }
        if (overlayApplied) {
            try {
                await stripWorkerOverlay(agentsMdPath);
            }
            catch (cleanupError) {
                rollbackErrors.push(`stripWorkerOverlay: ${String(cleanupError)}`);
            }
        }
        try {
            await cleanupTeamState(sanitized, cwd);
        }
        catch (cleanupError) {
            rollbackErrors.push(`cleanupTeamState: ${String(cleanupError)}`);
        }
        // Surface rollback problems without losing the original failure message.
        if (rollbackErrors.length > 0) {
            const message = error instanceof Error ? error.message : String(error);
            throw new Error(`${message}; rollback encountered errors: ${rollbackErrors.join(' | ')}`);
        }
        throw error;
    }
}
|
|
182
|
+
/**
 * Monitor team state by polling files. Returns a snapshot.
 *
 * One monitoring pass: reads per-worker status/heartbeat files, computes a
 * stall heuristic (`turnsWithoutProgress`) by diffing against the previous
 * snapshot, derives a coarse phase from task statuses, emits derived
 * lifecycle events, delivers any pending mailbox messages, and persists a
 * new snapshot for the next pass to diff against.
 *
 * @param {string} teamName Raw team name; sanitized before use.
 * @param {string} cwd      Project root for state files.
 * @returns {Promise<object|null>} Snapshot { teamName, phase, workers,
 *   tasks, allTasksTerminal, deadWorkers, nonReportingWorkers,
 *   recommendations }, or null when the team config does not exist.
 */
export async function monitorTeam(teamName, cwd) {
    const sanitized = sanitizeTeamName(teamName);
    const config = await readTeamConfig(sanitized, cwd);
    if (!config)
        return null;
    const previousSnapshot = await readMonitorSnapshot(sanitized, cwd);
    const sessionName = config.tmux_session;
    const allTasks = await listTasks(sanitized, cwd);
    // Index in-progress tasks by owner so dead-worker reassignment
    // recommendations are O(1) lookups below.
    const inProgressByOwner = new Map();
    for (const task of allTasks) {
        if (task.status !== 'in_progress' || !task.owner)
            continue;
        const existing = inProgressByOwner.get(task.owner) || [];
        existing.push(task);
        inProgressByOwner.set(task.owner, existing);
    }
    const workers = [];
    const deadWorkers = [];
    const nonReportingWorkers = [];
    const recommendations = [];
    for (const w of config.workers) {
        const alive = isWorkerAlive(sessionName, w.index, w.pane_id);
        const [status, heartbeat] = await Promise.all([
            readWorkerStatus(sanitized, w.name, cwd),
            readWorkerHeartbeat(sanitized, w.name, cwd),
        ]);
        const currentTask = status.current_task_id ? allTasks.find((t) => t.id === status.current_task_id) : null;
        const previousTurns = previousSnapshot ? (previousSnapshot.workerTurnCountByName[w.name] ?? 0) : null;
        const previousTaskId = previousSnapshot?.workerTaskIdByName[w.name] ?? '';
        const currentTaskId = status.current_task_id ?? '';
        // Stall heuristic: turns elapsed since the last snapshot while the
        // worker stayed 'working' on the SAME unfinished task. Zero on the
        // first pass or whenever the task changed hands/status.
        const turnsWithoutProgress = heartbeat &&
            previousTurns !== null &&
            status.state === 'working' &&
            currentTask &&
            (currentTask.status === 'pending' || currentTask.status === 'in_progress') &&
            currentTaskId !== '' &&
            previousTaskId === currentTaskId
            ? Math.max(0, heartbeat.turn_count - previousTurns)
            : 0;
        workers.push({
            name: w.name,
            alive,
            status,
            heartbeat,
            assignedTasks: w.assigned_tasks,
            turnsWithoutProgress,
        });
        if (!alive) {
            deadWorkers.push(w.name);
            // Find in-progress tasks owned by this dead worker
            const deadWorkerTasks = inProgressByOwner.get(w.name) || [];
            for (const t of deadWorkerTasks) {
                recommendations.push(`Reassign task-${t.id} from dead ${w.name}`);
            }
        }
        if (alive && turnsWithoutProgress > 5) {
            nonReportingWorkers.push(w.name);
            recommendations.push(`Send reminder to non-reporting ${w.name}`);
        }
    }
    // Count tasks
    const taskCounts = {
        total: allTasks.length,
        pending: allTasks.filter(t => t.status === 'pending').length,
        blocked: allTasks.filter(t => t.status === 'blocked').length,
        in_progress: allTasks.filter(t => t.status === 'in_progress').length,
        completed: allTasks.filter(t => t.status === 'completed').length,
        failed: allTasks.filter(t => t.status === 'failed').length,
    };
    // Terminal = nothing pending, blocked, or in flight (completed/failed only).
    const allTasksTerminal = taskCounts.pending === 0 && taskCounts.blocked === 0 && taskCounts.in_progress === 0;
    // Determine phase from state file (simplified -- read from mode state if available)
    // For now use a heuristic based on task statuses
    let phase = 'team-exec';
    if (allTasksTerminal && taskCounts.failed === 0)
        phase = 'complete';
    else if (allTasksTerminal && taskCounts.failed > 0)
        phase = 'team-fix';
    await emitMonitorDerivedEvents(sanitized, allTasks, workers, previousSnapshot, cwd);
    const mailboxNotifiedByMessageId = await deliverPendingMailboxMessages(sanitized, config, workers, previousSnapshot?.mailboxNotifiedByMessageId ?? {}, cwd);
    // Persist this pass so the next invocation can diff against it.
    await writeMonitorSnapshot(sanitized, {
        taskStatusById: Object.fromEntries(allTasks.map((t) => [t.id, t.status])),
        workerAliveByName: Object.fromEntries(workers.map((w) => [w.name, w.alive])),
        workerStateByName: Object.fromEntries(workers.map((w) => [w.name, w.status.state])),
        workerTurnCountByName: Object.fromEntries(workers.map((w) => [w.name, w.heartbeat?.turn_count ?? 0])),
        workerTaskIdByName: Object.fromEntries(workers.map((w) => [w.name, w.status.current_task_id ?? ''])),
        mailboxNotifiedByMessageId,
    }, cwd);
    return {
        teamName: sanitized,
        phase,
        workers,
        tasks: {
            ...taskCounts,
            items: allTasks,
        },
        allTasksTerminal,
        deadWorkers,
        nonReportingWorkers,
        recommendations,
    };
}
|
|
286
|
+
/**
 * Assign a task to a worker by writing inbox and sending trigger.
 *
 * Flow: validate task + manifest policies (delegation_only,
 * plan_approval_required), claim the task with optimistic versioning, then
 * dispatch the assignment inbox. If anything after the claim fails, the
 * claim is released and a cancellation notice is written to the worker's
 * inbox so stale instructions are not executed.
 *
 * @param {string} teamName   Raw team name; sanitized before use.
 * @param {string} workerName Target worker (e.g. 'worker-2'); 'leader-fixed'
 *                            is rejected under delegation_only policy.
 * @param {string} taskId     Task to assign.
 * @param {string} cwd        Project root for state files.
 * @throws {Error} 'delegation_only_violation', 'plan_approval_required',
 *   'blocked_dependency:<ids>', 'worker_notify_failed',
 *   'worker_assignment_failed:<reason>', not-found errors, or claim errors.
 */
export async function assignTask(teamName, workerName, taskId, cwd) {
    const sanitized = sanitizeTeamName(teamName);
    const task = await readTask(sanitized, taskId, cwd);
    if (!task)
        throw new Error(`Task ${taskId} not found`);
    const manifest = await readTeamManifestV2(sanitized, cwd);
    if (manifest?.policy?.delegation_only && workerName === 'leader-fixed') {
        throw new Error('delegation_only_violation');
    }
    if (manifest?.policy?.plan_approval_required && task.requires_code_change === true) {
        const approved = await isTaskApprovedForExecution(sanitized, taskId, cwd);
        if (!approved) {
            throw new Error('plan_approval_required');
        }
    }
    const config = await readTeamConfig(sanitized, cwd);
    if (!config)
        throw new Error(`Team ${sanitized} not found`);
    const workerInfo = config.workers.find(w => w.name === workerName);
    if (!workerInfo)
        throw new Error(`Worker ${workerName} not found in team`);
    // Optimistic claim: the expected task version guards against concurrent
    // assignment by another leader pass.
    const claim = await claimTask(sanitized, taskId, workerName, task.version ?? 1, cwd);
    if (!claim.ok) {
        if (claim.error === 'blocked_dependency') {
            throw new Error(`blocked_dependency:${(claim.dependencies ?? []).join(',')}`);
        }
        throw new Error(claim.error);
    }
    try {
        const inbox = generateTaskAssignmentInbox(workerName, sanitized, taskId, task.description);
        const notified = await queueInboxInstruction({
            teamName: sanitized,
            workerName,
            workerIndex: workerInfo.index,
            paneId: workerInfo.pane_id,
            inbox,
            triggerMessage: generateTriggerMessage(workerName, sanitized),
            cwd,
            notify: (_target, message) => notifyWorker(config, workerInfo.index, message, workerInfo.pane_id),
        });
        if (!notified) {
            throw new Error('worker_notify_failed');
        }
    }
    catch (error) {
        // Roll back claim to avoid stuck in_progress tasks on any post-claim dispatch failure.
        const released = await releaseTaskClaim(sanitized, taskId, claim.claimToken, workerName, cwd);
        const reason = error instanceof Error && error.message.trim() !== ''
            ? error.message
            : 'worker_assignment_failed';
        // Overwrite the worker's inbox so a half-delivered assignment is not acted on.
        try {
            await writeWorkerInbox(sanitized, workerName, `# Assignment Cancelled\n\nTask ${taskId} was not dispatched due to ${reason}.\nDo not execute this task from prior inbox content.`, cwd);
        }
        catch {
            // best effort
        }
        if (!released.ok) {
            throw new Error(`${reason}:${released.error}`);
        }
        if (reason === 'worker_notify_failed')
            throw new Error('worker_notify_failed');
        throw new Error(`worker_assignment_failed:${reason}`);
    }
}
|
|
353
|
+
/**
 * Reassign a task to a different worker.
 *
 * Delegates entirely to assignTask for the target worker; the source worker
 * (`_fromWorker`) is accepted for interface compatibility but unused —
 * claim bookkeeping happens inside assignTask.
 */
export async function reassignTask(teamName, taskId, _fromWorker, toWorker, cwd) {
    const newOwner = toWorker;
    await assignTask(teamName, newOwner, taskId, cwd);
}
|
|
359
|
+
/**
 * Graceful shutdown: send shutdown inbox to all workers, wait, force kill, cleanup.
 *
 * Protocol: write a shutdown request + inbox for every worker, then poll up
 * to 15s for acks and pane exits. A 'reject' ack aborts the shutdown unless
 * `options.force` is set. Survivors are force-killed, the tmux session is
 * destroyed (only for whole-session topology — split-pane names contain
 * ':'), the AGENTS.md overlay is stripped, and team state is removed.
 *
 * @param {string} teamName Raw team name; sanitized before use.
 * @param {string} cwd      Project root for state files.
 * @param {{force?: boolean}} [options] force=true ignores worker rejections.
 * @throws {Error} 'shutdown_rejected:<worker:reason,…>' when a worker
 *   rejects and force is not set.
 */
export async function shutdownTeam(teamName, cwd, options = {}) {
    const force = options.force === true;
    const sanitized = sanitizeTeamName(teamName);
    const config = await readTeamConfig(sanitized, cwd);
    if (!config) {
        // No config -- just try to kill tmux session and clean up
        try {
            destroyTeamSession(`omx-team-${sanitized}`);
        }
        catch { /* ignore */ }
        await cleanupTeamState(sanitized, cwd);
        return;
    }
    const sessionName = config.tmux_session;
    // Request timestamps let readShutdownAck ignore stale acks from a
    // previous shutdown attempt.
    const shutdownRequestTimes = new Map();
    // 1. Send shutdown inbox to each worker
    for (const w of config.workers) {
        try {
            const requestedAt = new Date().toISOString();
            await writeShutdownRequest(sanitized, w.name, 'leader-fixed', cwd);
            shutdownRequestTimes.set(w.name, requestedAt);
            const notified = await queueInboxInstruction({
                teamName: sanitized,
                workerName: w.name,
                workerIndex: w.index,
                paneId: w.pane_id,
                inbox: generateShutdownInbox(sanitized, w.name),
                triggerMessage: generateTriggerMessage(w.name, sanitized),
                cwd,
                notify: (_target, message) => notifyWorker(config, w.index, message, w.pane_id),
            });
            if (!notified) {
                // best effort: worker may already be gone
            }
        }
        catch { /* worker might already be dead */ }
    }
    // 2. Wait up to 15s for workers to exit and collect acks
    const deadline = Date.now() + 15_000;
    const rejected = [];
    while (Date.now() < deadline) {
        for (const w of config.workers) {
            const ack = await readShutdownAck(sanitized, w.name, cwd, shutdownRequestTimes.get(w.name));
            if (ack?.status === 'reject') {
                // Record each rejecting worker once.
                if (!rejected.some((r) => r.worker === w.name)) {
                    rejected.push({ worker: w.name, reason: ack.reason || 'no_reason' });
                }
            }
        }
        if (rejected.length > 0 && !force) {
            const detail = rejected.map(r => `${r.worker}:${r.reason}`).join(',');
            throw new Error(`shutdown_rejected:${detail}`);
        }
        const anyAlive = config.workers.some(w => isWorkerAlive(sessionName, w.index, w.pane_id));
        if (!anyAlive)
            break;
        // Sleep 2s
        await new Promise(resolve => setTimeout(resolve, 2000));
    }
    const anyAliveAfterWait = config.workers.some(w => isWorkerAlive(sessionName, w.index, w.pane_id));
    if (anyAliveAfterWait && !force) {
        // Workers may have accepted shutdown but not exited (Codex TUI requires explicit exit).
        // In this case, proceed to force kill panes (next step) rather than failing and leaving state around.
    }
    // 3. Force kill remaining workers
    for (const w of config.workers) {
        try {
            if (isWorkerAlive(sessionName, w.index, w.pane_id)) {
                killWorker(sessionName, w.index, w.pane_id);
            }
        }
        catch { /* ignore */ }
    }
    // 4. Destroy tmux session
    // A ':' in the name means split-pane topology inside the leader's own
    // session — destroying it would kill the leader, so skip.
    if (!sessionName.includes(':')) {
        try {
            destroyTeamSession(sessionName);
        }
        catch { /* ignore */ }
    }
    // 5. Strip AGENTS.md overlay
    const agentsMdPath = join(cwd, 'AGENTS.md');
    try {
        await stripWorkerOverlay(agentsMdPath);
    }
    catch { /* ignore */ }
    // 6. Cleanup state
    await cleanupTeamState(sanitized, cwd);
}
|
|
451
|
+
/**
 * Resume monitoring an existing team.
 *
 * Returns a runtime handle only when both the persisted team config and its
 * backing tmux session still exist; otherwise null.
 *
 * @param {string} teamName Raw team name; sanitized before use.
 * @param {string} cwd      Project root for state files.
 * @returns {Promise<object|null>}
 */
export async function resumeTeam(teamName, cwd) {
    const sanitized = sanitizeTeamName(teamName);
    const config = await readTeamConfig(sanitized, cwd);
    if (!config) {
        return null;
    }
    // `tmux_session` may carry a pane suffix ("session:pane"); only the base
    // session name appears in tmux's session list.
    const [baseSession] = config.tmux_session.split(':');
    const liveSessions = listTeamSessions();
    if (!liveSessions.includes(baseSession)) {
        return null;
    }
    return {
        teamName: sanitized,
        sanitizedName: sanitized,
        sessionName: config.tmux_session,
        config,
        cwd,
    };
}
|
|
472
|
+
/**
 * List team names under .omx/state/team whose tmux session is still live and
 * which would conflict with this leader session starting a new team.
 *
 * Skips teams that opt out via policy.one_team_per_leader_session === false,
 * and teams owned by a different leader session id (when one is known).
 */
async function findActiveTeams(cwd, leaderSessionId) {
    const stateRoot = join(cwd, '.omx', 'state', 'team');
    if (!existsSync(stateRoot)) {
        return [];
    }
    const liveSessions = new Set(listTeamSessions());
    const dirEntries = await readdir(stateRoot, { withFileTypes: true });
    const conflicting = [];
    for (const entry of dirEntries) {
        if (!entry.isDirectory()) {
            continue;
        }
        const candidate = entry.name;
        const cfg = await readTeamConfig(candidate, cwd);
        const manifest = await readTeamManifestV2(candidate, cwd);
        // Teams that explicitly opt out of the single-team policy never conflict.
        if (manifest?.policy?.one_team_per_leader_session === false) {
            continue;
        }
        const sessionRef = manifest?.tmux_session || cfg?.tmux_session || `omx-team-${candidate}`;
        const [baseSession] = sessionRef.split(':');
        if (leaderSessionId) {
            const ownerSessionId = manifest?.leader?.session_id?.trim() ?? '';
            // A team recorded under a DIFFERENT leader session is not ours to conflict with.
            if (ownerSessionId && ownerSessionId !== leaderSessionId) {
                continue;
            }
        }
        if (liveSessions.has(baseSession)) {
            conflicting.push(candidate);
        }
    }
    return conflicting;
}
|
|
498
|
+
/**
 * Resolve the leader's session id.
 *
 * Precedence: OMX_SESSION_ID, CODEX_SESSION_ID, SESSION_ID environment
 * variables, then the `session_id` field of .omx/state/session.json under
 * `cwd`. Returns '' when nothing usable is found or the file is unreadable.
 */
async function resolveLeaderSessionId(cwd) {
    const envCandidate = process.env.OMX_SESSION_ID || process.env.CODEX_SESSION_ID || process.env.SESSION_ID;
    if (envCandidate && envCandidate.trim() !== '') {
        return envCandidate.trim();
    }
    const sessionFile = join(cwd, '.omx', 'state', 'session.json');
    if (!existsSync(sessionFile)) {
        return '';
    }
    try {
        const parsed = JSON.parse(await readFile(sessionFile, 'utf-8'));
        const stored = parsed.session_id;
        if (typeof stored === 'string' && stored.trim() !== '') {
            return stored.trim();
        }
    }
    catch {
        // Malformed or unreadable session file — treat as no session id.
        return '';
    }
    return '';
}
|
|
516
|
+
/**
 * True iff an approval record exists for the task and its status is
 * exactly 'approved'.
 */
async function isTaskApprovedForExecution(teamName, taskId, cwd) {
    const approval = await readTaskApproval(teamName, taskId, cwd);
    if (!approval) {
        return false;
    }
    return approval.status === 'approved';
}
|
|
520
|
+
/**
 * Compare the current monitor pass against the previous snapshot and append
 * derived lifecycle events: task_completed, worker_stopped, worker_idle.
 * No-op on the first pass (no previous snapshot to diff against).
 * Events are appended sequentially to preserve log ordering.
 */
async function emitMonitorDerivedEvents(teamName, tasks, workers, previous, cwd) {
    if (!previous) {
        return;
    }
    // Tasks that transitioned into 'completed' since the last snapshot.
    for (const task of tasks) {
        const before = previous.taskStatusById[task.id];
        const justCompleted = Boolean(before) && before !== 'completed' && task.status === 'completed';
        if (!justCompleted) {
            continue;
        }
        await appendTeamEvent(teamName, {
            type: 'task_completed',
            worker: task.owner || 'unknown',
            task_id: task.id,
            message_id: null,
            reason: undefined,
        }, cwd);
    }
    // Workers that died or went idle since the last snapshot.
    for (const worker of workers) {
        const wasAlive = previous.workerAliveByName[worker.name];
        if (wasAlive === true && worker.alive === false) {
            await appendTeamEvent(teamName, {
                type: 'worker_stopped',
                worker: worker.name,
                task_id: worker.status.current_task_id,
                message_id: null,
                reason: worker.status.reason,
            }, cwd);
        }
        const stateBefore = previous.workerStateByName[worker.name];
        if (stateBefore && stateBefore !== 'idle' && worker.status.state === 'idle') {
            await appendTeamEvent(teamName, {
                type: 'worker_idle',
                worker: worker.name,
                task_id: worker.status.current_task_id,
                message_id: null,
                reason: undefined,
            }, cwd);
        }
    }
}
|
|
558
|
+
/**
 * Best-effort nudge to a worker's tmux pane. Returns true when the message
 * was handed to tmux, false when tmux is unconfigured/unavailable or the
 * send throws (failures are intentionally swallowed — delivery is retried
 * by the mailbox loop).
 */
function notifyWorker(config, workerIndex, message, workerPaneId) {
    // Short-circuits: skip the tmux availability probe when no session is set.
    const tmuxReady = Boolean(config.tmux_session) && isTmuxAvailable();
    if (!tmuxReady) {
        return false;
    }
    try {
        sendToWorker(config.tmux_session, workerIndex, message, workerPaneId);
        return true;
    }
    catch {
        return false;
    }
}
|
|
569
|
+
/**
 * Best-effort status nudge to the team leader's tmux session.
 * Returns false immediately when no tmux session is configured; otherwise
 * delegates to notifyLeaderStatus and returns its result.
 */
function notifyLeader(config, message) {
    return config.tmux_session ? notifyLeaderStatus(config.tmux_session, message) : false;
}
|
|
574
|
+
/**
 * Nudge every alive worker that has undelivered mailbox messages, re-nudging
 * at most once per 15-second window per worker until delivery. Notification
 * timestamps are tracked per message (on the message record when
 * markMessageNotified succeeds, with `previousNotifications` as fallback
 * state carried between polls).
 *
 * Returns the next notification map (message_id -> ISO timestamp), pruned to
 * messages that are still pending and have a recorded timestamp.
 */
async function deliverPendingMailboxMessages(teamName, config, workers, previousNotifications, cwd) {
    const RETRY_INTERVAL_MS = 15_000;
    const updatedNotifications = {};
    const stillPendingIds = new Set();
    for (const worker of workers) {
        if (!worker.alive) {
            continue;
        }
        const workerInfo = config.workers.find((entry) => entry.name === worker.name);
        if (!workerInfo) {
            continue;
        }
        const mailbox = await listMailboxMessages(teamName, worker.name, cwd);
        const undelivered = mailbox.filter((msg) => !msg.delivered_at);
        if (undelivered.length === 0) {
            continue;
        }
        for (const msg of undelivered) {
            stillPendingIds.add(msg.message_id);
            // Carry forward the last-known notification time for each pending message.
            updatedNotifications[msg.message_id] = msg.notified_at || previousNotifications[msg.message_id] || '';
        }
        const now = Date.now();
        const everyMessageNotified = undelivered.every((msg) => Boolean(msg.notified_at) || typeof previousNotifications[msg.message_id] === 'string');
        const retryDue = undelivered.some((msg) => {
            const lastNotifiedIso = msg.notified_at || previousNotifications[msg.message_id];
            if (!lastNotifiedIso) {
                return true;
            }
            const lastNotifiedMs = Date.parse(lastNotifiedIso);
            // Unparseable timestamps are treated as "never notified".
            return !Number.isFinite(lastNotifiedMs) || now - lastNotifiedMs >= RETRY_INTERVAL_MS;
        });
        const needsNudge = !everyMessageNotified || retryDue;
        if (!needsNudge) {
            continue;
        }
        const trigger = generateMailboxTriggerMessage(worker.name, teamName, undelivered.length);
        if (!notifyWorker(config, workerInfo.index, trigger, workerInfo.pane_id)) {
            // Nudge failed: keep the old timestamps so the next poll retries.
            continue;
        }
        for (const msg of undelivered) {
            if (await markMessageNotified(teamName, worker.name, msg.message_id, cwd)) {
                updatedNotifications[msg.message_id] = new Date().toISOString();
            }
        }
    }
    // Prune: keep only non-empty timestamps for messages still pending anywhere.
    const pruned = {};
    for (const [messageId, timestamp] of Object.entries(updatedNotifications)) {
        if (stillPendingIds.has(messageId) && timestamp) {
            pruned[messageId] = timestamp;
        }
    }
    return pruned;
}
|
|
627
|
+
/**
 * Queue a direct mailbox message from one worker to another (or to the
 * leader when `toWorker` is the 'leader-fixed' sentinel) and fire the
 * matching tmux nudge.
 *
 * @throws Error when the team config cannot be read or the recipient
 *         worker is not part of the team.
 */
export async function sendWorkerMessage(teamName, fromWorker, toWorker, body, cwd) {
    const team = sanitizeTeamName(teamName);
    const config = await readTeamConfig(team, cwd);
    if (!config) {
        throw new Error(`Team ${team} not found`);
    }
    if (toWorker === 'leader-fixed') {
        // The leader has no worker pane; route the nudge through the
        // leader status channel instead.
        await queueDirectMailboxMessage({
            teamName: team,
            fromWorker,
            toWorker,
            body,
            triggerMessage: `Team ${team}: new worker message for leader from ${fromWorker}`,
            cwd,
            notify: (_target, message) => notifyLeader(config, message),
        });
        return;
    }
    const recipient = config.workers.find((entry) => entry.name === toWorker);
    if (!recipient) {
        throw new Error(`Worker ${toWorker} not found in team`);
    }
    await queueDirectMailboxMessage({
        teamName: team,
        fromWorker,
        toWorker,
        toWorkerIndex: recipient.index,
        toPaneId: recipient.pane_id,
        body,
        triggerMessage: generateMailboxTriggerMessage(toWorker, team, 1),
        cwd,
        notify: (_target, message) => notifyWorker(config, recipient.index, message, recipient.pane_id),
    });
}
|
|
659
|
+
/**
 * Queue a broadcast mailbox message from `fromWorker` to every worker in
 * the team, nudging each recipient's tmux pane when one is known.
 *
 * @throws Error when the team config cannot be read.
 */
export async function broadcastWorkerMessage(teamName, fromWorker, body, cwd) {
    const team = sanitizeTeamName(teamName);
    const config = await readTeamConfig(team, cwd);
    if (!config) {
        throw new Error(`Team ${team} not found`);
    }
    const recipients = config.workers.map((entry) => ({
        workerName: entry.name,
        workerIndex: entry.index,
        paneId: entry.pane_id,
    }));
    await queueBroadcastMailboxMessage({
        teamName: team,
        fromWorker,
        recipients,
        body,
        cwd,
        triggerFor: (workerName) => generateMailboxTriggerMessage(workerName, team, 1),
        notify: (target, message) => {
            // Recipients without a numeric pane index cannot be nudged.
            if (typeof target.workerIndex !== 'number') {
                return false;
            }
            return notifyWorker(config, target.workerIndex, message, target.paneId);
        },
    });
}
|
|
676
|
+
//# sourceMappingURL=runtime.js.map
|