@stoneforge/smithy 1.4.1 → 1.6.0
This diff reflects the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/dist/api/orchestrator-api.d.ts.map +1 -1
- package/dist/api/orchestrator-api.js +9 -0
- package/dist/api/orchestrator-api.js.map +1 -1
- package/dist/cli/commands/daemon.d.ts +5 -1
- package/dist/cli/commands/daemon.d.ts.map +1 -1
- package/dist/cli/commands/daemon.js +184 -6
- package/dist/cli/commands/daemon.js.map +1 -1
- package/dist/cli/index.d.ts +1 -1
- package/dist/cli/index.d.ts.map +1 -1
- package/dist/cli/index.js +1 -1
- package/dist/cli/index.js.map +1 -1
- package/dist/git/merge.d.ts +2 -0
- package/dist/git/merge.d.ts.map +1 -1
- package/dist/git/merge.js +19 -0
- package/dist/git/merge.js.map +1 -1
- package/dist/git/worktree-manager.d.ts.map +1 -1
- package/dist/git/worktree-manager.js +18 -2
- package/dist/git/worktree-manager.js.map +1 -1
- package/dist/runtime/session-manager.d.ts +2 -0
- package/dist/runtime/session-manager.d.ts.map +1 -1
- package/dist/runtime/session-manager.js +10 -2
- package/dist/runtime/session-manager.js.map +1 -1
- package/dist/runtime/spawner.d.ts.map +1 -1
- package/dist/runtime/spawner.js +13 -0
- package/dist/runtime/spawner.js.map +1 -1
- package/dist/server/routes/daemon.d.ts.map +1 -1
- package/dist/server/routes/daemon.js +84 -0
- package/dist/server/routes/daemon.js.map +1 -1
- package/dist/server/routes/settings.d.ts.map +1 -1
- package/dist/server/routes/settings.js +15 -2
- package/dist/server/routes/settings.js.map +1 -1
- package/dist/server/services.d.ts.map +1 -1
- package/dist/server/services.js +10 -1
- package/dist/server/services.js.map +1 -1
- package/dist/services/dispatch-daemon.d.ts +54 -2
- package/dist/services/dispatch-daemon.d.ts.map +1 -1
- package/dist/services/dispatch-daemon.js +204 -6
- package/dist/services/dispatch-daemon.js.map +1 -1
- package/dist/services/index.d.ts +2 -0
- package/dist/services/index.d.ts.map +1 -1
- package/dist/services/index.js +5 -0
- package/dist/services/index.js.map +1 -1
- package/dist/services/merge-steward-service.d.ts +8 -0
- package/dist/services/merge-steward-service.d.ts.map +1 -1
- package/dist/services/merge-steward-service.js +57 -0
- package/dist/services/merge-steward-service.js.map +1 -1
- package/dist/services/rate-limit-tracker.d.ts +77 -0
- package/dist/services/rate-limit-tracker.d.ts.map +1 -0
- package/dist/services/rate-limit-tracker.js +117 -0
- package/dist/services/rate-limit-tracker.js.map +1 -0
- package/dist/services/settings-service.d.ts +2 -0
- package/dist/services/settings-service.d.ts.map +1 -1
- package/dist/services/settings-service.js +10 -1
- package/dist/services/settings-service.js.map +1 -1
- package/dist/types/task-meta.d.ts +2 -0
- package/dist/types/task-meta.d.ts.map +1 -1
- package/dist/types/task-meta.js.map +1 -1
- package/dist/utils/rate-limit-parser.d.ts +55 -0
- package/dist/utils/rate-limit-parser.d.ts.map +1 -0
- package/dist/utils/rate-limit-parser.js +233 -0
- package/dist/utils/rate-limit-parser.js.map +1 -0
- package/package.json +5 -5
- package/web/assets/{index-8dBly5AJ.js → index--kBvwTYH.js} +100 -100
- package/web/assets/{utils-vendor-B7jOGaxP.js → utils-vendor-CnRJgAVd.js} +1 -1
- package/web/index.html +2 -2
package/dist/services/dispatch-daemon.js

@@ -22,6 +22,7 @@ import { InboxStatus, createTimestamp, TaskStatus, asEntityId, asElementId, Plan
 import { loadTriagePrompt, loadRolePrompt } from '../prompts/index.js';
 import { createLogger } from '../utils/logger.js';
 import { getAgentMetadata } from './agent-registry.js';
+import { createRateLimitTracker } from './rate-limit-tracker.js';
 import { getOrchestratorTaskMeta, updateOrchestratorTaskMeta, appendTaskSessionHistory, } from '../types/task-meta.js';
 const logger = createLogger('dispatch-daemon');
 // ============================================================================
@@ -55,12 +56,15 @@ export class DispatchDaemonImpl {
     stewardScheduler;
     inboxService;
     poolService;
+    settingsService;
+    rateLimitTracker;
     emitter;
     config;
     running = false;
     polling = false;
     pollIntervalHandle;
     currentPollCycle;
+    rateLimitSleepTimer;
     /**
      * Tracks inbox item IDs that are currently being forwarded to persistent agents.
      * Prevents duplicate message delivery when concurrent pollInboxes() calls
@@ -72,7 +76,7 @@ export class DispatchDaemonImpl {
      * Items are added before forwarding and removed after markAsRead() completes.
      */
     forwardingInboxItems = new Set();
-    constructor(api, agentRegistry, sessionManager, dispatchService, worktreeManager, taskAssignment, stewardScheduler, inboxService, config, poolService) {
+    constructor(api, agentRegistry, sessionManager, dispatchService, worktreeManager, taskAssignment, stewardScheduler, inboxService, config, poolService, settingsService) {
         this.api = api;
         this.agentRegistry = agentRegistry;
         this.sessionManager = sessionManager;
@@ -82,6 +86,8 @@ export class DispatchDaemonImpl {
         this.stewardScheduler = stewardScheduler;
         this.inboxService = inboxService;
         this.poolService = poolService;
+        this.settingsService = settingsService;
+        this.rateLimitTracker = createRateLimitTracker();
         this.emitter = new EventEmitter();
         this.config = this.normalizeConfig(config);
     }
@@ -134,6 +140,10 @@ export class DispatchDaemonImpl {
             clearInterval(this.pollIntervalHandle);
             this.pollIntervalHandle = undefined;
         }
+        if (this.rateLimitSleepTimer) {
+            clearTimeout(this.rateLimitSleepTimer);
+            this.rateLimitSleepTimer = undefined;
+        }
         // Wait for in-flight poll cycle to complete (M-8)
         if (this.currentPollCycle) {
             try {
@@ -150,6 +160,57 @@ export class DispatchDaemonImpl {
         return this.running;
     }
     // ----------------------------------------
+    // Rate Limiting
+    // ----------------------------------------
+    handleRateLimitDetected(executable, resetsAt) {
+        this.rateLimitTracker.markLimited(executable, resetsAt);
+        logger.info(`Rate limit detected for executable '${executable}', resets at ${resetsAt.toISOString()}`);
+    }
+    getRateLimitStatus() {
+        const fallbackChain = this.settingsService?.getAgentDefaults().fallbackChain ?? [];
+        const isPaused = fallbackChain.length > 0 && this.rateLimitTracker.isAllLimited(fallbackChain);
+        const allLimits = this.rateLimitTracker.getAllLimits();
+        const soonestReset = this.rateLimitTracker.getSoonestResetTime();
+        return {
+            isPaused,
+            limits: allLimits.map((entry) => ({
+                executable: entry.executable,
+                resetsAt: entry.resetsAt.toISOString(),
+            })),
+            soonestReset: soonestReset?.toISOString(),
+        };
+    }
+    sleepUntil(resetTime) {
+        const fallbackChain = this.settingsService?.getAgentDefaults().fallbackChain ?? [];
+        if (fallbackChain.length === 0) {
+            logger.warn('sleepUntil: No fallback chain configured — nothing to mark as limited');
+            return;
+        }
+        // Mark all executables in the fallback chain as rate-limited until the given time
+        for (const executable of fallbackChain) {
+            this.rateLimitTracker.markLimited(executable, resetTime);
+        }
+        // Clear any existing sleep timer and set a new one
+        if (this.rateLimitSleepTimer) {
+            clearTimeout(this.rateLimitSleepTimer);
+        }
+        const sleepMs = Math.max(0, resetTime.getTime() - Date.now());
+        logger.info(`Manual sleep: pausing dispatch for ${Math.round(sleepMs / 1000)}s (until ${resetTime.toISOString()})`);
+        this.rateLimitSleepTimer = setTimeout(() => {
+            this.rateLimitSleepTimer = undefined;
+        }, sleepMs);
+    }
+    wake() {
+        // Clear all rate limit entries
+        this.rateLimitTracker.clear();
+        // Clear the sleep timer
+        if (this.rateLimitSleepTimer) {
+            clearTimeout(this.rateLimitSleepTimer);
+            this.rateLimitSleepTimer = undefined;
+        }
+        logger.info('Manual wake: cleared all rate limits, dispatch will resume on next poll cycle');
+    }
+    // ----------------------------------------
     // Manual Poll Triggers
     // ----------------------------------------
     async pollWorkerAvailability() {
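
The tracker these methods call ships in the new dist/services/rate-limit-tracker.js, whose body is not part of this diff. As orientation only, the TypeScript sketch below reconstructs the interface implied by the calls in the hunk above; the entry shape matches what getRateLimitStatus() reads, but the internal storage and expiry handling are assumptions, not the published implementation.

// Sketch of the tracker interface implied by the calls in the hunk above. The real
// implementation lives in dist/services/rate-limit-tracker.js (new in this release)
// and is not shown in this diff; field names and expiry handling here are assumptions.
interface RateLimitEntry {
    executable: string;
    resetsAt: Date;
}

interface RateLimitTracker {
    markLimited(executable: string, resetsAt: Date): void;
    isLimited(executable: string): boolean;
    isAllLimited(executables: string[]): boolean;
    getAllLimits(): RateLimitEntry[];
    getSoonestResetTime(): Date | undefined;
    getAvailableExecutable(executables: string[]): string | undefined;
    clear(): void;
}

// Minimal in-memory sketch: a limit is considered expired once its reset time passes.
function createRateLimitTrackerSketch(): RateLimitTracker {
    const limits = new Map<string, Date>(); // executable name -> expected reset time
    const stillLimited = (exe: string) => {
        const resetsAt = limits.get(exe);
        return resetsAt !== undefined && resetsAt.getTime() > Date.now();
    };
    return {
        markLimited: (exe, resetsAt) => void limits.set(exe, resetsAt),
        isLimited: stillLimited,
        // Callers guard against an empty chain, so the empty case is not special-cased here.
        isAllLimited: (exes) => exes.every(stillLimited),
        getAllLimits: () => [...limits.entries()]
            .filter(([exe]) => stillLimited(exe))
            .map(([executable, resetsAt]) => ({ executable, resetsAt })),
        getSoonestResetTime: () => {
            const upcoming = [...limits.values()].map((d) => d.getTime()).filter((t) => t > Date.now());
            return upcoming.length > 0 ? new Date(Math.min(...upcoming)) : undefined;
        },
        getAvailableExecutable: (exes) => exes.find((exe) => !stillLimited(exe)),
        clear: () => limits.clear(),
    };
}

getRateLimitStatus() then flattens this state into an { isPaused, limits, soonestReset } snapshot, which is presumably what the new daemon route and CLI additions elsewhere in this release expose.
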
@@ -542,7 +603,33 @@ export class DispatchDaemonImpl {
             if (stewardTasks.length === 0)
                 continue;
             const orphanedAssignment = stewardTasks[0];
+            // Safety valve: cap steward recovery attempts to prevent infinite re-dispatch loops
+            const stewardRecoveryCount = orphanedAssignment.orchestratorMeta?.stewardRecoveryCount ?? 0;
+            if (stewardRecoveryCount >= 3) {
+                logger.warn(`Steward recovery limit reached for task ${orphanedAssignment.task.id}, setting mergeStatus to 'failed'`);
+                try {
+                    await this.api.update(orphanedAssignment.task.id, {
+                        assignee: undefined,
+                        metadata: updateOrchestratorTaskMeta(orphanedAssignment.task.metadata, {
+                            mergeStatus: 'failed',
+                            mergeFailureReason: `Steward recovery limit reached after ${stewardRecoveryCount} attempts`,
+                        }),
+                    });
+                    processed++;
+                }
+                catch (error) {
+                    errors++;
+                    const errorMessage = error instanceof Error ? error.message : String(error);
+                    errorMessages.push(`Steward recovery limit for ${steward.name}: ${errorMessage}`);
+                    logger.error(`Error setting mergeStatus to failed for task ${orphanedAssignment.task.id}:`, error);
+                }
+                continue;
+            }
             try {
+                // Increment stewardRecoveryCount before recovering
+                await this.api.update(orphanedAssignment.task.id, {
+                    metadata: updateOrchestratorTaskMeta(orphanedAssignment.task.metadata, { stewardRecoveryCount: stewardRecoveryCount + 1 }),
+                });
                 await this.recoverOrphanedStewardTask(steward, orphanedAssignment.task, orphanedAssignment.orchestratorMeta);
                 processed++;
             }
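
The two lines added to dist/types/task-meta.d.ts are not shown here, but the hunk above reads and writes several orchestrator metadata fields on the task. A TypeScript sketch of those fields as used in this file follows; the field names are taken from the diff, while the types and optionality are assumptions.

// Orchestrator task metadata fields touched by the steward-recovery safety valve.
// The actual declaration in dist/types/task-meta.d.ts may include more fields.
interface OrchestratorTaskMetaSketch {
    // Incremented before each orphaned-steward recovery attempt; recovery stops at 3.
    stewardRecoveryCount?: number;
    // Observed value in this diff: 'failed', set once the recovery cap is reached.
    mergeStatus?: string;
    // Human-readable reason recorded alongside a failed merge status.
    mergeFailureReason?: string;
    // Provider session ID used elsewhere in this file to resume interrupted sessions.
    sessionId?: string;
}
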
@@ -891,6 +978,40 @@ export class DispatchDaemonImpl {
             return;
         this.polling = true;
         try {
+            // Check if all executables in the fallback chain are rate-limited.
+            // When paused, skip dispatch-related polls but still run non-dispatch work.
+            const fallbackChain = this.settingsService?.getAgentDefaults().fallbackChain ?? [];
+            const allLimited = fallbackChain.length > 0 && this.rateLimitTracker.isAllLimited(fallbackChain);
+            if (allLimited) {
+                // Schedule a wake-up timer so we re-check when the soonest limit expires
+                const soonestReset = this.rateLimitTracker.getSoonestResetTime();
+                if (soonestReset && !this.rateLimitSleepTimer) {
+                    const sleepMs = Math.max(0, soonestReset.getTime() - Date.now());
+                    logger.info(`All executables rate-limited. Pausing dispatch polls for ${Math.round(sleepMs / 1000)}s (until ${soonestReset.toISOString()})`);
+                    this.rateLimitSleepTimer = setTimeout(() => {
+                        this.rateLimitSleepTimer = undefined;
+                    }, sleepMs);
+                }
+                // Run non-dispatch polls only
+                if (this.config.inboxPollEnabled) {
+                    await this.pollInboxes();
+                }
+                if (this.config.closedUnmergedReconciliationEnabled) {
+                    await this.reconcileClosedUnmergedTasks();
+                }
+                if (this.config.stuckMergeRecoveryEnabled) {
+                    await this.recoverStuckMergeTasks();
+                }
+                if (this.config.planAutoCompleteEnabled) {
+                    await this.pollPlanAutoComplete();
+                }
+                return;
+            }
+            // Clear sleep timer if limits have expired
+            if (this.rateLimitSleepTimer) {
+                clearTimeout(this.rateLimitSleepTimer);
+                this.rateLimitSleepTimer = undefined;
+            }
             // Recover orphaned assignments first — workers with tasks but no session
             // (e.g. from mid-cycle crashes). Runs before availability polling so
             // orphans are handled before they'd be skipped.
@@ -933,6 +1054,48 @@ export class DispatchDaemonImpl {
             this.polling = false;
         }
     }
+    /**
+     * Resolves the executable path for a session, checking rate limits and applying
+     * fallback selection when the primary executable is rate-limited.
+     *
+     * @param agent - The agent to resolve the executable for
+     * @returns The executable path override if fallback was needed, undefined if primary is OK,
+     * or 'all_limited' if all executables in the fallback chain are rate-limited.
+     */
+    resolveExecutableWithFallback(agent) {
+        const meta = getAgentMetadata(agent);
+        if (!meta)
+            return undefined;
+        // Get the configured executable for this agent
+        const agentExecutablePath = meta.executablePath;
+        const providerName = meta.provider ?? 'claude';
+        // Determine the effective executable path that would be used
+        // Priority: agent-specific → workspace-wide default → provider default
+        let effectiveExecutable = agentExecutablePath;
+        if (!effectiveExecutable && this.settingsService) {
+            const defaults = this.settingsService.getAgentDefaults();
+            effectiveExecutable = defaults.defaultExecutablePaths[providerName];
+        }
+        if (!effectiveExecutable) {
+            effectiveExecutable = providerName;
+        }
+        // Check if the effective executable is rate-limited
+        if (!this.rateLimitTracker.isLimited(effectiveExecutable)) {
+            return undefined; // Primary is fine, no override needed
+        }
+        // Primary is limited — try fallback chain
+        const fallbackChain = this.settingsService?.getAgentDefaults().fallbackChain ?? [];
+        if (fallbackChain.length === 0) {
+            // No fallback chain configured, can't dispatch
+            return 'all_limited';
+        }
+        const available = this.rateLimitTracker.getAvailableExecutable(fallbackChain);
+        if (!available) {
+            return 'all_limited';
+        }
+        logger.info(`Executable '${effectiveExecutable}' is rate-limited, falling back to '${available}'`);
+        return available;
+    }
     /**
      * Terminates sessions that have exceeded the configured max duration.
      * Prevents stuck workers from blocking their slot indefinitely.
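
resolveExecutableWithFallback reads two pieces of workspace configuration from the settings service: per-provider default executable paths and the fallback chain. The preferred executable resolves as the agent-specific executablePath, then defaultExecutablePaths[provider], then the bare provider name. The sketch below mirrors that decision in TypeScript; the property names match what the method accesses, but the types and the helper itself are illustrations, not the package's declared API.

// Shape of settingsService.getAgentDefaults() as read by resolveExecutableWithFallback.
interface AgentDefaultsSketch {
    // Provider name (e.g. 'claude') -> default executable path when the agent sets none.
    defaultExecutablePaths: Record<string, string | undefined>;
    // Ordered list of executables to try when the preferred one is rate-limited.
    fallbackChain: string[];
}

// Condensed mirror of the override decision above: no override while the preferred
// executable is usable, the first available fallback otherwise, or 'all_limited'
// as the sentinel that tells callers to skip dispatch entirely.
function pickOverride(
    preferred: string,
    defaults: AgentDefaultsSketch,
    tracker: { isLimited(exe: string): boolean; getAvailableExecutable(exes: string[]): string | undefined },
): string | undefined {
    if (!tracker.isLimited(preferred)) {
        return undefined; // primary executable is fine
    }
    return tracker.getAvailableExecutable(defaults.fallbackChain) ?? 'all_limited';
}
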
@@ -1055,16 +1218,25 @@ export class DispatchDaemonImpl {
             }
             catch (error) {
                 logger.warn(`Failed to resume session ${previousSessionId} for worker ${worker.name}, falling back to fresh spawn:`, error);
+                // Clear stale session ID so next recovery cycle doesn't try to resume again
+                const clearedMeta = updateOrchestratorTaskMeta(task.metadata, { sessionId: undefined });
+                await this.api.update(task.id, { metadata: clearedMeta });
             }
         }
-        // 3. Fall back to fresh spawn
+        // 3. Fall back to fresh spawn (with rate limit fallback)
+        const orphanExecutableOverride = this.resolveExecutableWithFallback(worker);
+        if (orphanExecutableOverride === 'all_limited') {
+            logger.warn(`All executables rate-limited, deferring orphan recovery for worker ${worker.name}`);
+            return;
+        }
         const initialPrompt = await this.buildTaskPrompt(task, workerId);
         const { session, events } = await this.sessionManager.startSession(workerId, {
             workingDirectory: worktreePath,
             worktree: worktreePath,
             initialPrompt,
+            executablePathOverride: orphanExecutableOverride ?? undefined,
         });
-        // Record session history entry for fresh spawned worker session
+        // Record session history entry and new sessionId for fresh spawned worker session
         const freshSpawnHistoryEntry = {
             sessionId: session.id,
             providerSessionId: session.providerSessionId,
@@ -1076,7 +1248,9 @@ export class DispatchDaemonImpl {
         const taskAfterFreshSpawn = await this.api.get(task.id);
         if (taskAfterFreshSpawn) {
             const metadataWithHistory = appendTaskSessionHistory(taskAfterFreshSpawn.metadata, freshSpawnHistoryEntry);
-
+            // Write the new session ID so future recovery cycles can resume this session
+            const metadataWithSessionId = updateOrchestratorTaskMeta(metadataWithHistory, { sessionId: session.providerSessionId ?? session.id });
+            await this.api.update(task.id, { metadata: metadataWithSessionId });
         }
         if (this.config.onSessionStarted) {
             this.config.onSessionStarted(session, events, workerId, initialPrompt);
@@ -1139,6 +1313,9 @@ export class DispatchDaemonImpl {
             }
             catch (error) {
                 logger.warn(`Failed to resume steward session ${previousSessionId} for ${steward.name}, falling back to fresh spawn:`, error);
+                // Clear stale session ID so next recovery cycle doesn't try to resume again
+                const clearedMeta = updateOrchestratorTaskMeta(task.metadata, { sessionId: undefined });
+                await this.api.update(task.id, { metadata: clearedMeta });
             }
         }
         // 3. Fall back to fresh spawn (spawnMergeStewardForTask handles metadata update AND session history)
@@ -1178,6 +1355,12 @@ export class DispatchDaemonImpl {
                 }
             }
         }
+        // Check rate limits and determine executable path override if needed
+        const executableOverride = this.resolveExecutableWithFallback(worker);
+        if (executableOverride === 'all_limited') {
+            logger.warn(`All executables rate-limited, skipping dispatch for worker ${worker.name}`);
+            return false;
+        }
         // Check for existing worktree/branch in task metadata
         // Priority: handoff > existing assignment > create new
         const taskMeta = getOrchestratorTaskMeta(task.metadata);
@@ -1228,6 +1411,7 @@ export class DispatchDaemonImpl {
             workingDirectory: worktreePath,
             worktree: worktreePath,
             initialPrompt,
+            executablePathOverride: executableOverride ?? undefined,
         });
         // Session started successfully — now dispatch the task (assigns + sends message)
         const dispatchOptions = {
@@ -1421,6 +1605,12 @@ export class DispatchDaemonImpl {
                 return;
             }
         }
+        // Check rate limits and determine executable path override if needed
+        const stewardExecutableOverride = this.resolveExecutableWithFallback(steward);
+        if (stewardExecutableOverride === 'all_limited') {
+            logger.warn(`All executables rate-limited, skipping merge steward dispatch for ${steward.name}`);
+            return;
+        }
         // Get task metadata for worktree path
         const taskMeta = task.metadata;
         const orchestratorMeta = taskMeta?.orchestrator;
@@ -1486,6 +1676,7 @@ export class DispatchDaemonImpl {
             worktree: worktreePath,
             initialPrompt,
             interactive: false, // Stewards use headless mode
+            executablePathOverride: stewardExecutableOverride ?? undefined,
         });
         // Record steward assignment and session history on the task to prevent double-dispatch and enable recovery.
         // Setting task.assignee makes the steward visible in the UI and enables
@@ -1579,6 +1770,12 @@ export class DispatchDaemonImpl {
                 return;
             }
         }
+        // 2b. Check rate limits and determine executable path override
+        const recoveryExecutableOverride = this.resolveExecutableWithFallback(recoverySteward);
+        if (recoveryExecutableOverride === 'all_limited') {
+            logger.warn(`All executables rate-limited, deferring recovery steward spawn for ${recoverySteward.name}`);
+            return;
+        }
         // 3. Resolve worktree — reuse the worker's existing worktree
         let worktreePath = taskMeta?.worktree ?? taskMeta?.handoffWorktree;
         const branch = taskMeta?.branch ?? taskMeta?.handoffBranch;
@@ -1629,6 +1826,7 @@ export class DispatchDaemonImpl {
             worktree: worktreePath,
             initialPrompt,
             interactive: false, // Stewards use headless mode
+            executablePathOverride: recoveryExecutableOverride ?? undefined,
         });
         // 7. Record steward assignment and session history on the task
         const taskAfterUpdate = await this.api.get(task.id);
@@ -2155,7 +2353,7 @@ export class DispatchDaemonImpl {
 /**
  * Creates a DispatchDaemon instance
  */
-export function createDispatchDaemon(api, agentRegistry, sessionManager, dispatchService, worktreeManager, taskAssignment, stewardScheduler, inboxService, config, poolService) {
-    return new DispatchDaemonImpl(api, agentRegistry, sessionManager, dispatchService, worktreeManager, taskAssignment, stewardScheduler, inboxService, config, poolService);
+export function createDispatchDaemon(api, agentRegistry, sessionManager, dispatchService, worktreeManager, taskAssignment, stewardScheduler, inboxService, config, poolService, settingsService) {
+    return new DispatchDaemonImpl(api, agentRegistry, sessionManager, dispatchService, worktreeManager, taskAssignment, stewardScheduler, inboxService, config, poolService, settingsService);
 }
 //# sourceMappingURL=dispatch-daemon.js.map
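
The factory now takes the settings service as an eleventh, trailing argument. Because the daemon guards each settingsService access with optional chaining, callers that omit it only lose the new rate-limit fallback and pause behaviour rather than breaking dispatch. A call-site sketch follows; the deps placeholders and the import path are assumptions, since the real composition in dist/server/services.js is not part of this diff.

// Call-site sketch only: how the dependencies are actually constructed is not shown in
// this diff, so they are declared here as opaque placeholders.
import { createDispatchDaemon } from './services/dispatch-daemon.js'; // path as seen from the dist root

declare const deps: {
    api: any; agentRegistry: any; sessionManager: any; dispatchService: any;
    worktreeManager: any; taskAssignment: any; stewardScheduler: any;
    inboxService: any; config: any; poolService: any; settingsService: any;
};

const daemon = createDispatchDaemon(
    deps.api, deps.agentRegistry, deps.sessionManager, deps.dispatchService,
    deps.worktreeManager, deps.taskAssignment, deps.stewardScheduler,
    deps.inboxService, deps.config, deps.poolService,
    deps.settingsService, // new trailing argument in 1.4.1 → 1.6.0
);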