comfyui-node 1.6.0 → 1.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/.tsbuildinfo +1 -1
- package/dist/call-wrapper.js +856 -856
- package/dist/index.d.ts +13 -13
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +7 -7
- package/dist/index.js.map +1 -1
- package/dist/multipool/client-registry.d.ts +33 -0
- package/dist/multipool/client-registry.d.ts.map +1 -0
- package/dist/multipool/client-registry.js +153 -0
- package/dist/multipool/client-registry.js.map +1 -0
- package/dist/multipool/helpers.d.ts +5 -0
- package/dist/multipool/helpers.d.ts.map +1 -0
- package/dist/multipool/helpers.js +53 -0
- package/dist/multipool/helpers.js.map +1 -0
- package/dist/multipool/index.d.ts +3 -0
- package/dist/multipool/index.d.ts.map +1 -0
- package/dist/multipool/index.js +3 -0
- package/dist/multipool/index.js.map +1 -0
- package/dist/multipool/interfaces.d.ts +13 -0
- package/dist/multipool/interfaces.d.ts.map +1 -0
- package/dist/multipool/interfaces.js +2 -0
- package/dist/multipool/interfaces.js.map +1 -0
- package/dist/multipool/job-profiler.d.ts +128 -0
- package/dist/multipool/job-profiler.d.ts.map +1 -0
- package/dist/multipool/job-profiler.js +222 -0
- package/dist/multipool/job-profiler.js.map +1 -0
- package/dist/multipool/job-queue-processor.d.ts +28 -0
- package/dist/multipool/job-queue-processor.d.ts.map +1 -0
- package/dist/multipool/job-queue-processor.js +197 -0
- package/dist/multipool/job-queue-processor.js.map +1 -0
- package/dist/multipool/job-state-registry.d.ts +67 -0
- package/dist/multipool/job-state-registry.d.ts.map +1 -0
- package/dist/multipool/job-state-registry.js +283 -0
- package/dist/multipool/job-state-registry.js.map +1 -0
- package/dist/multipool/logger.d.ts +30 -0
- package/dist/multipool/logger.d.ts.map +1 -0
- package/dist/multipool/logger.js +75 -0
- package/dist/multipool/logger.js.map +1 -0
- package/dist/multipool/multi-workflow-pool.d.ts +43 -0
- package/dist/multipool/multi-workflow-pool.d.ts.map +1 -0
- package/dist/multipool/multi-workflow-pool.js +314 -0
- package/dist/multipool/multi-workflow-pool.js.map +1 -0
- package/dist/multipool/pool-event-manager.d.ts +11 -0
- package/dist/multipool/pool-event-manager.d.ts.map +1 -0
- package/dist/multipool/pool-event-manager.js +28 -0
- package/dist/multipool/pool-event-manager.js.map +1 -0
- package/dist/multipool/tests/error-classification-tests.d.ts +2 -0
- package/dist/multipool/tests/error-classification-tests.d.ts.map +1 -0
- package/dist/multipool/tests/error-classification-tests.js +374 -0
- package/dist/multipool/tests/error-classification-tests.js.map +1 -0
- package/dist/multipool/tests/job-state-registry.d.ts +17 -0
- package/dist/multipool/tests/job-state-registry.d.ts.map +1 -0
- package/dist/multipool/tests/job-state-registry.js +24 -0
- package/dist/multipool/tests/job-state-registry.js.map +1 -0
- package/dist/multipool/tests/multipool-basic.d.ts +12 -0
- package/dist/multipool/tests/multipool-basic.d.ts.map +1 -0
- package/dist/multipool/tests/multipool-basic.js +142 -0
- package/dist/multipool/tests/multipool-basic.js.map +1 -0
- package/dist/multipool/tests/profiling-demo.d.ts +7 -0
- package/dist/multipool/tests/profiling-demo.d.ts.map +1 -0
- package/dist/multipool/tests/profiling-demo.js +88 -0
- package/dist/multipool/tests/profiling-demo.js.map +1 -0
- package/dist/multipool/tests/prompt-generator.d.ts +10 -0
- package/dist/multipool/tests/prompt-generator.d.ts.map +1 -0
- package/dist/multipool/tests/prompt-generator.js +26 -0
- package/dist/multipool/tests/prompt-generator.js.map +1 -0
- package/dist/multipool/tests/test-helpers.d.ts +4 -0
- package/dist/multipool/tests/test-helpers.d.ts.map +1 -0
- package/dist/multipool/tests/test-helpers.js +10 -0
- package/dist/multipool/tests/test-helpers.js.map +1 -0
- package/dist/multipool/tests/two-stage-edit-simulation.d.ts +32 -0
- package/dist/multipool/tests/two-stage-edit-simulation.d.ts.map +1 -0
- package/dist/multipool/tests/two-stage-edit-simulation.js +299 -0
- package/dist/multipool/tests/two-stage-edit-simulation.js.map +1 -0
- package/dist/multipool/workflow.d.ts +179 -0
- package/dist/multipool/workflow.d.ts.map +1 -0
- package/dist/multipool/workflow.js +334 -0
- package/dist/multipool/workflow.js.map +1 -0
- package/dist/pool/SmartPool.d.ts +143 -143
- package/dist/pool/SmartPool.d.ts.map +1 -1
- package/dist/pool/SmartPool.js +676 -676
- package/dist/pool/SmartPool.js.map +1 -1
- package/dist/pool/SmartPoolV2.d.ts +119 -119
- package/dist/pool/SmartPoolV2.js +586 -586
- package/dist/pool/WorkflowPool.d.ts +202 -202
- package/dist/pool/WorkflowPool.d.ts.map +1 -1
- package/dist/pool/WorkflowPool.js +845 -840
- package/dist/pool/WorkflowPool.js.map +1 -1
- package/dist/pool/client/ClientManager.d.ts +86 -86
- package/dist/pool/client/ClientManager.js +215 -215
- package/dist/pool/index.d.ts +9 -11
- package/dist/pool/index.d.ts.map +1 -1
- package/dist/pool/index.js +3 -5
- package/dist/pool/index.js.map +1 -1
- package/package.json +1 -1
package/dist/pool/SmartPool.js
CHANGED
|
@@ -1,677 +1,677 @@
|
|
|
1
|
-
import { randomUUID } from "node:crypto";
|
|
2
|
-
import { hashWorkflow } from "
|
|
3
|
-
import { ComfyApi } from "
|
|
4
|
-
import { PromptBuilder } from "
|
|
5
|
-
import { MemoryQueueAdapter } from "./queue/adapters/memory.js";
|
|
6
|
-
import { TypedEventTarget } from "
|
|
7
|
-
const DEFAULT_SMART_POOL_OPTIONS = {
|
|
8
|
-
connectionTimeoutMs: 10000
|
|
9
|
-
};
|
|
10
|
-
export class SmartPool extends TypedEventTarget {
|
|
11
|
-
// Clients managed by the pool
|
|
12
|
-
clientMap = new Map();
|
|
13
|
-
// Queue state of pool clients
|
|
14
|
-
clientQueueStates = new Map();
|
|
15
|
-
// In-memory store for job records
|
|
16
|
-
jobStore = new Map();
|
|
17
|
-
// Affinities mapping workflow hashes to preferred clients
|
|
18
|
-
affinities = new Map();
|
|
19
|
-
// Server performance metrics tracking
|
|
20
|
-
serverPerformance = new Map();
|
|
21
|
-
// Queue adapter for job persistence
|
|
22
|
-
queueAdapter;
|
|
23
|
-
// Flag to prevent concurrent queue processing
|
|
24
|
-
processingNextJob = false;
|
|
25
|
-
// Pool options
|
|
26
|
-
options;
|
|
27
|
-
// Hooks for pool-wide events
|
|
28
|
-
hooks = {};
|
|
29
|
-
constructor(clients, options) {
|
|
30
|
-
super();
|
|
31
|
-
if (options) {
|
|
32
|
-
this.options = { ...DEFAULT_SMART_POOL_OPTIONS, ...options };
|
|
33
|
-
}
|
|
34
|
-
else {
|
|
35
|
-
this.options = DEFAULT_SMART_POOL_OPTIONS;
|
|
36
|
-
}
|
|
37
|
-
// Initialize queue adapter
|
|
38
|
-
this.queueAdapter = new MemoryQueueAdapter();
|
|
39
|
-
for (const client of clients) {
|
|
40
|
-
if (typeof client === "string") {
|
|
41
|
-
const apiClient = new ComfyApi(client);
|
|
42
|
-
this.clientMap.set(apiClient.apiHost, apiClient);
|
|
43
|
-
}
|
|
44
|
-
else {
|
|
45
|
-
this.clientMap.set(client.apiHost, client);
|
|
46
|
-
}
|
|
47
|
-
}
|
|
48
|
-
}
|
|
49
|
-
emitLegacy(event) {
|
|
50
|
-
if (this.hooks.any) {
|
|
51
|
-
this.hooks.any(event);
|
|
52
|
-
}
|
|
53
|
-
const specificHook = this.hooks[event.type];
|
|
54
|
-
if (specificHook) {
|
|
55
|
-
specificHook(event);
|
|
56
|
-
}
|
|
57
|
-
}
|
|
58
|
-
/**
|
|
59
|
-
* Adds an event listener for the specified event type.
|
|
60
|
-
* Properly typed wrapper around EventTarget.addEventListener.
|
|
61
|
-
*/
|
|
62
|
-
on(type, handler, options) {
|
|
63
|
-
super.on(type, handler, options);
|
|
64
|
-
return () => this.off(type, handler, options);
|
|
65
|
-
}
|
|
66
|
-
/**
|
|
67
|
-
* Removes an event listener for the specified event type.
|
|
68
|
-
* Properly typed wrapper around EventTarget.removeEventListener.
|
|
69
|
-
*/
|
|
70
|
-
off(type, handler, options) {
|
|
71
|
-
super.off(type, handler, options);
|
|
72
|
-
}
|
|
73
|
-
/**
|
|
74
|
-
* Adds a one-time event listener for the specified event type.
|
|
75
|
-
*/
|
|
76
|
-
once(type, handler, options) {
|
|
77
|
-
return super.once(type, handler, options);
|
|
78
|
-
}
|
|
79
|
-
async connect() {
|
|
80
|
-
const connectionPromises = [];
|
|
81
|
-
const tRefZero = Date.now();
|
|
82
|
-
for (const [url, client] of this.clientMap.entries()) {
|
|
83
|
-
connectionPromises.push(new Promise(async (resolve, reject) => {
|
|
84
|
-
const timeout = setTimeout(() => {
|
|
85
|
-
client.abortReconnect();
|
|
86
|
-
reject(new Error(`Connection to client at ${url} timed out`));
|
|
87
|
-
}, this.options.connectionTimeoutMs);
|
|
88
|
-
try {
|
|
89
|
-
const comfyApi = await client.init(1);
|
|
90
|
-
comfyApi.on("connected", (event) => {
|
|
91
|
-
if (event.type === "connected") {
|
|
92
|
-
const tRefDone = Date.now();
|
|
93
|
-
const tDelta = tRefDone - tRefZero;
|
|
94
|
-
console.log(`Client at ${url} (${event.target?.osType}) connected via websockets in ${tDelta} ms`);
|
|
95
|
-
resolve(comfyApi);
|
|
96
|
-
}
|
|
97
|
-
});
|
|
98
|
-
}
|
|
99
|
-
catch (reason) {
|
|
100
|
-
console.error(`Failed to connect to client at ${url}:`, reason);
|
|
101
|
-
reject(reason);
|
|
102
|
-
}
|
|
103
|
-
finally {
|
|
104
|
-
clearTimeout(timeout);
|
|
105
|
-
}
|
|
106
|
-
}));
|
|
107
|
-
}
|
|
108
|
-
// Wait for all connection attempts to settle
|
|
109
|
-
const results = await Promise.allSettled(connectionPromises);
|
|
110
|
-
// Check for any rejected connections
|
|
111
|
-
const rejected = results.filter(result => result.status === "rejected");
|
|
112
|
-
// Warn if there are any rejected connections
|
|
113
|
-
if (rejected.length > 0) {
|
|
114
|
-
console.warn(`${rejected.length} client(s) failed to connect.`);
|
|
115
|
-
for (const rejectedClient of rejected) {
|
|
116
|
-
console.warn(`Client rejection reason: ${rejectedClient.reason}`);
|
|
117
|
-
}
|
|
118
|
-
}
|
|
119
|
-
// Sync queue states after connections
|
|
120
|
-
await this.syncQueueStates();
|
|
121
|
-
}
|
|
122
|
-
shutdown() {
|
|
123
|
-
for (const client of this.clientMap.values()) {
|
|
124
|
-
try {
|
|
125
|
-
client.destroy();
|
|
126
|
-
}
|
|
127
|
-
catch (reason) {
|
|
128
|
-
console.error(`Error shutting down client at ${client.apiHost}:`, reason);
|
|
129
|
-
}
|
|
130
|
-
}
|
|
131
|
-
}
|
|
132
|
-
async syncQueueStates() {
|
|
133
|
-
const promises = Array
|
|
134
|
-
.from(this.clientMap.values())
|
|
135
|
-
.filter(value => value.isReady)
|
|
136
|
-
.map(value => {
|
|
137
|
-
return new Promise(resolve => {
|
|
138
|
-
value.getQueue().then(value1 => {
|
|
139
|
-
this.clientQueueStates.set(value.apiHost, {
|
|
140
|
-
queuedJobs: value1.queue_pending.length,
|
|
141
|
-
runningJobs: value1.queue_running.length
|
|
142
|
-
});
|
|
143
|
-
resolve(true);
|
|
144
|
-
});
|
|
145
|
-
});
|
|
146
|
-
});
|
|
147
|
-
await Promise.allSettled(promises);
|
|
148
|
-
}
|
|
149
|
-
// Add a job record to the pool
|
|
150
|
-
addJob(jobId, jobRecord) {
|
|
151
|
-
this.jobStore.set(jobId, jobRecord);
|
|
152
|
-
}
|
|
153
|
-
// Get a job record from the pool
|
|
154
|
-
getJob(jobId) {
|
|
155
|
-
return this.jobStore.get(jobId);
|
|
156
|
-
}
|
|
157
|
-
// Remove a job record from the pool
|
|
158
|
-
removeJob(jobId) {
|
|
159
|
-
this.jobStore.delete(jobId);
|
|
160
|
-
}
|
|
161
|
-
// Set the affinity for a workflow
|
|
162
|
-
setAffinity(workflow, affinity) {
|
|
163
|
-
const workflowHash = hashWorkflow(workflow);
|
|
164
|
-
this.affinities.set(workflowHash, {
|
|
165
|
-
workflowHash,
|
|
166
|
-
...affinity
|
|
167
|
-
});
|
|
168
|
-
}
|
|
169
|
-
// Get the affinity for a workflow
|
|
170
|
-
getAffinity(workflowHash) {
|
|
171
|
-
return this.affinities.get(workflowHash);
|
|
172
|
-
}
|
|
173
|
-
// Remove the affinity for a workflow
|
|
174
|
-
removeAffinity(workflowHash) {
|
|
175
|
-
this.affinities.delete(workflowHash);
|
|
176
|
-
}
|
|
177
|
-
/**
|
|
178
|
-
* Track server performance metrics for job execution
|
|
179
|
-
*/
|
|
180
|
-
updateServerPerformance(clientId, executionTimeMs) {
|
|
181
|
-
let metrics = this.serverPerformance.get(clientId);
|
|
182
|
-
if (!metrics) {
|
|
183
|
-
metrics = {
|
|
184
|
-
clientId,
|
|
185
|
-
totalJobsCompleted: 0,
|
|
186
|
-
totalExecutionTimeMs: 0,
|
|
187
|
-
averageExecutionTimeMs: 0,
|
|
188
|
-
lastJobDurationMs: 0
|
|
189
|
-
};
|
|
190
|
-
this.serverPerformance.set(clientId, metrics);
|
|
191
|
-
}
|
|
192
|
-
metrics.totalJobsCompleted++;
|
|
193
|
-
metrics.totalExecutionTimeMs += executionTimeMs;
|
|
194
|
-
metrics.lastJobDurationMs = executionTimeMs;
|
|
195
|
-
metrics.averageExecutionTimeMs = metrics.totalExecutionTimeMs / metrics.totalJobsCompleted;
|
|
196
|
-
}
|
|
197
|
-
/**
|
|
198
|
-
* Get server performance metrics
|
|
199
|
-
*/
|
|
200
|
-
getServerPerformance(clientId) {
|
|
201
|
-
return this.serverPerformance.get(clientId);
|
|
202
|
-
}
|
|
203
|
-
/**
|
|
204
|
-
* Get sorted list of servers by performance (fastest first) within a given set
|
|
205
|
-
*/
|
|
206
|
-
sortServersByPerformance(serverIds) {
|
|
207
|
-
return [...serverIds].sort((a, b) => {
|
|
208
|
-
const metricsA = this.serverPerformance.get(a);
|
|
209
|
-
const metricsB = this.serverPerformance.get(b);
|
|
210
|
-
// Servers with no metrics go to end (untracked/slow startup)
|
|
211
|
-
if (!metricsA)
|
|
212
|
-
return 1;
|
|
213
|
-
if (!metricsB)
|
|
214
|
-
return -1;
|
|
215
|
-
// Sort by average execution time (fastest first)
|
|
216
|
-
return metricsA.averageExecutionTimeMs - metricsB.averageExecutionTimeMs;
|
|
217
|
-
});
|
|
218
|
-
}
|
|
219
|
-
/**
|
|
220
|
-
* Enqueue a workflow for execution by the pool.
|
|
221
|
-
* Auto-triggers processing via setImmediate (batteries included).
|
|
222
|
-
*/
|
|
223
|
-
async enqueue(workflow, opts) {
|
|
224
|
-
const jobId = randomUUID();
|
|
225
|
-
const workflowHash = workflow.structureHash || hashWorkflow(workflow.json || workflow);
|
|
226
|
-
const workflowJson = workflow.json || workflow;
|
|
227
|
-
const outputNodeIds = workflow.outputNodeIds || [];
|
|
228
|
-
const outputAliases = workflow.outputAliases || {};
|
|
229
|
-
// Create job record
|
|
230
|
-
const jobRecord = {
|
|
231
|
-
jobId,
|
|
232
|
-
workflow: workflowJson,
|
|
233
|
-
workflowHash,
|
|
234
|
-
options: {
|
|
235
|
-
maxAttempts: 3,
|
|
236
|
-
retryDelayMs: 1000,
|
|
237
|
-
priority: opts?.priority ?? 0,
|
|
238
|
-
preferredClientIds: opts?.preferredClientIds ?? [],
|
|
239
|
-
excludeClientIds: [],
|
|
240
|
-
metadata: {}
|
|
241
|
-
},
|
|
242
|
-
attempts: 0,
|
|
243
|
-
enqueuedAt: Date.now(),
|
|
244
|
-
workflowMeta: {
|
|
245
|
-
outputNodeIds,
|
|
246
|
-
outputAliases
|
|
247
|
-
},
|
|
248
|
-
status: "queued"
|
|
249
|
-
};
|
|
250
|
-
// Store in job store
|
|
251
|
-
this.jobStore.set(jobId, jobRecord);
|
|
252
|
-
// Create payload for queue adapter
|
|
253
|
-
const payload = jobRecord;
|
|
254
|
-
// Enqueue with priority
|
|
255
|
-
await this.queueAdapter.enqueue(payload, {
|
|
256
|
-
priority: opts?.priority ?? 0
|
|
257
|
-
});
|
|
258
|
-
// Emit queued event
|
|
259
|
-
this.dispatchEvent(new CustomEvent("job:queued", { detail: { job: jobRecord } }));
|
|
260
|
-
// Auto-trigger queue processing immediately (not via setImmediate, so it processes right away)
|
|
261
|
-
setImmediate(() => this.processNextJobQueued());
|
|
262
|
-
return jobId;
|
|
263
|
-
}
|
|
264
|
-
/**
|
|
265
|
-
* Entry point for queue processing with deduplication guard.
|
|
266
|
-
* Prevents concurrent processing of jobs.
|
|
267
|
-
* Poll-based approach: check idle servers, collect compatible jobs, enqueue only when slots available.
|
|
268
|
-
*/
|
|
269
|
-
async processNextJobQueued() {
|
|
270
|
-
if (this.processingNextJob) {
|
|
271
|
-
return;
|
|
272
|
-
}
|
|
273
|
-
this.processingNextJob = true;
|
|
274
|
-
try {
|
|
275
|
-
// Continuously sync queue states and process available work
|
|
276
|
-
while (true) {
|
|
277
|
-
// Update queue states from all clients
|
|
278
|
-
await this.syncQueueStates();
|
|
279
|
-
// Find idle servers (not running, not pending)
|
|
280
|
-
const idleServers = this.findIdleServers();
|
|
281
|
-
if (idleServers.length === 0) {
|
|
282
|
-
// No idle servers, wait a bit then check again
|
|
283
|
-
await new Promise(resolve => setTimeout(resolve, 1000));
|
|
284
|
-
continue;
|
|
285
|
-
}
|
|
286
|
-
// Try to assign jobs to idle servers
|
|
287
|
-
const jobsAssigned = await this.assignJobsToIdleServers(idleServers);
|
|
288
|
-
if (jobsAssigned === 0) {
|
|
289
|
-
// No jobs could be assigned, wait then try again
|
|
290
|
-
await new Promise(resolve => setTimeout(resolve, 1000));
|
|
291
|
-
continue;
|
|
292
|
-
}
|
|
293
|
-
// Jobs were assigned, give them time to start then re-check
|
|
294
|
-
await new Promise(resolve => setTimeout(resolve, 500));
|
|
295
|
-
}
|
|
296
|
-
}
|
|
297
|
-
finally {
|
|
298
|
-
this.processingNextJob = false;
|
|
299
|
-
}
|
|
300
|
-
}
|
|
301
|
-
/**
|
|
302
|
-
* Find servers that are currently idle (no running or pending jobs)
|
|
303
|
-
*/
|
|
304
|
-
findIdleServers() {
|
|
305
|
-
const idleServers = [];
|
|
306
|
-
for (const [clientId, client] of this.clientMap) {
|
|
307
|
-
if (!client.isReady)
|
|
308
|
-
continue;
|
|
309
|
-
const state = this.clientQueueStates.get(clientId);
|
|
310
|
-
if (state && state.queuedJobs === 0 && state.runningJobs === 0) {
|
|
311
|
-
idleServers.push(client);
|
|
312
|
-
}
|
|
313
|
-
}
|
|
314
|
-
return idleServers;
|
|
315
|
-
}
|
|
316
|
-
/**
|
|
317
|
-
* Assign compatible jobs from our queue to idle servers
|
|
318
|
-
* Returns number of jobs assigned
|
|
319
|
-
*/
|
|
320
|
-
async assignJobsToIdleServers(idleServers) {
|
|
321
|
-
let jobsAssigned = 0;
|
|
322
|
-
// Peek at pending jobs
|
|
323
|
-
const pendingJobs = await this.queueAdapter.peek(100);
|
|
324
|
-
if (pendingJobs.length === 0) {
|
|
325
|
-
return 0;
|
|
326
|
-
}
|
|
327
|
-
const matches = [];
|
|
328
|
-
for (const payload of pendingJobs) {
|
|
329
|
-
const job = this.jobStore.get(payload.jobId);
|
|
330
|
-
if (!job)
|
|
331
|
-
continue;
|
|
332
|
-
// Find all compatible idle servers for this job
|
|
333
|
-
const compatibleServers = idleServers.filter(s => this.isJobCompatibleWithServer(payload, job, s));
|
|
334
|
-
if (compatibleServers.length > 0) {
|
|
335
|
-
// Sort compatible servers by performance (fastest first)
|
|
336
|
-
const sortedServers = this.sortServersByPerformance(compatibleServers.map(s => s.apiHost))
|
|
337
|
-
.map(id => idleServers.find(s => s.apiHost === id))
|
|
338
|
-
.filter((s) => s !== undefined);
|
|
339
|
-
matches.push({
|
|
340
|
-
payload,
|
|
341
|
-
job,
|
|
342
|
-
compatibleServers: sortedServers
|
|
343
|
-
});
|
|
344
|
-
}
|
|
345
|
-
}
|
|
346
|
-
// Sort by selectivity (jobs with fewer compatible servers first)
|
|
347
|
-
matches.sort((a, b) => {
|
|
348
|
-
return a.compatibleServers.length - b.compatibleServers.length;
|
|
349
|
-
});
|
|
350
|
-
// Assign jobs to idle servers
|
|
351
|
-
const assignedServers = new Set();
|
|
352
|
-
for (const match of matches) {
|
|
353
|
-
// Use the fastest compatible server that hasn't been assigned yet
|
|
354
|
-
let targetServer;
|
|
355
|
-
for (const server of match.compatibleServers) {
|
|
356
|
-
if (!assignedServers.has(server.apiHost)) {
|
|
357
|
-
targetServer = server;
|
|
358
|
-
break;
|
|
359
|
-
}
|
|
360
|
-
}
|
|
361
|
-
if (!targetServer) {
|
|
362
|
-
continue;
|
|
363
|
-
}
|
|
364
|
-
// Reserve this specific job
|
|
365
|
-
const reservation = await this.queueAdapter.reserveById(match.job.jobId);
|
|
366
|
-
if (!reservation) {
|
|
367
|
-
continue;
|
|
368
|
-
}
|
|
369
|
-
try {
|
|
370
|
-
const result = await this.enqueueJobOnServer(match.job, targetServer);
|
|
371
|
-
if (result) {
|
|
372
|
-
assignedServers.add(targetServer.apiHost);
|
|
373
|
-
jobsAssigned++;
|
|
374
|
-
// Commit to our queue
|
|
375
|
-
await this.queueAdapter.commit(reservation.reservationId);
|
|
376
|
-
}
|
|
377
|
-
else {
|
|
378
|
-
// Enqueue failed, retry later
|
|
379
|
-
await this.queueAdapter.retry(reservation.reservationId, { delayMs: 1000 });
|
|
380
|
-
}
|
|
381
|
-
}
|
|
382
|
-
catch (error) {
|
|
383
|
-
// Retry on error
|
|
384
|
-
await this.queueAdapter.retry(reservation.reservationId, { delayMs: 1000 });
|
|
385
|
-
}
|
|
386
|
-
}
|
|
387
|
-
return jobsAssigned;
|
|
388
|
-
}
|
|
389
|
-
/**
|
|
390
|
-
* Check if a job is compatible with a server
|
|
391
|
-
*/
|
|
392
|
-
isJobCompatibleWithServer(payload, job, server) {
|
|
393
|
-
// Check preferred client IDs first
|
|
394
|
-
if (payload.options.preferredClientIds && payload.options.preferredClientIds.length > 0) {
|
|
395
|
-
return payload.options.preferredClientIds.includes(server.apiHost);
|
|
396
|
-
}
|
|
397
|
-
// Check workflow affinity
|
|
398
|
-
const affinity = this.getAffinity(payload.workflowHash);
|
|
399
|
-
if (affinity && affinity.preferredClientIds) {
|
|
400
|
-
return affinity.preferredClientIds.includes(server.apiHost);
|
|
401
|
-
}
|
|
402
|
-
// No constraints, compatible with any server
|
|
403
|
-
return true;
|
|
404
|
-
}
|
|
405
|
-
/**
|
|
406
|
-
* Enqueue a job on a specific server
|
|
407
|
-
* Returns true if successful, false if failed
|
|
408
|
-
*/
|
|
409
|
-
async enqueueJobOnServer(job, server) {
|
|
410
|
-
try {
|
|
411
|
-
const workflowJson = job.workflow;
|
|
412
|
-
const outputNodeIds = job.workflowMeta?.outputNodeIds || [];
|
|
413
|
-
// Auto-randomize any seed fields set to -1
|
|
414
|
-
try {
|
|
415
|
-
for (const [_, node] of Object.entries(workflowJson)) {
|
|
416
|
-
const n = node;
|
|
417
|
-
if (n && n.inputs && Object.prototype.hasOwnProperty.call(n.inputs, 'seed')) {
|
|
418
|
-
if (n.inputs.seed === -1) {
|
|
419
|
-
const val = Math.floor(Math.random() * 2_147_483_647);
|
|
420
|
-
n.inputs.seed = val;
|
|
421
|
-
}
|
|
422
|
-
}
|
|
423
|
-
}
|
|
424
|
-
}
|
|
425
|
-
catch { /* non-fatal */ }
|
|
426
|
-
// Build prompt
|
|
427
|
-
const pb = new PromptBuilder(workflowJson, [], outputNodeIds);
|
|
428
|
-
for (const nodeId of outputNodeIds) {
|
|
429
|
-
pb.setOutputNode(nodeId, nodeId);
|
|
430
|
-
}
|
|
431
|
-
const promptJson = pb.prompt;
|
|
432
|
-
// Queue on client
|
|
433
|
-
const queueResponse = await server.ext.queue.appendPrompt(promptJson);
|
|
434
|
-
const promptId = queueResponse.prompt_id;
|
|
435
|
-
// Update job record
|
|
436
|
-
job.status = "running";
|
|
437
|
-
job.clientId = server.apiHost;
|
|
438
|
-
job.promptId = promptId;
|
|
439
|
-
job.attempts += 1;
|
|
440
|
-
job.startedAt = Date.now(); // Track when job starts executing
|
|
441
|
-
this.dispatchEvent(new CustomEvent("job:accepted", { detail: { job } }));
|
|
442
|
-
this.dispatchEvent(new CustomEvent("job:started", { detail: { job } }));
|
|
443
|
-
// Run execution in background
|
|
444
|
-
this.waitForExecutionCompletion(server, promptId, { json: workflowJson })
|
|
445
|
-
.then((result) => {
|
|
446
|
-
job.status = "completed";
|
|
447
|
-
job.result = result;
|
|
448
|
-
job.completedAt = Date.now();
|
|
449
|
-
// Track server performance
|
|
450
|
-
const executionTimeMs = job.completedAt - (job.startedAt || job.completedAt);
|
|
451
|
-
this.updateServerPerformance(server.apiHost, executionTimeMs);
|
|
452
|
-
this.dispatchEvent(new CustomEvent("job:completed", { detail: { job } }));
|
|
453
|
-
// Trigger next processing since job completed
|
|
454
|
-
setImmediate(() => this.processNextJobQueued());
|
|
455
|
-
})
|
|
456
|
-
.catch((error) => {
|
|
457
|
-
job.status = "failed";
|
|
458
|
-
job.lastError = error;
|
|
459
|
-
job.completedAt = Date.now();
|
|
460
|
-
this.dispatchEvent(new CustomEvent("job:failed", { detail: { job, willRetry: false } }));
|
|
461
|
-
// Trigger next processing since job completed
|
|
462
|
-
setImmediate(() => this.processNextJobQueued());
|
|
463
|
-
});
|
|
464
|
-
return true;
|
|
465
|
-
}
|
|
466
|
-
catch (error) {
|
|
467
|
-
console.error(`[SmartPool] Failed to enqueue job on ${server.apiHost}:`, error);
|
|
468
|
-
return false;
|
|
469
|
-
}
|
|
470
|
-
}
|
|
471
|
-
/**
|
|
472
|
-
* Retrieve images from a completed job's execution.
|
|
473
|
-
*/
|
|
474
|
-
async getJobOutputImages(jobId, nodeId) {
|
|
475
|
-
const job = this.jobStore.get(jobId);
|
|
476
|
-
if (!job) {
|
|
477
|
-
throw new Error(`Job ${jobId} not found`);
|
|
478
|
-
}
|
|
479
|
-
if (!job.clientId) {
|
|
480
|
-
throw new Error(`Job ${jobId} has no client assigned`);
|
|
481
|
-
}
|
|
482
|
-
if (!job.promptId) {
|
|
483
|
-
throw new Error(`Job ${jobId} has no promptId assigned`);
|
|
484
|
-
}
|
|
485
|
-
const client = this.clientMap.get(job.clientId);
|
|
486
|
-
if (!client) {
|
|
487
|
-
throw new Error(`Client ${job.clientId} not found`);
|
|
488
|
-
}
|
|
489
|
-
// Fetch history
|
|
490
|
-
const historyData = await client.ext.history.getHistory(job.promptId);
|
|
491
|
-
if (!historyData?.outputs) {
|
|
492
|
-
return [];
|
|
493
|
-
}
|
|
494
|
-
const images = [];
|
|
495
|
-
// Find images in specified node or first node with images
|
|
496
|
-
const outputEntries = Object.entries(historyData.outputs);
|
|
497
|
-
for (const [nId, nodeOutput] of outputEntries) {
|
|
498
|
-
if (nodeId && nId !== nodeId) {
|
|
499
|
-
continue;
|
|
500
|
-
}
|
|
501
|
-
const output = nodeOutput;
|
|
502
|
-
if (output.images && Array.isArray(output.images)) {
|
|
503
|
-
for (const imageRef of output.images) {
|
|
504
|
-
try {
|
|
505
|
-
const blob = await client.ext.file.getImage(imageRef);
|
|
506
|
-
images.push({
|
|
507
|
-
filename: imageRef.filename || `image_${nId}`,
|
|
508
|
-
blob
|
|
509
|
-
});
|
|
510
|
-
}
|
|
511
|
-
catch (e) {
|
|
512
|
-
console.error(`Failed to fetch image from node ${nId}:`, e);
|
|
513
|
-
}
|
|
514
|
-
}
|
|
515
|
-
if (nodeId) {
|
|
516
|
-
// Found specified node, stop searching
|
|
517
|
-
break;
|
|
518
|
-
}
|
|
519
|
-
}
|
|
520
|
-
}
|
|
521
|
-
return images;
|
|
522
|
-
}
|
|
523
|
-
async executeImmediate(workflow, opts) {
|
|
524
|
-
// Enqueue with maximum priority
|
|
525
|
-
const jobId = await this.enqueue(workflow, {
|
|
526
|
-
preferredClientIds: opts.preferableClientIds,
|
|
527
|
-
priority: 1000 // High priority for immediate execution
|
|
528
|
-
});
|
|
529
|
-
// Wait for job completion via event listener
|
|
530
|
-
return new Promise((resolve, reject) => {
|
|
531
|
-
const onComplete = (event) => {
|
|
532
|
-
const customEvent = event;
|
|
533
|
-
if (customEvent.detail.job.jobId === jobId) {
|
|
534
|
-
cleanup();
|
|
535
|
-
const job = customEvent.detail.job;
|
|
536
|
-
this.buildExecuteImmediateResult(job)
|
|
537
|
-
.then(resolve)
|
|
538
|
-
.catch(reject);
|
|
539
|
-
}
|
|
540
|
-
};
|
|
541
|
-
const onFailed = (event) => {
|
|
542
|
-
const customEvent = event;
|
|
543
|
-
if (customEvent.detail.job.jobId === jobId) {
|
|
544
|
-
cleanup();
|
|
545
|
-
reject(new Error(`Job failed: ${JSON.stringify(customEvent.detail.job.lastError)}`));
|
|
546
|
-
}
|
|
547
|
-
};
|
|
548
|
-
let cleanup = () => {
|
|
549
|
-
this.removeEventListener("job:completed", onComplete);
|
|
550
|
-
this.removeEventListener("job:failed", onFailed);
|
|
551
|
-
clearTimeout(timeoutHandle);
|
|
552
|
-
};
|
|
553
|
-
this.addEventListener("job:completed", onComplete);
|
|
554
|
-
this.addEventListener("job:failed", onFailed);
|
|
555
|
-
// Timeout after 5 minutes
|
|
556
|
-
const timeoutHandle = setTimeout(() => {
|
|
557
|
-
cleanup();
|
|
558
|
-
reject(new Error("Execution timeout"));
|
|
559
|
-
}, 5 * 60 * 1000);
|
|
560
|
-
});
|
|
561
|
-
}
|
|
562
|
-
/**
|
|
563
|
-
* Build the return value for executeImmediate() with images and blob.
|
|
564
|
-
*/
|
|
565
|
-
async buildExecuteImmediateResult(job) {
|
|
566
|
-
const images = [];
|
|
567
|
-
let imageBlob;
|
|
568
|
-
// Fetch images from job
|
|
569
|
-
try {
|
|
570
|
-
const jobImages = await this.getJobOutputImages(job.jobId);
|
|
571
|
-
for (const img of jobImages) {
|
|
572
|
-
images.push({
|
|
573
|
-
filename: img.filename
|
|
574
|
-
});
|
|
575
|
-
imageBlob = img.blob;
|
|
576
|
-
}
|
|
577
|
-
}
|
|
578
|
-
catch (e) {
|
|
579
|
-
console.log(`[SmartPool] Failed to fetch images: ${e}`);
|
|
580
|
-
}
|
|
581
|
-
return {
|
|
582
|
-
...job.result,
|
|
583
|
-
images,
|
|
584
|
-
imageBlob,
|
|
585
|
-
_promptId: job.promptId
|
|
586
|
-
};
|
|
587
|
-
}
|
|
588
|
-
async waitForExecutionCompletion(client, promptId, workflow) {
|
|
589
|
-
return new Promise((resolve, reject) => {
|
|
590
|
-
const result = {
|
|
591
|
-
_promptId: promptId,
|
|
592
|
-
_aliases: {},
|
|
593
|
-
_nodes: []
|
|
594
|
-
};
|
|
595
|
-
const collectedNodes = new Set();
|
|
596
|
-
const executedHandler = (ev) => {
|
|
597
|
-
const eventPromptId = ev.detail.prompt_id;
|
|
598
|
-
// Only process events for our specific prompt
|
|
599
|
-
if (eventPromptId !== promptId) {
|
|
600
|
-
return;
|
|
601
|
-
}
|
|
602
|
-
const nodeId = ev.detail.node;
|
|
603
|
-
const output = ev.detail.output;
|
|
604
|
-
// Store output keyed by node ID
|
|
605
|
-
result[nodeId] = output;
|
|
606
|
-
collectedNodes.add(nodeId);
|
|
607
|
-
};
|
|
608
|
-
const executionSuccessHandler = async (ev) => {
|
|
609
|
-
const eventPromptId = ev.detail.prompt_id;
|
|
610
|
-
// Only process events for our specific prompt
|
|
611
|
-
if (eventPromptId !== promptId) {
|
|
612
|
-
return;
|
|
613
|
-
}
|
|
614
|
-
// Try to fetch complete outputs from history
|
|
615
|
-
for (let retries = 0; retries < 5; retries++) {
|
|
616
|
-
try {
|
|
617
|
-
const historyData = await client.ext.history.getHistory(promptId);
|
|
618
|
-
if (historyData?.outputs) {
|
|
619
|
-
// Populate result from history for any nodes we didn't get from websocket
|
|
620
|
-
for (const [nodeIdStr, nodeOutput] of Object.entries(historyData.outputs)) {
|
|
621
|
-
const nodeId = parseInt(nodeIdStr, 10).toString();
|
|
622
|
-
// Only add if we haven't collected this node yet
|
|
623
|
-
if (!collectedNodes.has(nodeId) && nodeOutput) {
|
|
624
|
-
// Extract the actual output value
|
|
625
|
-
const outputValue = Array.isArray(nodeOutput) ? nodeOutput[0] : Object.values(nodeOutput)[0];
|
|
626
|
-
if (outputValue !== undefined) {
|
|
627
|
-
result[nodeId] = outputValue;
|
|
628
|
-
collectedNodes.add(nodeId);
|
|
629
|
-
}
|
|
630
|
-
}
|
|
631
|
-
}
|
|
632
|
-
// Store collected node IDs
|
|
633
|
-
result._nodes = Array.from(collectedNodes);
|
|
634
|
-
cleanup();
|
|
635
|
-
resolve(result);
|
|
636
|
-
return;
|
|
637
|
-
}
|
|
638
|
-
}
|
|
639
|
-
catch (e) {
|
|
640
|
-
// Continue retrying
|
|
641
|
-
}
|
|
642
|
-
if (retries < 4) {
|
|
643
|
-
await new Promise(r => setTimeout(r, 100));
|
|
644
|
-
}
|
|
645
|
-
}
|
|
646
|
-
// Resolve even if we didn't get all outputs
|
|
647
|
-
result._nodes = Array.from(collectedNodes);
|
|
648
|
-
cleanup();
|
|
649
|
-
resolve(result);
|
|
650
|
-
};
|
|
651
|
-
const executionErrorHandler = (ev) => {
|
|
652
|
-
const eventPromptId = ev.detail.prompt_id;
|
|
653
|
-
if (eventPromptId !== promptId) {
|
|
654
|
-
return;
|
|
655
|
-
}
|
|
656
|
-
console.error(`[SmartPool.waitForExecutionCompletion] Execution error:`, ev.detail);
|
|
657
|
-
cleanup();
|
|
658
|
-
reject(new Error(`Execution failed: ${JSON.stringify(ev.detail)}`));
|
|
659
|
-
};
|
|
660
|
-
const cleanup = () => {
|
|
661
|
-
offExecuted?.();
|
|
662
|
-
offExecutionSuccess?.();
|
|
663
|
-
offExecutionError?.();
|
|
664
|
-
clearTimeout(timeoutHandle);
|
|
665
|
-
};
|
|
666
|
-
const offExecuted = client.on("executed", executedHandler);
|
|
667
|
-
const offExecutionSuccess = client.on("execution_success", executionSuccessHandler);
|
|
668
|
-
const offExecutionError = client.on("execution_error", executionErrorHandler);
|
|
669
|
-
// Timeout after 5 minutes
|
|
670
|
-
const timeoutHandle = setTimeout(() => {
|
|
671
|
-
cleanup();
|
|
672
|
-
reject(new Error("Execution timeout"));
|
|
673
|
-
}, 5 * 60 * 1000);
|
|
674
|
-
});
|
|
675
|
-
}
|
|
676
|
-
}
|
|
1
|
+
import { randomUUID } from "node:crypto";
|
|
2
|
+
import { hashWorkflow } from "../pool/utils/hash.js";
|
|
3
|
+
import { ComfyApi } from "../client.js";
|
|
4
|
+
import { PromptBuilder } from "../prompt-builder.js";
|
|
5
|
+
import { MemoryQueueAdapter } from "./queue/adapters/memory.js";
|
|
6
|
+
import { TypedEventTarget } from "../typed-event-target.js";
|
|
7
|
+
/**
 * Baseline pool configuration merged under any caller-supplied options.
 * connectionTimeoutMs: per-client budget (ms) that connect() allows for
 * initialization plus the websocket "connected" handshake.
 */
const DEFAULT_SMART_POOL_OPTIONS = {
    connectionTimeoutMs: 10_000
};
|
|
10
|
+
export class SmartPool extends TypedEventTarget {
    // apiHost -> ComfyApi client managed by this pool
    clientMap = new Map();
    // apiHost -> { queuedJobs, runningJobs } snapshot refreshed by syncQueueStates()
    clientQueueStates = new Map();
    // jobId -> job record (workflow, options, status, timing, result)
    jobStore = new Map();
    // workflowHash -> affinity record ({ workflowHash, preferredClientIds, ... })
    // consulted by isJobCompatibleWithServer() when routing jobs
    affinities = new Map();
    // clientId -> execution-time metrics used for fastest-first scheduling
    serverPerformance = new Map();
    // Queue adapter for job persistence (MemoryQueueAdapter by default)
    queueAdapter;
    // Latch so only one processNextJobQueued() loop runs at a time
    processingNextJob = false;
    // Effective pool options (defaults merged with the constructor argument)
    options;
    // Legacy callback hooks keyed by event type; the "any" hook, when set,
    // receives every event (see emitLegacy())
    hooks = {};
|
|
29
|
+
constructor(clients, options) {
|
|
30
|
+
super();
|
|
31
|
+
if (options) {
|
|
32
|
+
this.options = { ...DEFAULT_SMART_POOL_OPTIONS, ...options };
|
|
33
|
+
}
|
|
34
|
+
else {
|
|
35
|
+
this.options = DEFAULT_SMART_POOL_OPTIONS;
|
|
36
|
+
}
|
|
37
|
+
// Initialize queue adapter
|
|
38
|
+
this.queueAdapter = new MemoryQueueAdapter();
|
|
39
|
+
for (const client of clients) {
|
|
40
|
+
if (typeof client === "string") {
|
|
41
|
+
const apiClient = new ComfyApi(client);
|
|
42
|
+
this.clientMap.set(apiClient.apiHost, apiClient);
|
|
43
|
+
}
|
|
44
|
+
else {
|
|
45
|
+
this.clientMap.set(client.apiHost, client);
|
|
46
|
+
}
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
emitLegacy(event) {
|
|
50
|
+
if (this.hooks.any) {
|
|
51
|
+
this.hooks.any(event);
|
|
52
|
+
}
|
|
53
|
+
const specificHook = this.hooks[event.type];
|
|
54
|
+
if (specificHook) {
|
|
55
|
+
specificHook(event);
|
|
56
|
+
}
|
|
57
|
+
}
|
|
58
|
+
/**
|
|
59
|
+
* Adds an event listener for the specified event type.
|
|
60
|
+
* Properly typed wrapper around EventTarget.addEventListener.
|
|
61
|
+
*/
|
|
62
|
+
on(type, handler, options) {
|
|
63
|
+
super.on(type, handler, options);
|
|
64
|
+
return () => this.off(type, handler, options);
|
|
65
|
+
}
|
|
66
|
+
/**
|
|
67
|
+
* Removes an event listener for the specified event type.
|
|
68
|
+
* Properly typed wrapper around EventTarget.removeEventListener.
|
|
69
|
+
*/
|
|
70
|
+
off(type, handler, options) {
|
|
71
|
+
super.off(type, handler, options);
|
|
72
|
+
}
|
|
73
|
+
/**
|
|
74
|
+
* Adds a one-time event listener for the specified event type.
|
|
75
|
+
*/
|
|
76
|
+
once(type, handler, options) {
|
|
77
|
+
return super.once(type, handler, options);
|
|
78
|
+
}
|
|
79
|
+
async connect() {
|
|
80
|
+
const connectionPromises = [];
|
|
81
|
+
const tRefZero = Date.now();
|
|
82
|
+
for (const [url, client] of this.clientMap.entries()) {
|
|
83
|
+
connectionPromises.push(new Promise(async (resolve, reject) => {
|
|
84
|
+
const timeout = setTimeout(() => {
|
|
85
|
+
client.abortReconnect();
|
|
86
|
+
reject(new Error(`Connection to client at ${url} timed out`));
|
|
87
|
+
}, this.options.connectionTimeoutMs);
|
|
88
|
+
try {
|
|
89
|
+
const comfyApi = await client.init(1);
|
|
90
|
+
comfyApi.on("connected", (event) => {
|
|
91
|
+
if (event.type === "connected") {
|
|
92
|
+
const tRefDone = Date.now();
|
|
93
|
+
const tDelta = tRefDone - tRefZero;
|
|
94
|
+
console.log(`Client at ${url} (${event.target?.osType}) connected via websockets in ${tDelta} ms`);
|
|
95
|
+
resolve(comfyApi);
|
|
96
|
+
}
|
|
97
|
+
});
|
|
98
|
+
}
|
|
99
|
+
catch (reason) {
|
|
100
|
+
console.error(`Failed to connect to client at ${url}:`, reason);
|
|
101
|
+
reject(reason);
|
|
102
|
+
}
|
|
103
|
+
finally {
|
|
104
|
+
clearTimeout(timeout);
|
|
105
|
+
}
|
|
106
|
+
}));
|
|
107
|
+
}
|
|
108
|
+
// Wait for all connection attempts to settle
|
|
109
|
+
const results = await Promise.allSettled(connectionPromises);
|
|
110
|
+
// Check for any rejected connections
|
|
111
|
+
const rejected = results.filter(result => result.status === "rejected");
|
|
112
|
+
// Warn if there are any rejected connections
|
|
113
|
+
if (rejected.length > 0) {
|
|
114
|
+
console.warn(`${rejected.length} client(s) failed to connect.`);
|
|
115
|
+
for (const rejectedClient of rejected) {
|
|
116
|
+
console.warn(`Client rejection reason: ${rejectedClient.reason}`);
|
|
117
|
+
}
|
|
118
|
+
}
|
|
119
|
+
// Sync queue states after connections
|
|
120
|
+
await this.syncQueueStates();
|
|
121
|
+
}
|
|
122
|
+
shutdown() {
|
|
123
|
+
for (const client of this.clientMap.values()) {
|
|
124
|
+
try {
|
|
125
|
+
client.destroy();
|
|
126
|
+
}
|
|
127
|
+
catch (reason) {
|
|
128
|
+
console.error(`Error shutting down client at ${client.apiHost}:`, reason);
|
|
129
|
+
}
|
|
130
|
+
}
|
|
131
|
+
}
|
|
132
|
+
async syncQueueStates() {
|
|
133
|
+
const promises = Array
|
|
134
|
+
.from(this.clientMap.values())
|
|
135
|
+
.filter(value => value.isReady)
|
|
136
|
+
.map(value => {
|
|
137
|
+
return new Promise(resolve => {
|
|
138
|
+
value.getQueue().then(value1 => {
|
|
139
|
+
this.clientQueueStates.set(value.apiHost, {
|
|
140
|
+
queuedJobs: value1.queue_pending.length,
|
|
141
|
+
runningJobs: value1.queue_running.length
|
|
142
|
+
});
|
|
143
|
+
resolve(true);
|
|
144
|
+
});
|
|
145
|
+
});
|
|
146
|
+
});
|
|
147
|
+
await Promise.allSettled(promises);
|
|
148
|
+
}
|
|
149
|
+
// Add a job record to the pool
|
|
150
|
+
addJob(jobId, jobRecord) {
|
|
151
|
+
this.jobStore.set(jobId, jobRecord);
|
|
152
|
+
}
|
|
153
|
+
// Get a job record from the pool
|
|
154
|
+
getJob(jobId) {
|
|
155
|
+
return this.jobStore.get(jobId);
|
|
156
|
+
}
|
|
157
|
+
// Remove a job record from the pool
|
|
158
|
+
removeJob(jobId) {
|
|
159
|
+
this.jobStore.delete(jobId);
|
|
160
|
+
}
|
|
161
|
+
// Set the affinity for a workflow
|
|
162
|
+
setAffinity(workflow, affinity) {
|
|
163
|
+
const workflowHash = hashWorkflow(workflow);
|
|
164
|
+
this.affinities.set(workflowHash, {
|
|
165
|
+
workflowHash,
|
|
166
|
+
...affinity
|
|
167
|
+
});
|
|
168
|
+
}
|
|
169
|
+
// Get the affinity for a workflow
|
|
170
|
+
getAffinity(workflowHash) {
|
|
171
|
+
return this.affinities.get(workflowHash);
|
|
172
|
+
}
|
|
173
|
+
// Remove the affinity for a workflow
|
|
174
|
+
removeAffinity(workflowHash) {
|
|
175
|
+
this.affinities.delete(workflowHash);
|
|
176
|
+
}
|
|
177
|
+
/**
|
|
178
|
+
* Track server performance metrics for job execution
|
|
179
|
+
*/
|
|
180
|
+
updateServerPerformance(clientId, executionTimeMs) {
|
|
181
|
+
let metrics = this.serverPerformance.get(clientId);
|
|
182
|
+
if (!metrics) {
|
|
183
|
+
metrics = {
|
|
184
|
+
clientId,
|
|
185
|
+
totalJobsCompleted: 0,
|
|
186
|
+
totalExecutionTimeMs: 0,
|
|
187
|
+
averageExecutionTimeMs: 0,
|
|
188
|
+
lastJobDurationMs: 0
|
|
189
|
+
};
|
|
190
|
+
this.serverPerformance.set(clientId, metrics);
|
|
191
|
+
}
|
|
192
|
+
metrics.totalJobsCompleted++;
|
|
193
|
+
metrics.totalExecutionTimeMs += executionTimeMs;
|
|
194
|
+
metrics.lastJobDurationMs = executionTimeMs;
|
|
195
|
+
metrics.averageExecutionTimeMs = metrics.totalExecutionTimeMs / metrics.totalJobsCompleted;
|
|
196
|
+
}
|
|
197
|
+
/**
|
|
198
|
+
* Get server performance metrics
|
|
199
|
+
*/
|
|
200
|
+
getServerPerformance(clientId) {
|
|
201
|
+
return this.serverPerformance.get(clientId);
|
|
202
|
+
}
|
|
203
|
+
/**
|
|
204
|
+
* Get sorted list of servers by performance (fastest first) within a given set
|
|
205
|
+
*/
|
|
206
|
+
sortServersByPerformance(serverIds) {
|
|
207
|
+
return [...serverIds].sort((a, b) => {
|
|
208
|
+
const metricsA = this.serverPerformance.get(a);
|
|
209
|
+
const metricsB = this.serverPerformance.get(b);
|
|
210
|
+
// Servers with no metrics go to end (untracked/slow startup)
|
|
211
|
+
if (!metricsA)
|
|
212
|
+
return 1;
|
|
213
|
+
if (!metricsB)
|
|
214
|
+
return -1;
|
|
215
|
+
// Sort by average execution time (fastest first)
|
|
216
|
+
return metricsA.averageExecutionTimeMs - metricsB.averageExecutionTimeMs;
|
|
217
|
+
});
|
|
218
|
+
}
|
|
219
|
+
    /**
     * Enqueue a workflow for execution by the pool.
     * Accepts either a raw workflow JSON object or a wrapper exposing
     * { json, structureHash, outputNodeIds, outputAliases }. Creates and
     * stores a job record, pushes it onto the queue adapter with the
     * requested priority, emits "job:queued", and schedules the
     * processing loop on the next tick. Returns the generated job id.
     */
    async enqueue(workflow, opts) {
        const jobId = randomUUID();
        // Reuse a precomputed structure hash when the wrapper supplies one;
        // otherwise hash the workflow JSON itself.
        const workflowHash = workflow.structureHash || hashWorkflow(workflow.json || workflow);
        const workflowJson = workflow.json || workflow;
        const outputNodeIds = workflow.outputNodeIds || [];
        const outputAliases = workflow.outputAliases || {};
        // Create job record
        const jobRecord = {
            jobId,
            workflow: workflowJson,
            workflowHash,
            options: {
                maxAttempts: 3,
                retryDelayMs: 1000,
                priority: opts?.priority ?? 0,
                preferredClientIds: opts?.preferredClientIds ?? [],
                excludeClientIds: [],
                metadata: {}
            },
            attempts: 0,
            enqueuedAt: Date.now(),
            workflowMeta: {
                outputNodeIds,
                outputAliases
            },
            status: "queued"
        };
        // Store in job store
        this.jobStore.set(jobId, jobRecord);
        // The queue adapter stores the job record itself as the payload.
        const payload = jobRecord;
        // Enqueue with priority
        await this.queueAdapter.enqueue(payload, {
            priority: opts?.priority ?? 0
        });
        // Emit queued event
        this.dispatchEvent(new CustomEvent("job:queued", { detail: { job: jobRecord } }));
        // Kick the scheduling loop on the next tick; this is a no-op if a
        // loop is already running (see processNextJobQueued's guard).
        setImmediate(() => this.processNextJobQueued());
        return jobId;
    }
|
|
264
|
+
/**
|
|
265
|
+
* Entry point for queue processing with deduplication guard.
|
|
266
|
+
* Prevents concurrent processing of jobs.
|
|
267
|
+
* Poll-based approach: check idle servers, collect compatible jobs, enqueue only when slots available.
|
|
268
|
+
*/
|
|
269
|
+
async processNextJobQueued() {
|
|
270
|
+
if (this.processingNextJob) {
|
|
271
|
+
return;
|
|
272
|
+
}
|
|
273
|
+
this.processingNextJob = true;
|
|
274
|
+
try {
|
|
275
|
+
// Continuously sync queue states and process available work
|
|
276
|
+
while (true) {
|
|
277
|
+
// Update queue states from all clients
|
|
278
|
+
await this.syncQueueStates();
|
|
279
|
+
// Find idle servers (not running, not pending)
|
|
280
|
+
const idleServers = this.findIdleServers();
|
|
281
|
+
if (idleServers.length === 0) {
|
|
282
|
+
// No idle servers, wait a bit then check again
|
|
283
|
+
await new Promise(resolve => setTimeout(resolve, 1000));
|
|
284
|
+
continue;
|
|
285
|
+
}
|
|
286
|
+
// Try to assign jobs to idle servers
|
|
287
|
+
const jobsAssigned = await this.assignJobsToIdleServers(idleServers);
|
|
288
|
+
if (jobsAssigned === 0) {
|
|
289
|
+
// No jobs could be assigned, wait then try again
|
|
290
|
+
await new Promise(resolve => setTimeout(resolve, 1000));
|
|
291
|
+
continue;
|
|
292
|
+
}
|
|
293
|
+
// Jobs were assigned, give them time to start then re-check
|
|
294
|
+
await new Promise(resolve => setTimeout(resolve, 500));
|
|
295
|
+
}
|
|
296
|
+
}
|
|
297
|
+
finally {
|
|
298
|
+
this.processingNextJob = false;
|
|
299
|
+
}
|
|
300
|
+
}
|
|
301
|
+
/**
|
|
302
|
+
* Find servers that are currently idle (no running or pending jobs)
|
|
303
|
+
*/
|
|
304
|
+
findIdleServers() {
|
|
305
|
+
const idleServers = [];
|
|
306
|
+
for (const [clientId, client] of this.clientMap) {
|
|
307
|
+
if (!client.isReady)
|
|
308
|
+
continue;
|
|
309
|
+
const state = this.clientQueueStates.get(clientId);
|
|
310
|
+
if (state && state.queuedJobs === 0 && state.runningJobs === 0) {
|
|
311
|
+
idleServers.push(client);
|
|
312
|
+
}
|
|
313
|
+
}
|
|
314
|
+
return idleServers;
|
|
315
|
+
}
|
|
316
|
+
/**
|
|
317
|
+
* Assign compatible jobs from our queue to idle servers
|
|
318
|
+
* Returns number of jobs assigned
|
|
319
|
+
*/
|
|
320
|
+
async assignJobsToIdleServers(idleServers) {
|
|
321
|
+
let jobsAssigned = 0;
|
|
322
|
+
// Peek at pending jobs
|
|
323
|
+
const pendingJobs = await this.queueAdapter.peek(100);
|
|
324
|
+
if (pendingJobs.length === 0) {
|
|
325
|
+
return 0;
|
|
326
|
+
}
|
|
327
|
+
const matches = [];
|
|
328
|
+
for (const payload of pendingJobs) {
|
|
329
|
+
const job = this.jobStore.get(payload.jobId);
|
|
330
|
+
if (!job)
|
|
331
|
+
continue;
|
|
332
|
+
// Find all compatible idle servers for this job
|
|
333
|
+
const compatibleServers = idleServers.filter(s => this.isJobCompatibleWithServer(payload, job, s));
|
|
334
|
+
if (compatibleServers.length > 0) {
|
|
335
|
+
// Sort compatible servers by performance (fastest first)
|
|
336
|
+
const sortedServers = this.sortServersByPerformance(compatibleServers.map(s => s.apiHost))
|
|
337
|
+
.map(id => idleServers.find(s => s.apiHost === id))
|
|
338
|
+
.filter((s) => s !== undefined);
|
|
339
|
+
matches.push({
|
|
340
|
+
payload,
|
|
341
|
+
job,
|
|
342
|
+
compatibleServers: sortedServers
|
|
343
|
+
});
|
|
344
|
+
}
|
|
345
|
+
}
|
|
346
|
+
// Sort by selectivity (jobs with fewer compatible servers first)
|
|
347
|
+
matches.sort((a, b) => {
|
|
348
|
+
return a.compatibleServers.length - b.compatibleServers.length;
|
|
349
|
+
});
|
|
350
|
+
// Assign jobs to idle servers
|
|
351
|
+
const assignedServers = new Set();
|
|
352
|
+
for (const match of matches) {
|
|
353
|
+
// Use the fastest compatible server that hasn't been assigned yet
|
|
354
|
+
let targetServer;
|
|
355
|
+
for (const server of match.compatibleServers) {
|
|
356
|
+
if (!assignedServers.has(server.apiHost)) {
|
|
357
|
+
targetServer = server;
|
|
358
|
+
break;
|
|
359
|
+
}
|
|
360
|
+
}
|
|
361
|
+
if (!targetServer) {
|
|
362
|
+
continue;
|
|
363
|
+
}
|
|
364
|
+
// Reserve this specific job
|
|
365
|
+
const reservation = await this.queueAdapter.reserveById(match.job.jobId);
|
|
366
|
+
if (!reservation) {
|
|
367
|
+
continue;
|
|
368
|
+
}
|
|
369
|
+
try {
|
|
370
|
+
const result = await this.enqueueJobOnServer(match.job, targetServer);
|
|
371
|
+
if (result) {
|
|
372
|
+
assignedServers.add(targetServer.apiHost);
|
|
373
|
+
jobsAssigned++;
|
|
374
|
+
// Commit to our queue
|
|
375
|
+
await this.queueAdapter.commit(reservation.reservationId);
|
|
376
|
+
}
|
|
377
|
+
else {
|
|
378
|
+
// Enqueue failed, retry later
|
|
379
|
+
await this.queueAdapter.retry(reservation.reservationId, { delayMs: 1000 });
|
|
380
|
+
}
|
|
381
|
+
}
|
|
382
|
+
catch (error) {
|
|
383
|
+
// Retry on error
|
|
384
|
+
await this.queueAdapter.retry(reservation.reservationId, { delayMs: 1000 });
|
|
385
|
+
}
|
|
386
|
+
}
|
|
387
|
+
return jobsAssigned;
|
|
388
|
+
}
|
|
389
|
+
/**
|
|
390
|
+
* Check if a job is compatible with a server
|
|
391
|
+
*/
|
|
392
|
+
isJobCompatibleWithServer(payload, job, server) {
|
|
393
|
+
// Check preferred client IDs first
|
|
394
|
+
if (payload.options.preferredClientIds && payload.options.preferredClientIds.length > 0) {
|
|
395
|
+
return payload.options.preferredClientIds.includes(server.apiHost);
|
|
396
|
+
}
|
|
397
|
+
// Check workflow affinity
|
|
398
|
+
const affinity = this.getAffinity(payload.workflowHash);
|
|
399
|
+
if (affinity && affinity.preferredClientIds) {
|
|
400
|
+
return affinity.preferredClientIds.includes(server.apiHost);
|
|
401
|
+
}
|
|
402
|
+
// No constraints, compatible with any server
|
|
403
|
+
return true;
|
|
404
|
+
}
|
|
405
|
+
    /**
     * Enqueue a job on a specific server.
     * Randomizes any `seed` inputs set to -1, builds the prompt, queues
     * it on the client, marks the job record as running, and kicks off a
     * background wait for completion that updates the record, records
     * server timing, emits job events, and re-triggers the scheduler.
     * Returns true when the prompt was accepted, false on failure.
     */
    async enqueueJobOnServer(job, server) {
        try {
            const workflowJson = job.workflow;
            const outputNodeIds = job.workflowMeta?.outputNodeIds || [];
            // Auto-randomize any seed fields set to -1 so repeat runs of the
            // same stored workflow do not reuse an identical seed.
            try {
                for (const [_, node] of Object.entries(workflowJson)) {
                    const n = node;
                    if (n && n.inputs && Object.prototype.hasOwnProperty.call(n.inputs, 'seed')) {
                        if (n.inputs.seed === -1) {
                            const val = Math.floor(Math.random() * 2_147_483_647);
                            n.inputs.seed = val;
                        }
                    }
                }
            }
            catch { /* non-fatal */ }
            // Build prompt; each output node is registered under its own id
            // as the alias.
            const pb = new PromptBuilder(workflowJson, [], outputNodeIds);
            for (const nodeId of outputNodeIds) {
                pb.setOutputNode(nodeId, nodeId);
            }
            const promptJson = pb.prompt;
            // Queue on client
            const queueResponse = await server.ext.queue.appendPrompt(promptJson);
            const promptId = queueResponse.prompt_id;
            // Update job record
            job.status = "running";
            job.clientId = server.apiHost;
            job.promptId = promptId;
            job.attempts += 1;
            job.startedAt = Date.now(); // Track when job starts executing
            this.dispatchEvent(new CustomEvent("job:accepted", { detail: { job } }));
            this.dispatchEvent(new CustomEvent("job:started", { detail: { job } }));
            // Run execution in background (deliberately not awaited: the
            // caller only needs to know the hand-off succeeded).
            this.waitForExecutionCompletion(server, promptId, { json: workflowJson })
                .then((result) => {
                job.status = "completed";
                job.result = result;
                job.completedAt = Date.now();
                // Track server performance (falls back to 0 duration if
                // startedAt was never set).
                const executionTimeMs = job.completedAt - (job.startedAt || job.completedAt);
                this.updateServerPerformance(server.apiHost, executionTimeMs);
                this.dispatchEvent(new CustomEvent("job:completed", { detail: { job } }));
                // Trigger next processing since job completed
                setImmediate(() => this.processNextJobQueued());
            })
                .catch((error) => {
                job.status = "failed";
                job.lastError = error;
                job.completedAt = Date.now();
                this.dispatchEvent(new CustomEvent("job:failed", { detail: { job, willRetry: false } }));
                // Trigger next processing since job completed
                setImmediate(() => this.processNextJobQueued());
            });
            return true;
        }
        catch (error) {
            console.error(`[SmartPool] Failed to enqueue job on ${server.apiHost}:`, error);
            return false;
        }
    }
|
|
471
|
+
    /**
     * Retrieve images from a completed job's execution.
     * Loads the job's history from its assigned client and downloads the
     * image blobs — from the given node only when `nodeId` is provided,
     * otherwise from every output node. Individual download failures are
     * logged and skipped.
     * @throws when the job is unknown, has no client/prompt assigned, or
     *         its client is no longer in the pool.
     */
    async getJobOutputImages(jobId, nodeId) {
        const job = this.jobStore.get(jobId);
        if (!job) {
            throw new Error(`Job ${jobId} not found`);
        }
        if (!job.clientId) {
            throw new Error(`Job ${jobId} has no client assigned`);
        }
        if (!job.promptId) {
            throw new Error(`Job ${jobId} has no promptId assigned`);
        }
        const client = this.clientMap.get(job.clientId);
        if (!client) {
            throw new Error(`Client ${job.clientId} not found`);
        }
        // Fetch history
        const historyData = await client.ext.history.getHistory(job.promptId);
        if (!historyData?.outputs) {
            return [];
        }
        const images = [];
        // Walk the outputs; when nodeId is given, every other node is skipped.
        const outputEntries = Object.entries(historyData.outputs);
        for (const [nId, nodeOutput] of outputEntries) {
            if (nodeId && nId !== nodeId) {
                continue;
            }
            const output = nodeOutput;
            if (output.images && Array.isArray(output.images)) {
                for (const imageRef of output.images) {
                    try {
                        const blob = await client.ext.file.getImage(imageRef);
                        images.push({
                            filename: imageRef.filename || `image_${nId}`,
                            blob
                        });
                    }
                    catch (e) {
                        // Best-effort: one failed download should not abort
                        // the remaining images.
                        console.error(`Failed to fetch image from node ${nId}:`, e);
                    }
                }
                if (nodeId) {
                    // Found specified node, stop searching
                    break;
                }
            }
        }
        return images;
    }
|
|
523
|
+
async executeImmediate(workflow, opts) {
|
|
524
|
+
// Enqueue with maximum priority
|
|
525
|
+
const jobId = await this.enqueue(workflow, {
|
|
526
|
+
preferredClientIds: opts.preferableClientIds,
|
|
527
|
+
priority: 1000 // High priority for immediate execution
|
|
528
|
+
});
|
|
529
|
+
// Wait for job completion via event listener
|
|
530
|
+
return new Promise((resolve, reject) => {
|
|
531
|
+
const onComplete = (event) => {
|
|
532
|
+
const customEvent = event;
|
|
533
|
+
if (customEvent.detail.job.jobId === jobId) {
|
|
534
|
+
cleanup();
|
|
535
|
+
const job = customEvent.detail.job;
|
|
536
|
+
this.buildExecuteImmediateResult(job)
|
|
537
|
+
.then(resolve)
|
|
538
|
+
.catch(reject);
|
|
539
|
+
}
|
|
540
|
+
};
|
|
541
|
+
const onFailed = (event) => {
|
|
542
|
+
const customEvent = event;
|
|
543
|
+
if (customEvent.detail.job.jobId === jobId) {
|
|
544
|
+
cleanup();
|
|
545
|
+
reject(new Error(`Job failed: ${JSON.stringify(customEvent.detail.job.lastError)}`));
|
|
546
|
+
}
|
|
547
|
+
};
|
|
548
|
+
let cleanup = () => {
|
|
549
|
+
this.removeEventListener("job:completed", onComplete);
|
|
550
|
+
this.removeEventListener("job:failed", onFailed);
|
|
551
|
+
clearTimeout(timeoutHandle);
|
|
552
|
+
};
|
|
553
|
+
this.addEventListener("job:completed", onComplete);
|
|
554
|
+
this.addEventListener("job:failed", onFailed);
|
|
555
|
+
// Timeout after 5 minutes
|
|
556
|
+
const timeoutHandle = setTimeout(() => {
|
|
557
|
+
cleanup();
|
|
558
|
+
reject(new Error("Execution timeout"));
|
|
559
|
+
}, 5 * 60 * 1000);
|
|
560
|
+
});
|
|
561
|
+
}
|
|
562
|
+
/**
|
|
563
|
+
* Build the return value for executeImmediate() with images and blob.
|
|
564
|
+
*/
|
|
565
|
+
async buildExecuteImmediateResult(job) {
|
|
566
|
+
const images = [];
|
|
567
|
+
let imageBlob;
|
|
568
|
+
// Fetch images from job
|
|
569
|
+
try {
|
|
570
|
+
const jobImages = await this.getJobOutputImages(job.jobId);
|
|
571
|
+
for (const img of jobImages) {
|
|
572
|
+
images.push({
|
|
573
|
+
filename: img.filename
|
|
574
|
+
});
|
|
575
|
+
imageBlob = img.blob;
|
|
576
|
+
}
|
|
577
|
+
}
|
|
578
|
+
catch (e) {
|
|
579
|
+
console.log(`[SmartPool] Failed to fetch images: ${e}`);
|
|
580
|
+
}
|
|
581
|
+
return {
|
|
582
|
+
...job.result,
|
|
583
|
+
images,
|
|
584
|
+
imageBlob,
|
|
585
|
+
_promptId: job.promptId
|
|
586
|
+
};
|
|
587
|
+
}
|
|
588
|
+
    /**
     * Wait for a queued prompt to finish on a client.
     * Accumulates per-node outputs from "executed" websocket events; on
     * "execution_success" it backfills any nodes the websocket missed
     * from the history endpoint (up to 5 attempts, 100 ms apart, since
     * history may lag the success event) and resolves with an object
     * keyed by node id plus _promptId/_aliases/_nodes metadata. Rejects
     * on "execution_error" or after a 5 minute timeout; all listeners
     * and the timer are torn down on every exit path via cleanup().
     */
    async waitForExecutionCompletion(client, promptId, workflow) {
        return new Promise((resolve, reject) => {
            const result = {
                _promptId: promptId,
                _aliases: {},
                _nodes: []
            };
            // Node ids whose output has already been captured.
            const collectedNodes = new Set();
            const executedHandler = (ev) => {
                const eventPromptId = ev.detail.prompt_id;
                // Only process events for our specific prompt
                if (eventPromptId !== promptId) {
                    return;
                }
                const nodeId = ev.detail.node;
                const output = ev.detail.output;
                // Store output keyed by node ID
                result[nodeId] = output;
                collectedNodes.add(nodeId);
            };
            const executionSuccessHandler = async (ev) => {
                const eventPromptId = ev.detail.prompt_id;
                // Only process events for our specific prompt
                if (eventPromptId !== promptId) {
                    return;
                }
                // Try to fetch complete outputs from history
                for (let retries = 0; retries < 5; retries++) {
                    try {
                        const historyData = await client.ext.history.getHistory(promptId);
                        if (historyData?.outputs) {
                            // Populate result from history for any nodes we didn't get from websocket
                            for (const [nodeIdStr, nodeOutput] of Object.entries(historyData.outputs)) {
                                // Normalize the node id to its canonical numeric string form.
                                const nodeId = parseInt(nodeIdStr, 10).toString();
                                // Only add if we haven't collected this node yet
                                if (!collectedNodes.has(nodeId) && nodeOutput) {
                                    // Extract the actual output value
                                    const outputValue = Array.isArray(nodeOutput) ? nodeOutput[0] : Object.values(nodeOutput)[0];
                                    if (outputValue !== undefined) {
                                        result[nodeId] = outputValue;
                                        collectedNodes.add(nodeId);
                                    }
                                }
                            }
                            // Store collected node IDs
                            result._nodes = Array.from(collectedNodes);
                            cleanup();
                            resolve(result);
                            return;
                        }
                    }
                    catch (e) {
                        // Continue retrying
                    }
                    if (retries < 4) {
                        await new Promise(r => setTimeout(r, 100));
                    }
                }
                // Resolve even if we didn't get all outputs
                result._nodes = Array.from(collectedNodes);
                cleanup();
                resolve(result);
            };
            const executionErrorHandler = (ev) => {
                const eventPromptId = ev.detail.prompt_id;
                if (eventPromptId !== promptId) {
                    return;
                }
                console.error(`[SmartPool.waitForExecutionCompletion] Execution error:`, ev.detail);
                cleanup();
                reject(new Error(`Execution failed: ${JSON.stringify(ev.detail)}`));
            };
            // Detach every listener and cancel the watchdog timer.
            const cleanup = () => {
                offExecuted?.();
                offExecutionSuccess?.();
                offExecutionError?.();
                clearTimeout(timeoutHandle);
            };
            // client.on() returns an unsubscribe function (may be undefined,
            // hence the optional calls in cleanup()).
            const offExecuted = client.on("executed", executedHandler);
            const offExecutionSuccess = client.on("execution_success", executionSuccessHandler);
            const offExecutionError = client.on("execution_error", executionErrorHandler);
            // Timeout after 5 minutes
            const timeoutHandle = setTimeout(() => {
                cleanup();
                reject(new Error("Execution timeout"));
            }, 5 * 60 * 1000);
        });
    }
|
|
676
|
+
}
|
|
677
677
|
//# sourceMappingURL=SmartPool.js.map
|