@supaku/agentfactory-server 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,366 @@
1
+ /**
2
+ * Work Queue Module (Optimized)
3
+ *
4
+ * Manages the queue of pending agent work items in Redis.
5
+ * Workers poll this queue to claim and process work.
6
+ *
7
+ * Data Structures (optimized for high concurrency):
8
+ * - work:items (Hash): sessionId -> JSON work item - O(1) lookup
9
+ * - work:queue (Sorted Set): score = priority, member = sessionId - O(log n) operations
10
+ * - work:claim:{sessionId} (String): workerId with TTL - atomic claims
11
+ *
12
+ * Performance:
13
+ * - queueWork: O(log n) - HSET + ZADD
14
+ * - claimWork: O(log n) - SETNX + HGET + ZREM
15
+ * - peekWork: O(log n + k) - ZRANGEBYSCORE + HMGET where k = limit
16
+ * - getQueueLength: O(1) - ZCARD
17
+ */
18
+ import { redisSetNX, redisDel, redisGet, redisZAdd, redisZRem, redisZRangeByScore, redisZCard, redisHSet, redisHGet, redisHDel, redisHMGet, isRedisConfigured,
19
+ // Legacy list operations for migration
20
+ redisLRange, redisLRem, } from './redis';
21
/**
 * Minimal prefixed console logger for this module.
 * Structured data is JSON-stringified when provided; `debug` is a no-op.
 */
const log = {
    info(msg, data) {
        console.log(`[work-queue] ${msg}`, data ? JSON.stringify(data) : '');
    },
    warn(msg, data) {
        console.warn(`[work-queue] ${msg}`, data ? JSON.stringify(data) : '');
    },
    error(msg, data) {
        console.error(`[work-queue] ${msg}`, data ? JSON.stringify(data) : '');
    },
    debug() { },
};
27
// Redis key constants
const WORK_QUEUE_KEY = 'work:queue'; // Sorted set: priority queue (score = priority*1e13 + queuedAt)
const WORK_ITEMS_KEY = 'work:items'; // Hash: sessionId -> serialized work-item JSON
const WORK_CLAIM_PREFIX = 'work:claim:'; // Per-session claim key: value is the claiming workerId, with TTL
// Legacy key for migration
// NOTE(review): this constant is never referenced in this module —
// migrateFromLegacyQueue() reads WORK_QUEUE_KEY as a list instead.
// Confirm which key the legacy data actually lives under.
const LEGACY_QUEUE_KEY = 'work:queue:legacy';
// Default TTL for work claims (1 hour); override via WORK_CLAIM_TTL env var (seconds)
const WORK_CLAIM_TTL = parseInt(process.env.WORK_CLAIM_TTL ?? '3600', 10);
35
/**
 * Compute the sorted-set score for a work item.
 *
 * Lower score = processed sooner. Priority (clamped to 1-9) is the
 * primary sort key; the enqueue timestamp breaks ties within a priority.
 *
 * @param priority - Work priority (clamped into 1..9)
 * @param queuedAt - Enqueue timestamp in ms
 * @returns Numeric score for ZADD
 */
function calculateScore(priority, queuedAt) {
    const clamped = Math.min(9, Math.max(1, priority));
    // 1e13 ms is roughly the year 2286, so a Date.now() timestamp can
    // never spill over into the priority digit.
    return clamped * 1e13 + queuedAt;
}
47
/**
 * Add work to the queue.
 *
 * Writes the serialized item into the items hash first, then inserts
 * the sessionId into the priority sorted set, so an item is always
 * readable by the time it becomes claimable.
 *
 * @param work - Work item to queue (carries sessionId, priority, queuedAt)
 * @returns true if queued successfully
 */
export async function queueWork(work) {
    if (!isRedisConfigured()) {
        log.warn('Redis not configured, cannot queue work');
        return false;
    }
    try {
        const serialized = JSON.stringify(work);
        const score = calculateScore(work.priority, work.queuedAt);
        // Hash write (O(1)) must precede the queue insert (O(log n)).
        await redisHSet(WORK_ITEMS_KEY, work.sessionId, serialized);
        await redisZAdd(WORK_QUEUE_KEY, score, work.sessionId);
        log.info('Work queued', {
            sessionId: work.sessionId,
            issueIdentifier: work.issueIdentifier,
            priority: work.priority,
            score,
        });
        return true;
    }
    catch (error) {
        log.error('Failed to queue work', { error, sessionId: work.sessionId });
        return false;
    }
}
78
/**
 * Peek at pending work without removing anything from the queue.
 * Items come back sorted by priority (lowest score = highest priority).
 *
 * @param limit - Maximum number of items to return (default 10)
 * @returns Array of parsed work items; unparseable or missing entries are skipped
 */
export async function peekWork(limit = 10) {
    if (!isRedisConfigured()) {
        return [];
    }
    try {
        // Lowest scores first = highest priority first.
        const sessionIds = await redisZRangeByScore(WORK_QUEUE_KEY, '-inf', '+inf', limit);
        if (sessionIds.length === 0) {
            return [];
        }
        // One batched hash read instead of N round-trips.
        const rawItems = await redisHMGet(WORK_ITEMS_KEY, sessionIds);
        const parsed = [];
        rawItems.forEach((json, idx) => {
            if (!json) {
                // Item vanished between ZRANGE and HMGET (claimed/removed) — skip.
                return;
            }
            try {
                parsed.push(JSON.parse(json));
            }
            catch {
                log.warn('Failed to parse work item', { sessionId: sessionIds[idx] });
            }
        });
        return parsed;
    }
    catch (error) {
        log.error('Failed to peek work queue', { error });
        return [];
    }
}
117
/**
 * Get the number of items currently in the queue.
 *
 * @returns Queue size (ZCARD), or 0 when Redis is unavailable or errors
 */
export async function getQueueLength() {
    if (!isRedisConfigured()) {
        return 0;
    }
    try {
        const count = await redisZCard(WORK_QUEUE_KEY);
        return count;
    }
    catch (error) {
        log.error('Failed to get queue length', { error });
        return 0;
    }
}
132
/**
 * Claim a work item for processing.
 *
 * Uses SETNX for an atomic claim so only one worker can win a session.
 * O(log n) for the claim + queue removal.
 *
 * Fix: a corrupt (unparseable) work item previously fell through to the
 * generic catch, which left the SETNX claim held for the full TTL and the
 * unparseable item queued forever — a poison pill that every worker would
 * retry after each TTL expiry. Corrupt items are now discarded and the
 * claim is released immediately.
 *
 * @param sessionId - Session ID to claim
 * @param workerId - Worker claiming the work
 * @returns The work item if claimed successfully, null otherwise
 */
export async function claimWork(sessionId, workerId) {
    if (!isRedisConfigured()) {
        log.warn('Redis not configured, cannot claim work');
        return null;
    }
    try {
        // Try to atomically set the claim; NX guarantees a single winner.
        const claimKey = `${WORK_CLAIM_PREFIX}${sessionId}`;
        const claimed = await redisSetNX(claimKey, workerId, WORK_CLAIM_TTL);
        if (!claimed) {
            log.debug('Work already claimed', { sessionId, workerId });
            return null;
        }
        // Get work item from hash (O(1))
        const itemJson = await redisHGet(WORK_ITEMS_KEY, sessionId);
        if (!itemJson) {
            // Work item not found - release the claim
            await redisDel(claimKey);
            log.warn('Work item not found in hash after claim', { sessionId });
            return null;
        }
        let work;
        try {
            work = JSON.parse(itemJson);
        }
        catch (parseError) {
            // Corrupt item: remove it from both structures and release the
            // claim so it cannot be retried in an endless TTL-gated loop.
            await redisZRem(WORK_QUEUE_KEY, sessionId);
            await redisHDel(WORK_ITEMS_KEY, sessionId);
            await redisDel(claimKey);
            log.error('Discarded unparseable work item', { sessionId, error: parseError });
            return null;
        }
        // Remove from priority queue (O(log n))
        await redisZRem(WORK_QUEUE_KEY, sessionId);
        // Remove from items hash (O(1))
        await redisHDel(WORK_ITEMS_KEY, sessionId);
        log.info('Work claimed', {
            sessionId,
            workerId,
            issueIdentifier: work.issueIdentifier,
        });
        return work;
    }
    catch (error) {
        log.error('Failed to claim work', { error, sessionId, workerId });
        return null;
    }
}
180
/**
 * Release a work claim (e.g., on failure or cancellation).
 *
 * @param sessionId - Session ID whose claim should be released
 * @returns true if a claim key was actually deleted
 */
export async function releaseClaim(sessionId) {
    if (!isRedisConfigured()) {
        return false;
    }
    try {
        const removed = await redisDel(`${WORK_CLAIM_PREFIX}${sessionId}`);
        return removed > 0;
    }
    catch (error) {
        log.error('Failed to release claim', { error, sessionId });
        return false;
    }
}
200
/**
 * Look up which worker currently holds the claim on a session.
 *
 * @param sessionId - Session ID to check
 * @returns Worker ID if claimed, null otherwise
 */
export async function getClaimOwner(sessionId) {
    if (!isRedisConfigured()) {
        return null;
    }
    try {
        const owner = await redisGet(`${WORK_CLAIM_PREFIX}${sessionId}`);
        return owner;
    }
    catch (error) {
        log.error('Failed to get claim owner', { error, sessionId });
        return null;
    }
}
219
/**
 * Check if a session has an entry in the work queue.
 * O(1) check via the work items hash.
 *
 * Fix: the original compared `item !== null`, which reports true for a
 * missing field whenever the redis wrapper resolves absent hash fields to
 * `undefined` (clients differ on this). `!= null` matches both null and
 * undefined, so absence is detected regardless of the wrapper's choice.
 *
 * @param sessionId - Session ID to check
 * @returns true if the session is present in the work queue
 */
export async function isSessionInQueue(sessionId) {
    if (!isRedisConfigured()) {
        return false;
    }
    try {
        const item = await redisHGet(WORK_ITEMS_KEY, sessionId);
        // Loose null check deliberately covers undefined as well.
        return item != null;
    }
    catch (error) {
        log.error('Failed to check if session is in queue', { error, sessionId });
        return false;
    }
}
239
/**
 * Re-queue work that failed or was abandoned.
 * Releases any outstanding claim, boosts priority (lower number = higher
 * priority), refreshes the enqueue timestamp, and queues the item again.
 *
 * @param work - Work item to re-queue
 * @param priorityBoost - Amount to subtract from the priority number (default 1)
 * @returns true if re-queued successfully
 */
export async function requeueWork(work, priorityBoost = 1) {
    if (!isRedisConfigured()) {
        return false;
    }
    try {
        // Drop any stale claim before the item becomes claimable again.
        await releaseClaim(work.sessionId);
        const boostedPriority = Math.max(1, work.priority - priorityBoost);
        const refreshed = {
            ...work,
            priority: boostedPriority,
            queuedAt: Date.now(),
        };
        return await queueWork(refreshed);
    }
    catch (error) {
        log.error('Failed to requeue work', { error, sessionId: work.sessionId });
        return false;
    }
}
268
/**
 * Get all pending work items (for dashboard/monitoring).
 * Returns items sorted by priority; unparseable or missing entries are skipped.
 *
 * @returns Array of all parsed pending work items
 */
export async function getAllPendingWork() {
    if (!isRedisConfigured()) {
        return [];
    }
    try {
        // Full scan of the priority queue, lowest scores first.
        const sessionIds = await redisZRangeByScore(WORK_QUEUE_KEY, '-inf', '+inf');
        if (sessionIds.length === 0) {
            return [];
        }
        // Single batched hash read for every queued session.
        const rawItems = await redisHMGet(WORK_ITEMS_KEY, sessionIds);
        const pending = [];
        for (const json of rawItems) {
            if (!json) {
                continue;
            }
            try {
                pending.push(JSON.parse(json));
            }
            catch {
                // Skip invalid items
            }
        }
        return pending;
    }
    catch (error) {
        log.error('Failed to get all pending work', { error });
        return [];
    }
}
302
/**
 * Remove a work item from the queue without claiming it.
 * Used for cleanup operations.
 *
 * @param sessionId - Session ID to remove
 * @returns true unless Redis is unconfigured or the removal errored
 */
export async function removeFromQueue(sessionId) {
    if (!isRedisConfigured()) {
        return false;
    }
    try {
        // Keep both structures in sync: sorted set entry and hash field.
        await redisZRem(WORK_QUEUE_KEY, sessionId);
        await redisHDel(WORK_ITEMS_KEY, sessionId);
        return true;
    }
    catch (error) {
        log.error('Failed to remove from queue', { error, sessionId });
        return false;
    }
}
324
/**
 * Migrate data from the legacy list-based queue to the new sorted set/hash
 * structure. Intended to run once after deployment.
 *
 * NOTE(review): this reads WORK_QUEUE_KEY ('work:queue') with LRANGE, i.e.
 * it assumes the *same* key previously held a list. A Redis key holds a
 * single type, so while that key is still a list the ZADD below should fail
 * with WRONGTYPE for every item, and once it is a sorted set the LRANGE here
 * fails instead (caught below and treated as "already migrated"). Verify
 * against the './redis' wrappers whether this migration can ever succeed in
 * place. Also note that LEGACY_QUEUE_KEY is declared in this module but
 * never used here — confirm which key the legacy data actually lives under.
 *
 * @returns Counts of migrated and failed items
 */
export async function migrateFromLegacyQueue() {
    if (!isRedisConfigured()) {
        return { migrated: 0, failed: 0 };
    }
    let migrated = 0;
    let failed = 0;
    try {
        // Check if there's data in the legacy queue (same key, but was a list)
        // Try to read as list first
        const legacyItems = await redisLRange(WORK_QUEUE_KEY, 0, -1);
        if (legacyItems.length === 0) {
            log.info('No legacy queue data to migrate');
            return { migrated: 0, failed: 0 };
        }
        log.info('Migrating legacy queue data', { itemCount: legacyItems.length });
        for (const itemJson of legacyItems) {
            try {
                const work = JSON.parse(itemJson);
                // Add to new data structures
                const score = calculateScore(work.priority, work.queuedAt);
                await redisHSet(WORK_ITEMS_KEY, work.sessionId, itemJson);
                // NOTE(review): ZADD targets the same key LRANGE just read as a
                // list — expected to raise WRONGTYPE; each failure increments
                // `failed` via the per-item catch below.
                await redisZAdd(WORK_QUEUE_KEY, score, work.sessionId);
                // Remove from legacy list
                await redisLRem(WORK_QUEUE_KEY, 1, itemJson);
                migrated++;
            }
            catch (err) {
                log.warn('Failed to migrate work item', { error: err, itemJson });
                failed++;
            }
        }
        log.info('Legacy queue migration complete', { migrated, failed });
    }
    catch (error) {
        // This might fail if the key doesn't exist as a list (already migrated)
        log.debug('No legacy queue to migrate or already migrated', { error });
    }
    return { migrated, failed };
}
@@ -0,0 +1,100 @@
1
+ /**
2
+ * Worker Storage Module
3
+ *
4
+ * Manages worker registration and tracking in Redis.
5
+ * Workers register on startup, send periodic heartbeats,
6
+ * and deregister on shutdown.
7
+ */
8
/**
 * Worker registration data stored in Redis
 */
export interface WorkerData {
    /** Unique worker identifier assigned at registration. */
    id: string;
    /** Hostname reported by the worker. */
    hostname: string;
    /** Maximum concurrent agents the worker can handle. */
    capacity: number;
    /** Number of agents the worker currently has active. */
    activeCount: number;
    /** Registration timestamp (presumably epoch ms — confirm against implementation). */
    registeredAt: number;
    /** Timestamp of the most recent heartbeat (presumably epoch ms — confirm). */
    lastHeartbeat: number;
    /** Worker lifecycle state. */
    status: 'active' | 'draining' | 'offline';
    /** Optional worker software version string. */
    version?: string;
}
21
/**
 * Worker info returned to API consumers
 */
export interface WorkerInfo extends WorkerData {
    /** Session IDs currently assigned to this worker. */
    activeSessions: string[];
}
27
/**
 * Register a new worker
 *
 * @param hostname - Worker's hostname
 * @param capacity - Maximum concurrent agents the worker can handle
 * @param version - Optional worker software version
 * @returns Worker ID and polling configuration, or null on failure
 */
export declare function registerWorker(hostname: string, capacity: number, version?: string): Promise<{
    workerId: string;
    heartbeatInterval: number;
    pollInterval: number;
} | null>;
/**
 * Update worker heartbeat
 *
 * @param workerId - Worker ID
 * @param activeCount - Current number of active agents
 * @param load - Optional system load metrics (cpu/memory)
 * @returns Heartbeat acknowledgment (with server time and pending work count) or null on failure
 */
export declare function updateHeartbeat(workerId: string, activeCount: number, load?: {
    cpu: number;
    memory: number;
}): Promise<{
    acknowledged: boolean;
    serverTime: string;
    pendingWorkCount: number;
} | null>;
/**
 * Get worker by ID
 *
 * @returns WorkerInfo, or null when the worker is unknown or lookup fails
 */
export declare function getWorker(workerId: string): Promise<WorkerInfo | null>;
/**
 * Deregister a worker
 *
 * @param workerId - Worker ID to deregister
 * @returns Whether deregistration happened, plus session IDs that need to be re-queued
 */
export declare function deregisterWorker(workerId: string): Promise<{
    deregistered: boolean;
    unclaimedSessions: string[];
}>;
/**
 * List all registered workers
 */
export declare function listWorkers(): Promise<WorkerInfo[]>;
/**
 * Get workers that have missed heartbeats (stale workers)
 */
export declare function getStaleWorkers(): Promise<WorkerInfo[]>;
/**
 * Add a session to a worker's active sessions
 *
 * @param workerId - Worker ID
 * @param sessionId - Session ID being processed
 * @returns true on success
 */
export declare function addWorkerSession(workerId: string, sessionId: string): Promise<boolean>;
/**
 * Remove a session from a worker's active sessions
 *
 * @param workerId - Worker ID
 * @param sessionId - Session ID to remove
 * @returns true on success
 */
export declare function removeWorkerSession(workerId: string, sessionId: string): Promise<boolean>;
/**
 * Get total capacity across all active workers
 *
 * @returns Aggregate capacity, active agent count, and remaining headroom
 */
export declare function getTotalCapacity(): Promise<{
    totalCapacity: number;
    totalActive: number;
    availableCapacity: number;
}>;
100
+ //# sourceMappingURL=worker-storage.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"worker-storage.d.ts","sourceRoot":"","sources":["../../src/worker-storage.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG;AA2CH;;GAEG;AACH,MAAM,WAAW,UAAU;IACzB,EAAE,EAAE,MAAM,CAAA;IACV,QAAQ,EAAE,MAAM,CAAA;IAChB,QAAQ,EAAE,MAAM,CAAA;IAChB,WAAW,EAAE,MAAM,CAAA;IACnB,YAAY,EAAE,MAAM,CAAA;IACpB,aAAa,EAAE,MAAM,CAAA;IACrB,MAAM,EAAE,QAAQ,GAAG,UAAU,GAAG,SAAS,CAAA;IACzC,OAAO,CAAC,EAAE,MAAM,CAAA;CACjB;AAED;;GAEG;AACH,MAAM,WAAW,UAAW,SAAQ,UAAU;IAC5C,cAAc,EAAE,MAAM,EAAE,CAAA;CACzB;AAED;;;;;;;GAOG;AACH,wBAAsB,cAAc,CAClC,QAAQ,EAAE,MAAM,EAChB,QAAQ,EAAE,MAAM,EAChB,OAAO,CAAC,EAAE,MAAM,GACf,OAAO,CAAC;IAAE,QAAQ,EAAE,MAAM,CAAC;IAAC,iBAAiB,EAAE,MAAM,CAAC;IAAC,YAAY,EAAE,MAAM,CAAA;CAAE,GAAG,IAAI,CAAC,CAuCvF;AAED;;;;;;;GAOG;AACH,wBAAsB,eAAe,CACnC,QAAQ,EAAE,MAAM,EAChB,WAAW,EAAE,MAAM,EACnB,IAAI,CAAC,EAAE;IAAE,GAAG,EAAE,MAAM,CAAC;IAAC,MAAM,EAAE,MAAM,CAAA;CAAE,GACrC,OAAO,CAAC;IAAE,YAAY,EAAE,OAAO,CAAC;IAAC,UAAU,EAAE,MAAM,CAAC;IAAC,gBAAgB,EAAE,MAAM,CAAA;CAAE,GAAG,IAAI,CAAC,CAsCzF;AAED;;GAEG;AACH,wBAAsB,SAAS,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC,UAAU,GAAG,IAAI,CAAC,CAyB5E;AAED;;;;;GAKG;AACH,wBAAsB,gBAAgB,CACpC,QAAQ,EAAE,MAAM,GACf,OAAO,CAAC;IAAE,YAAY,EAAE,OAAO,CAAC;IAAC,iBAAiB,EAAE,MAAM,EAAE,CAAA;CAAE,CAAC,CA6BjE;AAED;;GAEG;AACH,wBAAsB,WAAW,IAAI,OAAO,CAAC,UAAU,EAAE,CAAC,CAoCzD;AAED;;GAEG;AACH,wBAAsB,eAAe,IAAI,OAAO,CAAC,UAAU,EAAE,CAAC,CAY7D;AAED;;;;;GAKG;AACH,wBAAsB,gBAAgB,CACpC,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,GAChB,OAAO,CAAC,OAAO,CAAC,CAalB;AAED;;;;;GAKG;AACH,wBAAsB,mBAAmB,CACvC,QAAQ,EAAE,MAAM,EAChB,SAAS,EAAE,MAAM,GAChB,OAAO,CAAC,OAAO,CAAC,CAalB;AAED;;GAEG;AACH,wBAAsB,gBAAgB,IAAI,OAAO,CAAC;IAChD,aAAa,EAAE,MAAM,CAAA;IACrB,WAAW,EAAE,MAAM,CAAA;IACnB,iBAAiB,EAAE,MAAM,CAAA;CAC1B,CAAC,CAqBD"}