@zintrust/workers 0.1.27
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +861 -0
- package/dist/AnomalyDetection.d.ts +102 -0
- package/dist/AnomalyDetection.js +321 -0
- package/dist/AutoScaler.d.ts +127 -0
- package/dist/AutoScaler.js +425 -0
- package/dist/BroadcastWorker.d.ts +21 -0
- package/dist/BroadcastWorker.js +24 -0
- package/dist/CanaryController.d.ts +103 -0
- package/dist/CanaryController.js +380 -0
- package/dist/ChaosEngineering.d.ts +79 -0
- package/dist/ChaosEngineering.js +216 -0
- package/dist/CircuitBreaker.d.ts +106 -0
- package/dist/CircuitBreaker.js +374 -0
- package/dist/ClusterLock.d.ts +90 -0
- package/dist/ClusterLock.js +385 -0
- package/dist/ComplianceManager.d.ts +177 -0
- package/dist/ComplianceManager.js +556 -0
- package/dist/DatacenterOrchestrator.d.ts +133 -0
- package/dist/DatacenterOrchestrator.js +404 -0
- package/dist/DeadLetterQueue.d.ts +122 -0
- package/dist/DeadLetterQueue.js +539 -0
- package/dist/HealthMonitor.d.ts +42 -0
- package/dist/HealthMonitor.js +301 -0
- package/dist/MultiQueueWorker.d.ts +89 -0
- package/dist/MultiQueueWorker.js +277 -0
- package/dist/NotificationWorker.d.ts +21 -0
- package/dist/NotificationWorker.js +23 -0
- package/dist/Observability.d.ts +153 -0
- package/dist/Observability.js +530 -0
- package/dist/PluginManager.d.ts +123 -0
- package/dist/PluginManager.js +392 -0
- package/dist/PriorityQueue.d.ts +117 -0
- package/dist/PriorityQueue.js +244 -0
- package/dist/ResourceMonitor.d.ts +164 -0
- package/dist/ResourceMonitor.js +605 -0
- package/dist/SLAMonitor.d.ts +110 -0
- package/dist/SLAMonitor.js +274 -0
- package/dist/WorkerFactory.d.ts +193 -0
- package/dist/WorkerFactory.js +1507 -0
- package/dist/WorkerInit.d.ts +85 -0
- package/dist/WorkerInit.js +223 -0
- package/dist/WorkerMetrics.d.ts +114 -0
- package/dist/WorkerMetrics.js +509 -0
- package/dist/WorkerRegistry.d.ts +145 -0
- package/dist/WorkerRegistry.js +319 -0
- package/dist/WorkerShutdown.d.ts +61 -0
- package/dist/WorkerShutdown.js +159 -0
- package/dist/WorkerVersioning.d.ts +107 -0
- package/dist/WorkerVersioning.js +300 -0
- package/dist/build-manifest.json +462 -0
- package/dist/config/workerConfig.d.ts +3 -0
- package/dist/config/workerConfig.js +19 -0
- package/dist/createQueueWorker.d.ts +23 -0
- package/dist/createQueueWorker.js +113 -0
- package/dist/dashboard/index.d.ts +1 -0
- package/dist/dashboard/index.js +1 -0
- package/dist/dashboard/types.d.ts +117 -0
- package/dist/dashboard/types.js +1 -0
- package/dist/dashboard/workers-api.d.ts +4 -0
- package/dist/dashboard/workers-api.js +638 -0
- package/dist/dashboard/workers-dashboard-ui.d.ts +3 -0
- package/dist/dashboard/workers-dashboard-ui.js +1026 -0
- package/dist/dashboard/workers-dashboard.d.ts +4 -0
- package/dist/dashboard/workers-dashboard.js +904 -0
- package/dist/helper/index.d.ts +5 -0
- package/dist/helper/index.js +10 -0
- package/dist/http/WorkerApiController.d.ts +38 -0
- package/dist/http/WorkerApiController.js +312 -0
- package/dist/http/WorkerController.d.ts +374 -0
- package/dist/http/WorkerController.js +1351 -0
- package/dist/http/middleware/CustomValidation.d.ts +92 -0
- package/dist/http/middleware/CustomValidation.js +270 -0
- package/dist/http/middleware/DatacenterValidator.d.ts +3 -0
- package/dist/http/middleware/DatacenterValidator.js +94 -0
- package/dist/http/middleware/EditWorkerValidation.d.ts +7 -0
- package/dist/http/middleware/EditWorkerValidation.js +55 -0
- package/dist/http/middleware/FeaturesValidator.d.ts +3 -0
- package/dist/http/middleware/FeaturesValidator.js +60 -0
- package/dist/http/middleware/InfrastructureValidator.d.ts +31 -0
- package/dist/http/middleware/InfrastructureValidator.js +226 -0
- package/dist/http/middleware/OptionsValidator.d.ts +3 -0
- package/dist/http/middleware/OptionsValidator.js +112 -0
- package/dist/http/middleware/PayloadSanitizer.d.ts +7 -0
- package/dist/http/middleware/PayloadSanitizer.js +42 -0
- package/dist/http/middleware/ProcessorPathSanitizer.d.ts +3 -0
- package/dist/http/middleware/ProcessorPathSanitizer.js +74 -0
- package/dist/http/middleware/QueueNameSanitizer.d.ts +3 -0
- package/dist/http/middleware/QueueNameSanitizer.js +45 -0
- package/dist/http/middleware/ValidateDriver.d.ts +7 -0
- package/dist/http/middleware/ValidateDriver.js +20 -0
- package/dist/http/middleware/VersionSanitizer.d.ts +3 -0
- package/dist/http/middleware/VersionSanitizer.js +25 -0
- package/dist/http/middleware/WorkerNameSanitizer.d.ts +3 -0
- package/dist/http/middleware/WorkerNameSanitizer.js +46 -0
- package/dist/http/middleware/WorkerValidationChain.d.ts +27 -0
- package/dist/http/middleware/WorkerValidationChain.js +185 -0
- package/dist/index.d.ts +46 -0
- package/dist/index.js +48 -0
- package/dist/routes/workers.d.ts +12 -0
- package/dist/routes/workers.js +81 -0
- package/dist/storage/WorkerStore.d.ts +45 -0
- package/dist/storage/WorkerStore.js +195 -0
- package/dist/type.d.ts +76 -0
- package/dist/type.js +1 -0
- package/dist/ui/router/ui.d.ts +3 -0
- package/dist/ui/router/ui.js +83 -0
- package/dist/ui/types/worker-ui.d.ts +229 -0
- package/dist/ui/types/worker-ui.js +5 -0
- package/package.json +53 -0
- package/src/AnomalyDetection.ts +434 -0
- package/src/AutoScaler.ts +654 -0
- package/src/BroadcastWorker.ts +34 -0
- package/src/CanaryController.ts +531 -0
- package/src/ChaosEngineering.ts +301 -0
- package/src/CircuitBreaker.ts +495 -0
- package/src/ClusterLock.ts +499 -0
- package/src/ComplianceManager.ts +815 -0
- package/src/DatacenterOrchestrator.ts +561 -0
- package/src/DeadLetterQueue.ts +733 -0
- package/src/HealthMonitor.ts +390 -0
- package/src/MultiQueueWorker.ts +431 -0
- package/src/NotificationWorker.ts +33 -0
- package/src/Observability.ts +696 -0
- package/src/PluginManager.ts +551 -0
- package/src/PriorityQueue.ts +351 -0
- package/src/ResourceMonitor.ts +769 -0
- package/src/SLAMonitor.ts +408 -0
- package/src/WorkerFactory.ts +2108 -0
- package/src/WorkerInit.ts +313 -0
- package/src/WorkerMetrics.ts +709 -0
- package/src/WorkerRegistry.ts +443 -0
- package/src/WorkerShutdown.ts +210 -0
- package/src/WorkerVersioning.ts +422 -0
- package/src/config/workerConfig.ts +25 -0
- package/src/createQueueWorker.ts +174 -0
- package/src/dashboard/index.ts +6 -0
- package/src/dashboard/types.ts +141 -0
- package/src/dashboard/workers-api.ts +785 -0
- package/src/dashboard/zintrust.svg +30 -0
- package/src/helper/index.ts +11 -0
- package/src/http/WorkerApiController.ts +369 -0
- package/src/http/WorkerController.ts +1512 -0
- package/src/http/middleware/CustomValidation.ts +360 -0
- package/src/http/middleware/DatacenterValidator.ts +124 -0
- package/src/http/middleware/EditWorkerValidation.ts +74 -0
- package/src/http/middleware/FeaturesValidator.ts +82 -0
- package/src/http/middleware/InfrastructureValidator.ts +295 -0
- package/src/http/middleware/OptionsValidator.ts +144 -0
- package/src/http/middleware/PayloadSanitizer.ts +52 -0
- package/src/http/middleware/ProcessorPathSanitizer.ts +86 -0
- package/src/http/middleware/QueueNameSanitizer.ts +55 -0
- package/src/http/middleware/ValidateDriver.ts +29 -0
- package/src/http/middleware/VersionSanitizer.ts +30 -0
- package/src/http/middleware/WorkerNameSanitizer.ts +56 -0
- package/src/http/middleware/WorkerValidationChain.ts +230 -0
- package/src/index.ts +98 -0
- package/src/routes/workers.ts +154 -0
- package/src/storage/WorkerStore.ts +240 -0
- package/src/type.ts +89 -0
- package/src/types/queue-monitor.d.ts +38 -0
- package/src/types/queue-redis.d.ts +38 -0
- package/src/ui/README.md +13 -0
- package/src/ui/components/JsonEditor.js +670 -0
- package/src/ui/components/JsonViewer.js +387 -0
- package/src/ui/components/WorkerCard.js +178 -0
- package/src/ui/components/WorkerExpandPanel.js +257 -0
- package/src/ui/components/fetcher.js +42 -0
- package/src/ui/components/sla-scorecard.js +32 -0
- package/src/ui/components/styles.css +30 -0
- package/src/ui/components/table-expander.js +34 -0
- package/src/ui/integration/worker-ui-integration.js +565 -0
- package/src/ui/router/ui.ts +99 -0
- package/src/ui/services/workerApi.js +240 -0
- package/src/ui/types/worker-ui.ts +283 -0
- package/src/ui/utils/jsonValidator.js +444 -0
- package/src/ui/workers/index.html +202 -0
- package/src/ui/workers/main.js +1781 -0
- package/src/ui/workers/styles.css +1350 -0
package/dist/ClusterLock.js
@@ -0,0 +1,385 @@
+/**
+ * Cluster Lock Manager
+ * Distributed locking using Redis for multi-instance worker coordination
+ * Sealed namespace for immutability
+ */
+import { ErrorFactory, Logger, createRedisConnection, generateUuid, } from '@zintrust/core';
+// Generate unique instance ID for this process
+const INSTANCE_ID = `worker-${process.pid}-${Date.now()}-${generateUuid()}`;
+// Redis key prefixes
+const LOCK_PREFIX = 'worker:lock:';
+const AUDIT_PREFIX = 'worker:audit:lock:';
+// Internal state
+let redisClient = null;
+let heartbeatInterval = null;
+const activeLocks = new Map();
+/**
+ * Helper: Get full Redis key for lock
+ */
+const getLockKey = (lockKey) => {
+    return `${LOCK_PREFIX}${lockKey}`;
+};
+/**
+ * Helper: Get full Redis key for audit log
+ */
+const getAuditKey = (lockKey) => {
+    return `${AUDIT_PREFIX}${lockKey}`;
+};
+/**
+ * Helper: Store audit log entry in Redis
+ */
+const auditLockOperation = async (client, entry) => {
+    try {
+        const auditKey = getAuditKey(entry.lockKey);
+        const auditData = JSON.stringify(entry);
+        // Store in sorted set with timestamp as score for easy retrieval
+        await client.zadd(auditKey, entry.timestamp.getTime(), auditData);
+        // Keep only last 1000 entries per lock
+        await client.zremrangebyrank(auditKey, 0, -1001);
+        // Expire audit logs after 30 days
+        await client.expire(auditKey, 30 * 24 * 60 * 60);
+    }
+    catch (error) {
+        Logger.error('Failed to write lock audit log', error);
+        // Don't throw - audit failure shouldn't break lock operations
+    }
+};
+/**
+ * Helper: Extend lock TTL
+ */
+const extendLockTTL = async (client, lockKey, ttl) => {
+    const redisKey = getLockKey(lockKey);
+    const value = await client.get(redisKey);
+    if (value === null || value !== INSTANCE_ID) {
+        return false; // Lock not held by this instance
+    }
+    const result = await client.expire(redisKey, ttl);
+    return result === 1;
+};
+/**
+ * Helper: Start heartbeat for lock extension
+ */
+const startHeartbeat = (client) => {
+    if (heartbeatInterval) {
+        return; // Already running
+    }
+    heartbeatInterval = setInterval(async () => {
+        const lockEntries = Array.from(activeLocks.entries());
+        await Promise.allSettled(lockEntries.map(async ([lockKey, info]) => {
+            try {
+                const now = new Date();
+                const timeUntilExpiry = info.expiresAt.getTime() - now.getTime();
+                // Extend if less than 30 seconds until expiry
+                if (timeUntilExpiry < 30000) {
+                    const ttl = Math.ceil(timeUntilExpiry / 1000) + 60; // Extend by 60 more seconds
+                    const extended = await extendLockTTL(client, lockKey, ttl);
+                    if (extended) {
+                        info.expiresAt = new Date(now.getTime() + ttl * 1000);
+                        Logger.debug(`Extended lock "${lockKey}" TTL to ${ttl}s`);
+                        await auditLockOperation(client, {
+                            timestamp: now,
+                            operation: 'extend',
+                            lockKey,
+                            instanceId: INSTANCE_ID,
+                            success: true,
+                        });
+                    }
+                    else {
+                        // Lost the lock
+                        activeLocks.delete(lockKey);
+                        Logger.warn(`Lost lock "${lockKey}" - it was released or taken by another instance`);
+                    }
+                }
+            }
+            catch (error) {
+                Logger.error(`Failed to extend lock "${lockKey}"`, error);
+            }
+        }));
+    }, 10000); // Check every 10 seconds
+    Logger.debug('Lock heartbeat started');
+};
+/**
+ * Helper: Stop heartbeat
+ */
+const stopHeartbeat = () => {
+    if (heartbeatInterval) {
+        clearInterval(heartbeatInterval);
+        heartbeatInterval = null;
+        Logger.debug('Lock heartbeat stopped');
+    }
+};
+/**
+ * Cluster Lock Manager - Sealed namespace
+ */
+export const ClusterLock = Object.freeze({
+    /**
+     * Initialize the lock manager with Redis connection
+     */
+    initialize(config) {
+        if (redisClient) {
+            Logger.warn('ClusterLock already initialized');
+            return;
+        }
+        redisClient = createRedisConnection(config);
+        startHeartbeat(redisClient);
+        Logger.info('ClusterLock initialized', { instanceId: INSTANCE_ID });
+    },
+    /**
+     * Acquire a distributed lock
+     */
+    async acquire(options) {
+        if (!redisClient) {
+            throw ErrorFactory.createGeneralError('ClusterLock not initialized. Call initialize() first.');
+        }
+        const { lockKey, ttl, region = 'default', userId } = options;
+        const redisKey = getLockKey(lockKey);
+        const now = new Date();
+        try {
+            // Try to acquire lock using SET NX EX (set if not exists with expiry)
+            const result = await redisClient.set(redisKey, INSTANCE_ID, 'EX', ttl, 'NX');
+            const success = result === 'OK';
+            if (success) {
+                const lockInfo = {
+                    lockKey,
+                    instanceId: INSTANCE_ID,
+                    acquiredAt: now,
+                    expiresAt: new Date(now.getTime() + ttl * 1000),
+                    region,
+                    userId,
+                };
+                activeLocks.set(lockKey, lockInfo);
+                Logger.info(`Acquired lock "${lockKey}"`, {
+                    region,
+                    userId,
+                    ttl,
+                    expiresAt: lockInfo.expiresAt.toISOString(),
+                });
+                await auditLockOperation(redisClient, {
+                    timestamp: now,
+                    operation: 'acquire',
+                    lockKey,
+                    instanceId: INSTANCE_ID,
+                    userId,
+                    success: true,
+                });
+            }
+            else {
+                Logger.debug(`Failed to acquire lock "${lockKey}" - already held by another instance`);
+                await auditLockOperation(redisClient, {
+                    timestamp: now,
+                    operation: 'acquire',
+                    lockKey,
+                    instanceId: INSTANCE_ID,
+                    userId,
+                    success: false,
+                });
+            }
+            return success;
+        }
+        catch (error) {
+            Logger.error(`Error acquiring lock "${lockKey}"`, error);
+            throw error;
+        }
+    },
+    /**
+     * Release a distributed lock
+     */
+    async release(lockKey, userId) {
+        if (!redisClient) {
+            throw ErrorFactory.createGeneralError('ClusterLock not initialized');
+        }
+        const redisKey = getLockKey(lockKey);
+        const now = new Date();
+        try {
+            // Only release if we own the lock
+            const value = await redisClient.get(redisKey);
+            if (value !== INSTANCE_ID) {
+                Logger.warn(`Cannot release lock "${lockKey}" - not owned by this instance`);
+                return false;
+            }
+            await redisClient.del(redisKey);
+            activeLocks.delete(lockKey);
+            Logger.info(`Released lock "${lockKey}"`, { userId });
+            await auditLockOperation(redisClient, {
+                timestamp: now,
+                operation: 'release',
+                lockKey,
+                instanceId: INSTANCE_ID,
+                userId,
+                success: true,
+            });
+            return true;
+        }
+        catch (error) {
+            Logger.error(`Error releasing lock "${lockKey}"`, error);
+            throw error;
+        }
+    },
+    /**
+     * Extend lock TTL
+     */
+    async extend(lockKey, ttl) {
+        if (!redisClient) {
+            throw ErrorFactory.createGeneralError('ClusterLock not initialized');
+        }
+        const extended = await extendLockTTL(redisClient, lockKey, ttl);
+        if (extended) {
+            const info = activeLocks.get(lockKey);
+            if (info) {
+                info.expiresAt = new Date(Date.now() + ttl * 1000);
+            }
+            Logger.debug(`Extended lock "${lockKey}" TTL to ${ttl}s`);
+        }
+        return extended;
+    },
+    /**
+     * Check if lock is held by this instance
+     */
+    async isHeldByMe(lockKey) {
+        if (!redisClient) {
+            return false;
+        }
+        const redisKey = getLockKey(lockKey);
+        const value = await redisClient.get(redisKey);
+        return value === INSTANCE_ID;
+    },
+    /**
+     * Force release a lock (admin operation)
+     */
+    async forceRelease(lockKey, userId, reason) {
+        if (!redisClient) {
+            throw ErrorFactory.createGeneralError('ClusterLock not initialized');
+        }
+        const redisKey = getLockKey(lockKey);
+        const now = new Date();
+        try {
+            const currentOwner = await redisClient.get(redisKey);
+            if (currentOwner === null) {
+                Logger.warn(`Lock "${lockKey}" does not exist`);
+                return false;
+            }
+            await redisClient.del(redisKey);
+            // Remove from active locks if we owned it
+            if (currentOwner === INSTANCE_ID) {
+                activeLocks.delete(lockKey);
+            }
+            Logger.warn(`Force released lock "${lockKey}"`, {
+                userId,
+                reason,
+                previousOwner: currentOwner,
+            });
+            await auditLockOperation(redisClient, {
+                timestamp: now,
+                operation: 'force-release',
+                lockKey,
+                instanceId: currentOwner,
+                userId,
+                reason,
+                success: true,
+            });
+            return true;
+        }
+        catch (error) {
+            Logger.error(`Error force releasing lock "${lockKey}"`, error);
+            throw error;
+        }
+    },
+    /**
+     * List all locks
+     */
+    async listLocks() {
+        if (!redisClient) {
+            throw ErrorFactory.createGeneralError('ClusterLock not initialized');
+        }
+        try {
+            const pattern = `${LOCK_PREFIX}*`;
+            const keys = await redisClient.keys(pattern);
+            const locks = await Promise.all(keys.map(async (key) => {
+                const owner = await redisClient?.get(key);
+                const lockKey = key.replace(LOCK_PREFIX, '');
+                const info = activeLocks.get(lockKey);
+                return {
+                    key: lockKey,
+                    owner: owner ?? 'unknown',
+                    region: info?.region,
+                };
+            }));
+            return locks;
+        }
+        catch (error) {
+            Logger.error('Error listing locks', error);
+            throw error;
+        }
+    },
+    /**
+     * Get lock owner
+     */
+    async getLockOwner(lockKey) {
+        if (!redisClient) {
+            throw ErrorFactory.createGeneralError('ClusterLock not initialized');
+        }
+        const redisKey = getLockKey(lockKey);
+        return redisClient.get(redisKey);
+    },
+    /**
+     * Get locks by region
+     */
+    getLocksByRegion(region) {
+        const locks = [];
+        for (const info of activeLocks.values()) {
+            if (info.region === region) {
+                locks.push({ ...info });
+            }
+        }
+        return locks;
+    },
+    /**
+     * Get audit log for a lock
+     */
+    async getAuditLog(lockKey, limit = 100) {
+        if (!redisClient) {
+            throw ErrorFactory.createGeneralError('ClusterLock not initialized');
+        }
+        try {
+            const auditKey = getAuditKey(lockKey);
+            // Get latest entries (highest scores = most recent timestamps)
+            const entries = await redisClient.zrevrange(auditKey, 0, limit - 1);
+            return entries.map((entry) => JSON.parse(entry));
+        }
+        catch (error) {
+            Logger.error(`Error retrieving audit log for "${lockKey}"`, error);
+            return [];
+        }
+    },
+    /**
+     * Get active locks held by this instance
+     */
+    getActiveLocks() {
+        return Array.from(activeLocks.values()).map((info) => ({ ...info }));
+    },
+    /**
+     * Get instance ID
+     */
+    getInstanceId() {
+        return INSTANCE_ID;
+    },
+    /**
+     * Shutdown and release all locks
+     */
+    async shutdown() {
+        if (!redisClient) {
+            return;
+        }
+        Logger.info('ClusterLock shutting down...');
+        stopHeartbeat();
+        // Release all active locks
+        const releasePromises = Array.from(activeLocks.keys()).map(async (lockKey) => ClusterLock.release(lockKey, 'system-shutdown'));
+        await Promise.all(releasePromises);
+        if (redisClient !== null) {
+            await redisClient.quit();
+            redisClient = null;
+        }
+        Logger.info('ClusterLock shutdown complete');
+    },
+});
+// Graceful shutdown handled by WorkerShutdown
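ClusterLock.js above implements the standard single-key Redis lock: `SET key value EX ttl NX` makes acquisition atomic, the stored value is this process's INSTANCE_ID so only the owner can release or extend, and a 10-second heartbeat renews any held lock that is within 30 seconds of expiry. The sketch below shows how a job might coordinate across instances with this API; the import from the package root and the Redis connection literal are assumptions for illustration, not taken from the diff.

```ts
import { ClusterLock } from '@zintrust/workers';

async function runDailyCleanup(): Promise<void> {
  // Options are passed through to @zintrust/core's createRedisConnection;
  // host/port here are placeholder values.
  ClusterLock.initialize({ host: '127.0.0.1', port: 6379 });

  // Non-blocking: resolves to false if another instance already holds the key.
  const acquired = await ClusterLock.acquire({
    lockKey: 'cleanup:daily',
    ttl: 120, // seconds; the heartbeat keeps extending this while the job runs
    region: 'eu-west-1',
    userId: 'scheduler',
  });
  if (!acquired) {
    return; // another worker instance owns the job
  }

  try {
    // ... work that must not run concurrently across instances ...
  } finally {
    await ClusterLock.release('cleanup:daily', 'scheduler');
    await ClusterLock.shutdown(); // stops the heartbeat, releases remaining locks, closes Redis
  }
}
```

Note that acquire does not retry or queue; callers needing blocking semantics would have to poll it themselves.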
package/dist/ComplianceManager.d.ts
@@ -0,0 +1,177 @@
+/**
+ * Compliance Manager
+ * GDPR, HIPAA, and SOC2 compliance enforcement
+ * Sealed namespace for immutability
+ */
+import { type RedisConfig } from '@zintrust/core';
+export type ComplianceStandard = 'gdpr' | 'hipaa' | 'soc2';
+export type DataClassification = 'public' | 'internal' | 'confidential' | 'restricted';
+export type ComplianceConfig = {
+    gdpr: {
+        enabled: boolean;
+        dataRetentionDays: number;
+        requireConsent: boolean;
+        enableRightToForgotten: boolean;
+        enableDataPortability: boolean;
+        enableAccessRequest: boolean;
+    };
+    hipaa: {
+        enabled: boolean;
+        requireEncryptionAtRest: boolean;
+        requireEncryptionInTransit: boolean;
+        auditRetentionYears: number;
+        requireAccessControls: boolean;
+        enableBreachNotification: boolean;
+    };
+    soc2: {
+        enabled: boolean;
+        requireChangeLogging: boolean;
+        requireAccessReviews: boolean;
+        accessReviewIntervalDays: number;
+        requireIncidentResponse: boolean;
+        requireDisasterRecovery: boolean;
+    };
+};
+export type DataSubject = {
+    id: string;
+    email?: string;
+    consentGiven: boolean;
+    consentDate?: Date;
+    consentWithdrawnDate?: Date;
+    dataClassification: DataClassification;
+    retentionPeriod?: number;
+    deletionScheduled?: Date;
+};
+export type ComplianceAuditLog = {
+    id: string;
+    timestamp: Date;
+    standard: ComplianceStandard;
+    action: string;
+    userId: string;
+    userRole?: string;
+    dataSubjectId?: string;
+    resourceId: string;
+    resourceType: string;
+    ipAddress?: string;
+    userAgent?: string;
+    changes?: Record<string, {
+        before: unknown;
+        after: unknown;
+    }>;
+    result: 'success' | 'failure' | 'blocked';
+    reason?: string;
+    severity: 'info' | 'warning' | 'critical';
+};
+export type AccessRequest = {
+    id: string;
+    dataSubjectId: string;
+    requestType: 'access' | 'deletion' | 'portability' | 'rectification';
+    requestDate: Date;
+    status: 'pending' | 'approved' | 'rejected' | 'completed';
+    requestedBy: string;
+    approvedBy?: string;
+    completedBy?: string;
+    completedDate?: Date;
+    reason?: string;
+    dataExport?: string;
+};
+export type EncryptionMetadata = {
+    algorithm: string;
+    keyId: string;
+    encryptedAt: Date;
+    encryptedBy: string;
+};
+export type ComplianceViolation = {
+    id: string;
+    timestamp: Date;
+    standard: ComplianceStandard;
+    violationType: string;
+    severity: 'low' | 'medium' | 'high' | 'critical';
+    description: string;
+    affectedResources: string[];
+    remediation: string;
+    status: 'open' | 'in-progress' | 'resolved' | 'accepted-risk';
+};
+/**
+ * Compliance Manager - Sealed namespace
+ */
+export declare const ComplianceManager: Readonly<{
+    /**
+     * Initialize compliance manager
+     */
+    initialize(redisConfig: RedisConfig, config?: Partial<ComplianceConfig>): void;
+    /**
+     * Register data subject
+     */
+    registerDataSubject(subject: DataSubject): Promise<void>;
+    /**
+     * Record consent
+     */
+    recordConsent(dataSubjectId: string, consentGiven: boolean, userId: string): Promise<void>;
+    /**
+     * Check if action is compliant
+     */
+    checkCompliance(action: string, userId: string, dataSubjectId?: string, resourceId?: string): Promise<{
+        compliant: boolean;
+        violations: string[];
+    }>;
+    /**
+     * Create access request (GDPR Right to Access, Deletion, etc.)
+     */
+    createAccessRequest(request: Omit<AccessRequest, "id" | "requestDate" | "status">): Promise<string>;
+    /**
+     * Process access request
+     */
+    processAccessRequest(requestId: string, status: AccessRequest["status"], processedBy: string): Promise<void>;
+    /**
+     * Encrypt sensitive data (HIPAA compliance)
+     */
+    encryptSensitiveData(data: string, userId: string, keyId?: string): {
+        encrypted: string;
+        metadata: EncryptionMetadata;
+    };
+    /**
+     * Decrypt sensitive data
+     */
+    decryptSensitiveData(encryptedPackage: string, userId: string): string;
+    /**
+     * Record compliance violation
+     */
+    recordViolation(violation: Omit<ComplianceViolation, "id" | "timestamp">): Promise<string>;
+    /**
+     * Get audit logs
+     */
+    getAuditLogs(standard: ComplianceStandard, startDate?: Date, endDate?: Date, limit?: number): Promise<ReadonlyArray<ComplianceAuditLog>>;
+    /**
+     * Get compliance summary
+     */
+    getComplianceSummary(): Promise<{
+        gdpr: {
+            enabled: boolean;
+            dataSubjects: number;
+            pendingRequests: number;
+        };
+        hipaa: {
+            enabled: boolean;
+            encryptedResources: number;
+            auditLogRetention: string;
+        };
+        soc2: {
+            enabled: boolean;
+            violations: number;
+            lastAccessReview?: Date;
+        };
+    }>;
+    /**
+     * Get configuration
+     */
+    getConfig(): ComplianceConfig | null;
+    /**
+     * Update configuration
+     */
+    updateConfig(config: Partial<ComplianceConfig>): void;
+    /**
+     * Shutdown
+     */
+    shutdown(): Promise<void>;
+}>;
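ComplianceManager.d.ts only declares the API surface; the implementation ships separately in dist/ComplianceManager.js (+556 lines, listed above). A minimal sketch of the GDPR erasure flow these signatures suggest is below; the package-root import, the RedisConfig literal, and the user identifiers are illustrative assumptions, not taken from the diff.

```ts
import { ComplianceManager } from '@zintrust/workers';

async function handleErasureRequest(subjectId: string): Promise<void> {
  // Redis options and the GDPR settings are placeholder values.
  ComplianceManager.initialize(
    { host: '127.0.0.1', port: 6379 },
    {
      gdpr: {
        enabled: true,
        dataRetentionDays: 365,
        requireConsent: true,
        enableRightToForgotten: true,
        enableDataPortability: true,
        enableAccessRequest: true,
      },
    },
  );

  // Queue a deletion ("right to be forgotten") request for the data subject.
  const requestId = await ComplianceManager.createAccessRequest({
    dataSubjectId: subjectId,
    requestType: 'deletion',
    requestedBy: 'support-agent',
  });

  // A privileged user later approves the request.
  await ComplianceManager.processAccessRequest(requestId, 'approved', 'dpo-user');
}
```

Because the second initialize argument is Partial<ComplianceConfig>, only the gdpr block needs to be supplied here, but every field inside that block is required by the type.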