@cloud-copilot/iam-lens 0.1.107 → 0.1.109
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cjs/index.d.ts +2 -0
- package/dist/cjs/index.d.ts.map +1 -1
- package/dist/cjs/index.js +3 -1
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/utils/bitset.js +3 -3
- package/dist/cjs/utils/bitset.js.map +1 -1
- package/dist/cjs/whoCan/WhoCanMainThreadWorker.d.ts +65 -3
- package/dist/cjs/whoCan/WhoCanMainThreadWorker.d.ts.map +1 -1
- package/dist/cjs/whoCan/WhoCanMainThreadWorker.js +52 -31
- package/dist/cjs/whoCan/WhoCanMainThreadWorker.js.map +1 -1
- package/dist/cjs/whoCan/WhoCanProcessor.d.ts +371 -0
- package/dist/cjs/whoCan/WhoCanProcessor.d.ts.map +1 -0
- package/dist/cjs/whoCan/WhoCanProcessor.js +980 -0
- package/dist/cjs/whoCan/WhoCanProcessor.js.map +1 -0
- package/dist/cjs/whoCan/WhoCanWorker.d.ts +2 -0
- package/dist/cjs/whoCan/WhoCanWorker.d.ts.map +1 -1
- package/dist/cjs/whoCan/WhoCanWorker.js.map +1 -1
- package/dist/cjs/whoCan/WhoCanWorkerThreadWorker.js +99 -80
- package/dist/cjs/whoCan/WhoCanWorkerThreadWorker.js.map +1 -1
- package/dist/cjs/whoCan/principalArnFilter.d.ts +84 -0
- package/dist/cjs/whoCan/principalArnFilter.d.ts.map +1 -0
- package/dist/cjs/whoCan/principalArnFilter.js +256 -0
- package/dist/cjs/whoCan/principalArnFilter.js.map +1 -0
- package/dist/cjs/whoCan/untrustingActions.d.ts +7 -0
- package/dist/cjs/whoCan/untrustingActions.d.ts.map +1 -0
- package/dist/cjs/whoCan/untrustingActions.js +30 -0
- package/dist/cjs/whoCan/untrustingActions.js.map +1 -0
- package/dist/cjs/whoCan/whoCan.d.ts +35 -2
- package/dist/cjs/whoCan/whoCan.d.ts.map +1 -1
- package/dist/cjs/whoCan/whoCan.js +277 -233
- package/dist/cjs/whoCan/whoCan.js.map +1 -1
- package/dist/esm/index.d.ts +2 -0
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +2 -0
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/utils/bitset.js +3 -3
- package/dist/esm/utils/bitset.js.map +1 -1
- package/dist/esm/whoCan/WhoCanMainThreadWorker.d.ts +65 -3
- package/dist/esm/whoCan/WhoCanMainThreadWorker.d.ts.map +1 -1
- package/dist/esm/whoCan/WhoCanMainThreadWorker.js +53 -34
- package/dist/esm/whoCan/WhoCanMainThreadWorker.js.map +1 -1
- package/dist/esm/whoCan/WhoCanProcessor.d.ts +371 -0
- package/dist/esm/whoCan/WhoCanProcessor.d.ts.map +1 -0
- package/dist/esm/whoCan/WhoCanProcessor.js +970 -0
- package/dist/esm/whoCan/WhoCanProcessor.js.map +1 -0
- package/dist/esm/whoCan/WhoCanWorker.d.ts +2 -0
- package/dist/esm/whoCan/WhoCanWorker.d.ts.map +1 -1
- package/dist/esm/whoCan/WhoCanWorker.js.map +1 -1
- package/dist/esm/whoCan/WhoCanWorkerThreadWorker.js +102 -81
- package/dist/esm/whoCan/WhoCanWorkerThreadWorker.js.map +1 -1
- package/dist/esm/whoCan/principalArnFilter.d.ts +84 -0
- package/dist/esm/whoCan/principalArnFilter.d.ts.map +1 -0
- package/dist/esm/whoCan/principalArnFilter.js +251 -0
- package/dist/esm/whoCan/principalArnFilter.js.map +1 -0
- package/dist/esm/whoCan/untrustingActions.d.ts +7 -0
- package/dist/esm/whoCan/untrustingActions.d.ts.map +1 -0
- package/dist/esm/whoCan/untrustingActions.js +27 -0
- package/dist/esm/whoCan/untrustingActions.js.map +1 -0
- package/dist/esm/whoCan/whoCan.d.ts +35 -2
- package/dist/esm/whoCan/whoCan.d.ts.map +1 -1
- package/dist/esm/whoCan/whoCan.js +278 -237
- package/dist/esm/whoCan/whoCan.js.map +1 -1
- package/package.json +3 -3
|
@@ -0,0 +1,980 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.WhoCanProcessor = void 0;
|
|
4
|
+
const job_1 = require("@cloud-copilot/job");
|
|
5
|
+
const worker_threads_1 = require("worker_threads");
|
|
6
|
+
const collect_js_1 = require("../collect/collect.js");
|
|
7
|
+
const resources_js_1 = require("../resources.js");
|
|
8
|
+
const arn_js_1 = require("../utils/arn.js");
|
|
9
|
+
const workerScript_js_1 = require("../utils/workerScript.js");
|
|
10
|
+
const SharedArrayBufferMainCache_js_1 = require("../workers/SharedArrayBufferMainCache.js");
|
|
11
|
+
const StreamingWorkQueue_js_1 = require("../workers/StreamingWorkQueue.js");
|
|
12
|
+
const WhoCanMainThreadWorker_js_1 = require("./WhoCanMainThreadWorker.js");
|
|
13
|
+
const principalScope_js_1 = require("./principalScope.js");
|
|
14
|
+
const whoCan_js_1 = require("./whoCan.js");
|
|
15
|
+
const iam_utils_1 = require("@cloud-copilot/iam-utils");
|
|
16
|
+
const principalArnFilter_js_1 = require("./principalArnFilter.js");
|
|
17
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
18
|
+
// Helpers
|
|
19
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
20
|
+
// Monotonic counter backing generateRequestId; module-scoped so IDs stay
// unique across every processor instance in this process.
let nextRequestId = 0;
/**
 * Generates a unique request ID for a new request.
 *
 * @returns a unique string ID
 */
function generateRequestId() {
    nextRequestId += 1;
    return `req-${nextRequestId}`;
}
|
|
29
|
+
/**
 * Get the number of worker threads to use, defaulting to number of CPUs - 1.
 *
 * @param overrideValue the override value, if any
 * @returns the override value if provided, otherwise number of CPUs - 1
 */
function getNumberOfWorkers(overrideValue) {
    const hasValidOverride = typeof overrideValue === 'number' && overrideValue >= 0;
    if (hasValidOverride) {
        return Math.floor(overrideValue);
    }
    const cpuCount = (0, job_1.numberOfCpus)();
    // Leave one CPU free for the main thread, but never go negative.
    return Math.max(0, cpuCount - 1);
}
|
|
41
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
42
|
+
// Processor
|
|
43
|
+
// ──────────────────────────────────────────────────────────────────────────────
|
|
44
|
+
/**
 * A queue-first bulk processor that accepts many whoCan requests, expands
 * scenarios on the main thread, and feeds a shared simulation scheduler used
 * by worker threads and an optional main-thread runner.
 *
 * Results are delivered through the {@link WhoCanProcessorConfig.onRequestSettled}
 * callback as each request completes — they are not stored inside the processor.
 *
 * Use {@link enqueueWhoCan} to submit requests, then {@link waitForIdle} to
 * wait for all work to complete. Call {@link shutdown} when done to terminate
 * worker threads.
 */
class WhoCanProcessor {
    // Worker threads running simulations; empty when no worker script path resolves.
    workers;
    // Collect client used by main-thread preparation and simulation.
    collectClient;
    // Processor-wide configuration (collect configs, tuning, callbacks).
    config;
    // Set once shutdown() begins; enqueueWhoCan and waitForIdle throw afterwards.
    isShutdown = false;
    // Set after all worker threads have been terminated.
    workersDead = false;
    // Admission state
    pendingRequests = []; // submitted-but-not-admitted requests, FIFO
    activeRequestOrder = []; // requestIds in progress, oldest first (drives FIFO dequeue)
    requestStates = new Map(); // requestId -> mutable per-request state
    admissionPumpRunning = false; // ensures only one admission loop runs at a time
    draining = false; // true while waitForIdle() is in progress
    // Preparation queue
    preparationQueue;
    // Idle / drain tracking
    idleWaiters = []; // { resolve, reject } pairs awaiting idle
    settledCallbackErrors = []; // errors thrown by onRequestSettled callbacks
    // Main thread simulation runner
    mainThreadWorker;
    // Processor-fatal error
    fatalError;
    // Tracks a single in-progress shutdown so repeated calls are safe
    shutdownPromise;
|
|
79
|
+
    /**
     * Wires in pre-built collaborators. Callers should normally use the static
     * {@link WhoCanProcessor.create} factory, which also builds the workers,
     * collect client, and preparation queue and installs listeners.
     */
    constructor(workers, collectClient, config, preparationQueue) {
        this.workers = workers;
        this.collectClient = collectClient;
        this.config = config;
        this.preparationQueue = preparationQueue;
    }
|
|
85
|
+
    /**
     * Creates a new WhoCanProcessor with worker threads, a shared cache, and
     * lifetime-scoped message routing. The processor is ready to accept requests
     * immediately after creation.
     *
     * @param config - The configuration for the processor, including collect configs,
     * partition, simulation options, tuning, and the onRequestSettled callback.
     * @returns a new WhoCanProcessor instance
     */
    static async create(config) {
        const numWorkers = getNumberOfWorkers(config.tuning?.workerThreads);
        const perWorkerConcurrency = config.tuning?.perWorkerConcurrency ?? 50;
        const workerPath = (0, workerScript_js_1.getWorkerScriptPath)('whoCan/WhoCanWorkerThreadWorker.js');
        // If the worker script cannot be resolved, run with zero worker threads;
        // the main-thread runner created below still processes simulations.
        const workers = !workerPath
            ? []
            : new Array(numWorkers).fill(undefined).map(() => {
                return new worker_threads_1.Worker(workerPath, {
                    workerData: {
                        collectConfigs: config.collectConfigs,
                        partition: config.partition,
                        concurrency: perWorkerConcurrency,
                        s3AbacOverride: config.s3AbacOverride,
                        collectGrantDetails: config.collectGrantDetails,
                        clientFactoryPlugin: config.clientFactoryPlugin
                    }
                });
            });
        // The collect client shares its cache with the workers via SharedArrayBuffer.
        const collectClient = await (0, collect_js_1.getCollectClient)(config.collectConfigs, config.partition, {
            cacheProvider: new SharedArrayBufferMainCache_js_1.SharedArrayBufferMainCache(workers),
            clientFactoryPlugin: config.clientFactoryPlugin
        });
        // Preparation is I/O-bound, so allow up to 2x CPUs, capped at 50.
        const preparationConcurrency = config.tuning?.preparationConcurrency ?? Math.min(50, Math.max(1, (0, job_1.numberOfCpus)() * 2));
        const preparationQueue = new job_1.StreamingJobQueue(preparationConcurrency, console, async () => { });
        const processor = new WhoCanProcessor(workers, collectClient, config, preparationQueue);
        processor.installLifetimeWorkerListeners();
        processor.createMainThreadRunner();
        return processor;
    }
|
|
123
|
+
/**
|
|
124
|
+
* Enqueues a whoCan request for processing. Returns a unique request ID
|
|
125
|
+
* that will appear in the corresponding {@link WhoCanSettledEvent}.
|
|
126
|
+
*
|
|
127
|
+
* This method never activates a request directly — it appends to
|
|
128
|
+
* pendingRequests and signals the admission pump.
|
|
129
|
+
*
|
|
130
|
+
* @param request - The whoCan request parameters.
|
|
131
|
+
* @returns the unique request ID assigned to this request.
|
|
132
|
+
* @throws if the processor is shut down or draining via waitForIdle.
|
|
133
|
+
*/
|
|
134
|
+
enqueueWhoCan(request) {
|
|
135
|
+
if (this.isShutdown) {
|
|
136
|
+
throw new Error('WhoCanProcessor has been shut down');
|
|
137
|
+
}
|
|
138
|
+
if (this.draining) {
|
|
139
|
+
throw new Error('Cannot enqueue while draining — waitForIdle() is in progress');
|
|
140
|
+
}
|
|
141
|
+
const requestId = generateRequestId();
|
|
142
|
+
this.pendingRequests.push({ requestId, request });
|
|
143
|
+
this.wakeAdmissionPump();
|
|
144
|
+
return requestId;
|
|
145
|
+
}
|
|
146
|
+
    /**
     * Returns a promise that resolves when all pending and active work has
     * completed and all onRequestSettled callbacks have finished.
     *
     * While draining, new calls to {@link enqueueWhoCan} will throw. Once
     * the drain completes, the processor re-opens for new enqueues.
     *
     * @returns a promise that resolves when idle, or rejects if a worker crashes
     * or an onRequestSettled callback throws/rejects.
     */
    async waitForIdle() {
        if (this.isShutdown) {
            throw new Error('WhoCanProcessor has been shut down');
        }
        // If already idle, return immediately
        if (this.isIdle()) {
            // Surface any errors thrown by earlier onRequestSettled callbacks
            this.rejectIfSettledCallbackErrors();
            return;
        }
        this.draining = true;
        try {
            // Park until something (presumably checkIdle elsewhere in this class)
            // settles or rejects this waiter — TODO confirm against the notifier.
            await new Promise((resolve, reject) => {
                this.idleWaiters.push({ resolve, reject });
            });
            this.rejectIfSettledCallbackErrors();
        }
        finally {
            // Only clear draining when the last waiter has been notified
            if (this.idleWaiters.length === 0) {
                this.draining = false;
            }
        }
    }
|
|
179
|
+
/**
|
|
180
|
+
* Shuts down the processor by rejecting all pending requests, waiting for
|
|
181
|
+
* active requests to settle, and terminating all worker threads.
|
|
182
|
+
*
|
|
183
|
+
* This method is idempotent — calling it multiple times is safe.
|
|
184
|
+
*/
|
|
185
|
+
async shutdown() {
|
|
186
|
+
// If already shutting down or shut down, return the existing promise
|
|
187
|
+
if (this.shutdownPromise) {
|
|
188
|
+
return this.shutdownPromise;
|
|
189
|
+
}
|
|
190
|
+
this.shutdownPromise = this.executeShutdown();
|
|
191
|
+
return this.shutdownPromise;
|
|
192
|
+
}
|
|
193
|
+
    /**
     * Internal shutdown implementation. Rejects pending requests, waits for
     * active requests to drain, then terminates workers.
     */
    async executeShutdown() {
        this.isShutdown = true;
        // Reject all pending requests that haven't been admitted
        while (this.pendingRequests.length > 0) {
            const submitted = this.pendingRequests.shift();
            try {
                // Each un-admitted request is reported as rejected through the
                // normal settlement callback so callers see a terminal event.
                await this.config.onRequestSettled({
                    status: 'rejected',
                    requestId: submitted.requestId,
                    request: submitted.request,
                    error: new Error('WhoCanProcessor was shut down before this request was processed')
                });
            }
            catch (err) {
                // Callback failures are collected, not thrown — shutdown must proceed.
                this.settledCallbackErrors.push(err instanceof Error ? err : new Error(String(err)));
            }
        }
        // Wait for active requests to finish naturally (includes draining in-flight work)
        if (this.activeRequestOrder.length > 0) {
            await new Promise((resolve) => {
                // Re-check after the await boundary above in case everything settled
                if (this.activeRequestOrder.length === 0) {
                    resolve();
                }
                else {
                    // During shutdown a rejection still resolves — teardown continues.
                    this.idleWaiters.push({ resolve, reject: () => resolve() });
                }
            });
        }
        // Workers already torn down (e.g. after a worker failure) — nothing to do.
        if (this.workersDead) {
            return;
        }
        // Drain main thread worker
        if (this.mainThreadWorker) {
            await this.mainThreadWorker.finishAllWork();
            this.mainThreadWorker = undefined;
        }
        // Gracefully shut down workers
        const workerPromises = this.workers.map((worker) => {
            return new Promise((resolve) => {
                // The worker is expected to reply 'finished' to 'finishWork' once its
                // in-flight tasks drain — see WhoCanWorkerThreadWorker (not in view).
                worker.on('message', (msg) => {
                    if (msg.type === 'finished') {
                        worker.terminate().then(() => resolve());
                    }
                });
                worker.on('error', () => {
                    // Best-effort terminate; resolve either way so shutdown never hangs.
                    worker
                        .terminate()
                        .then(() => resolve())
                        .catch(() => resolve());
                });
                worker.postMessage({ type: 'finishWork' });
            });
        });
        await Promise.all(workerPromises);
        this.workersDead = true;
    }
|
|
253
|
+
    // ──────────────────────────────────────────────────────────────────────────
    // Lifetime worker listeners
    // ──────────────────────────────────────────────────────────────────────────
    /**
     * Installs lifetime-scoped message, error, and exit listeners on all workers.
     * Message listeners route simulation results and deny-detail checks to the
     * correct request state using requestId. Error/exit listeners detect crashes
     * and mark the processor as fatally failed.
     */
    installLifetimeWorkerListeners() {
        for (const worker of this.workers) {
            worker.on('message', (msg) => {
                this.handleWorkerMessage(msg, worker);
            });
            worker.on('error', (err) => {
                // Errors raised during an intentional shutdown are expected; skip them.
                if (!this.isShutdown) {
                    this.handleWorkerFailure(new Error(`Worker error: ${err.message}`));
                }
            });
            worker.on('exit', (code) => {
                // Exit code 0 is a normal termination; anything else is a crash.
                if (!this.isShutdown && code !== 0) {
                    this.handleWorkerFailure(new Error(`Worker exited unexpectedly with code ${code}`));
                }
            });
        }
    }
|
|
279
|
+
/**
|
|
280
|
+
* Routes a message from a worker thread to the appropriate handler based
|
|
281
|
+
* on message type and requestId.
|
|
282
|
+
*
|
|
283
|
+
* @param msg - The message received from the worker.
|
|
284
|
+
* @param worker - The worker that sent the message.
|
|
285
|
+
*/
|
|
286
|
+
handleWorkerMessage(msg, worker) {
|
|
287
|
+
if (msg.type === 'requestTask') {
|
|
288
|
+
const task = this.dequeueNextScenario();
|
|
289
|
+
worker.postMessage({ type: 'task', workerId: msg.workerId, task });
|
|
290
|
+
}
|
|
291
|
+
else if (msg.type === 'result') {
|
|
292
|
+
this.handleSimulationResult(msg.requestId, msg.result, !!msg.denyDetailsCheckWillFollow);
|
|
293
|
+
}
|
|
294
|
+
else if (msg.type === 'checkDenyDetails') {
|
|
295
|
+
this.handleCheckDenyDetails(msg.requestId, msg.checkId, msg.lightAnalysis, worker);
|
|
296
|
+
}
|
|
297
|
+
else if (msg.type === 'denyDetailsResult') {
|
|
298
|
+
this.handleDenyDetailsResult(msg.requestId, msg.denyDetail);
|
|
299
|
+
}
|
|
300
|
+
}
|
|
301
|
+
    /**
     * Creates the main-thread simulation runner if mainThreadConcurrency > 0.
     * The runner pulls from the FIFO scheduler and routes results by requestId.
     */
    createMainThreadRunner() {
        const mainThreadConcurrency = this.config.tuning?.mainThreadConcurrency ?? 50;
        if (mainThreadConcurrency <= 0) {
            // Main-thread simulation explicitly disabled via tuning
            return;
        }
        const { collectGrantDetails, s3AbacOverride } = this.config;
        this.mainThreadWorker = (0, WhoCanMainThreadWorker_js_1.createMainThreadStreamingWorkQueue)(
        // Task source: pull the next scenario from the shared FIFO scheduler
        () => this.dequeueNextScenario(), 
        // Result sink: route each simulation result back to its request
        (requestId, result) => this.handleSimulationResult(requestId, result), 
        // Deny-details gate: ask the request's callback whether deny details are
        // wanted; settled or unknown requests always answer false
        (requestId, lightAnalysis) => {
            const state = this.requestStates.get(requestId);
            if (state && !state.settled) {
                return state.denyDetailsCallback?.(lightAnalysis) ?? false;
            }
            return false;
        }, 
        // Deny-details sink: route collected details back to their request
        (requestId, detail) => this.handleDenyDetailsResult(requestId, detail), this.collectClient, s3AbacOverride, collectGrantDetails ?? false, mainThreadConcurrency);
    }
|
|
319
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
320
|
+
// FIFO queue-of-queues scheduler
|
|
321
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
322
|
+
/**
|
|
323
|
+
* Dequeues the next simulation scenario using FIFO request priority.
|
|
324
|
+
* Prefers the oldest active request that has ready scenarios. If the oldest
|
|
325
|
+
* is temporarily empty (still preparing), falls back to the next request
|
|
326
|
+
* with ready scenarios so cores do not idle.
|
|
327
|
+
*
|
|
328
|
+
* @returns the next work item, or undefined if no scenarios are ready.
|
|
329
|
+
*/
|
|
330
|
+
dequeueNextScenario() {
|
|
331
|
+
for (const requestId of this.activeRequestOrder) {
|
|
332
|
+
const state = this.requestStates.get(requestId);
|
|
333
|
+
if (!state || state.settled)
|
|
334
|
+
continue;
|
|
335
|
+
const item = state.scenarios.dequeue();
|
|
336
|
+
if (item) {
|
|
337
|
+
return { ...item, requestId };
|
|
338
|
+
}
|
|
339
|
+
}
|
|
340
|
+
return undefined;
|
|
341
|
+
}
|
|
342
|
+
/**
|
|
343
|
+
* Notifies all simulation consumers (workers and main thread) that new
|
|
344
|
+
* work may be available in the scheduler.
|
|
345
|
+
*/
|
|
346
|
+
notifySimulationConsumers() {
|
|
347
|
+
this.mainThreadWorker?.notifyWorkAvailable();
|
|
348
|
+
for (const worker of this.workers) {
|
|
349
|
+
worker.postMessage({ type: 'workAvailable' });
|
|
350
|
+
}
|
|
351
|
+
}
|
|
352
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
353
|
+
// Admission pump
|
|
354
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
355
|
+
/**
|
|
356
|
+
* Wakes the admission pump to process pending requests. If the pump is
|
|
357
|
+
* already running, this is a no-op — the running pump will pick up new
|
|
358
|
+
* pending requests on its next iteration.
|
|
359
|
+
*/
|
|
360
|
+
wakeAdmissionPump() {
|
|
361
|
+
if (this.admissionPumpRunning)
|
|
362
|
+
return;
|
|
363
|
+
this.admissionPumpRunning = true;
|
|
364
|
+
// Run asynchronously so enqueueWhoCan returns immediately
|
|
365
|
+
void this.runAdmissionPump();
|
|
366
|
+
}
|
|
367
|
+
    /**
     * The admission pump loop. Drains pendingRequests into active processing
     * up to maxRequestsInProgress. Only one instance of this loop runs at a time,
     * guarded by admissionPumpRunning.
     */
    async runAdmissionPump() {
        const maxActive = this.config.tuning?.maxRequestsInProgress ?? 30;
        try {
            while (this.pendingRequests.length > 0 && this.activeRequestOrder.length < maxActive) {
                if (this.isShutdown)
                    break;
                const submitted = this.pendingRequests.shift();
                const state = this.createRequestState(submitted);
                this.requestStates.set(submitted.requestId, state);
                this.activeRequestOrder.push(submitted.requestId);
                // Enqueue the root preparation job for this request
                this.enqueueRootPreparation(state);
            }
        }
        finally {
            // Clear the guard even on error so a later wakeAdmissionPump can
            // restart the loop (e.g. once active requests settle and free slots
            // under maxActive — presumably re-woken by completion handling;
            // TODO confirm against checkRequestCompletion).
            this.admissionPumpRunning = false;
        }
        // After admitting, check if we became idle
        this.checkIdle();
    }
|
|
392
|
+
    /**
     * Creates a fresh RequestState for an admitted request.
     *
     * @param submitted - The submitted request to create state for.
     * @returns the new RequestState.
     */
    createRequestState(submitted) {
        return {
            requestId: submitted.requestId,
            request: submitted.request,
            allScenariosCreated: false,
            // Per-request scenario queue consumed by the shared FIFO scheduler
            scenarios: new StreamingWorkQueue_js_1.StreamingWorkQueue(),
            // Count of scenarios enqueued so far (incremented during preparation)
            created: 0,
            // Count of finished scenarios — presumably incremented as simulation
            // results arrive; confirm against handleSimulationResult
            completed: 0,
            // Outstanding preparation jobs; the request cannot complete until 0
            pendingPreparationJobs: 0,
            allowed: [],
            principalsNotFound: [],
            accountsNotFound: [],
            organizationsNotFound: [],
            organizationalUnitsNotFound: [],
            allAccountsChecked: false,
            denyDetails: [],
            simulationCount: 0,
            denyDetailsCallback: submitted.request.denyDetailsCallback,
            pendingDenyDetailsChecks: 0,
            // Set once the request has settled; further work for it is skipped
            settled: false,
            callbackInvoked: false,
            simulationErrors: []
        };
    }
|
|
422
|
+
    // ──────────────────────────────────────────────────────────────────────────
    // Preparation
    // ──────────────────────────────────────────────────────────────────────────
    /**
     * Enqueues the root preparation job for a request. This job performs resource
     * account resolution, resource policy lookup, action expansion, principal scope
     * handling, and then enqueues follow-up preparation jobs to enumerate principals.
     *
     * @param state - The request state to prepare.
     */
    enqueueRootPreparation(state) {
        // Count the job before enqueueing so completion checks never see zero
        // pending jobs while work is still queued.
        state.pendingPreparationJobs++;
        this.preparationQueue.enqueue({
            properties: {},
            execute: async () => {
                try {
                    await this.executeRootPreparation(state);
                }
                catch (err) {
                    // Any preparation failure settles the whole request as an error.
                    this.settleRequestAsError(state, err instanceof Error ? err : new Error(String(err)));
                }
                finally {
                    // Always balance the increment above, then re-check completion.
                    state.pendingPreparationJobs--;
                    this.checkRequestCompletion(state);
                }
            }
        });
    }
|
|
450
|
+
/**
|
|
451
|
+
* Executes the root preparation for a request: resolves the resource account,
|
|
452
|
+
* fetches the resource policy, expands actions, determines which accounts and
|
|
453
|
+
* principals to check, and enqueues follow-up preparation jobs.
|
|
454
|
+
*
|
|
455
|
+
* @param state - The request state to prepare.
|
|
456
|
+
*/
|
|
457
|
+
async executeRootPreparation(state) {
|
|
458
|
+
if (state.settled)
|
|
459
|
+
return;
|
|
460
|
+
const { request } = state;
|
|
461
|
+
const { resource } = request;
|
|
462
|
+
const collectClient = this.collectClient;
|
|
463
|
+
if (!request.resourceAccount && !request.resource) {
|
|
464
|
+
throw new Error('Either resourceAccount or resource must be provided in the request.');
|
|
465
|
+
}
|
|
466
|
+
const resourceAccount = request.resourceAccount || (await (0, resources_js_1.getAccountIdForResource)(collectClient, resource));
|
|
467
|
+
if (!resourceAccount) {
|
|
468
|
+
throw new Error(`Could not determine account ID for resource ${resource}. Please use a different ARN or specify resourceAccount.`);
|
|
469
|
+
}
|
|
470
|
+
const actions = await (0, whoCan_js_1.actionsForWhoCan)({
|
|
471
|
+
actions: request.actions,
|
|
472
|
+
resource: request.resource
|
|
473
|
+
});
|
|
474
|
+
if (!actions || actions.length === 0) {
|
|
475
|
+
throw new Error('No valid actions provided or found for the resource.');
|
|
476
|
+
}
|
|
477
|
+
let resourcePolicy = undefined;
|
|
478
|
+
if (resource) {
|
|
479
|
+
resourcePolicy = await (0, resources_js_1.getResourcePolicyForResource)(collectClient, resource, resourceAccount);
|
|
480
|
+
const resourceArn = new arn_js_1.Arn(resource);
|
|
481
|
+
if ((resourceArn.matches({ service: 'iam', resourceType: 'role' }) ||
|
|
482
|
+
resourceArn.matches({ service: 'kms', resourceType: 'key' })) &&
|
|
483
|
+
!resourcePolicy) {
|
|
484
|
+
throw new Error(`Unable to find resource policy for ${resource}. Cannot determine who can access the resource.`);
|
|
485
|
+
}
|
|
486
|
+
}
|
|
487
|
+
const accountsToCheck = await (0, whoCan_js_1.accountsToCheckBasedOnResourcePolicy)(resourcePolicy, resourceAccount);
|
|
488
|
+
const principalArnFilter = (0, principalArnFilter_js_1.buildPrincipalArnFilter)(resourcePolicy);
|
|
489
|
+
const uniqueAccounts = await (0, whoCan_js_1.uniqueAccountsToCheck)(collectClient, accountsToCheck);
|
|
490
|
+
// Store not-found arrays on the request state
|
|
491
|
+
state.accountsNotFound = uniqueAccounts.accountsNotFound;
|
|
492
|
+
state.organizationsNotFound = uniqueAccounts.organizationsNotFound;
|
|
493
|
+
state.organizationalUnitsNotFound = uniqueAccounts.organizationalUnitsNotFound;
|
|
494
|
+
state.allAccountsChecked = request.principalScope ? false : accountsToCheck.allAccounts;
|
|
495
|
+
let accountsForSearch = uniqueAccounts.accounts;
|
|
496
|
+
let principalsForSearch = accountsToCheck.specificPrincipals;
|
|
497
|
+
let scopeIncludesResourceAccount = true;
|
|
498
|
+
if (request.principalScope) {
|
|
499
|
+
const resolved = await (0, principalScope_js_1.resolvePrincipalScope)(collectClient, request.principalScope);
|
|
500
|
+
const intersection = (0, principalScope_js_1.intersectWithPrincipalScope)(uniqueAccounts.accounts, accountsToCheck.specificPrincipals, accountsToCheck.allAccounts, resolved.accounts, resolved.principals);
|
|
501
|
+
accountsForSearch = intersection.accounts;
|
|
502
|
+
principalsForSearch = intersection.principals;
|
|
503
|
+
scopeIncludesResourceAccount = resolved.accounts.has(resourceAccount);
|
|
504
|
+
}
|
|
505
|
+
// Principals explicitly named in the resource policy are enqueued via the
|
|
506
|
+
// specific-principals path (which skips the PrincipalArn filter). Track them
|
|
507
|
+
// so the account-enumeration paths can skip duplicates without needing to
|
|
508
|
+
// store all enumerated principals in memory.
|
|
509
|
+
const specificPrincipalSet = new Set(principalsForSearch);
|
|
510
|
+
// Enqueue follow-up preparation jobs for account/principal enumeration
|
|
511
|
+
const principalIndexExists = !this.config.ignorePrincipalIndex && (await collectClient.principalIndexExists());
|
|
512
|
+
if (principalIndexExists) {
|
|
513
|
+
// Use the principal index to find relevant principals directly
|
|
514
|
+
state.pendingPreparationJobs++;
|
|
515
|
+
this.preparationQueue.enqueue({
|
|
516
|
+
properties: {},
|
|
517
|
+
execute: async () => {
|
|
518
|
+
try {
|
|
519
|
+
if (state.settled)
|
|
520
|
+
return;
|
|
521
|
+
const allFromAccount = scopeIncludesResourceAccount && accountsToCheck.checkAllForCurrentAccount
|
|
522
|
+
? resourceAccount
|
|
523
|
+
: undefined;
|
|
524
|
+
for (const action of actions) {
|
|
525
|
+
const indexedPrincipals = await collectClient.getPrincipalsWithActionAllowed(allFromAccount, accountsForSearch, action);
|
|
526
|
+
for (const principal of indexedPrincipals || []) {
|
|
527
|
+
if (specificPrincipalSet.has(principal))
|
|
528
|
+
continue;
|
|
529
|
+
if (principalArnFilter &&
|
|
530
|
+
!(0, iam_utils_1.isServicePrincipal)(principal) &&
|
|
531
|
+
!(0, principalArnFilter_js_1.principalMatchesFilter)(principal, action, resourceAccount, principalArnFilter)) {
|
|
532
|
+
continue;
|
|
533
|
+
}
|
|
534
|
+
state.scenarios.enqueue({
|
|
535
|
+
resource,
|
|
536
|
+
action,
|
|
537
|
+
principal,
|
|
538
|
+
resourceAccount,
|
|
539
|
+
strictContextKeys: state.request.strictContextKeys,
|
|
540
|
+
collectDenyDetails: !!state.denyDetailsCallback
|
|
541
|
+
});
|
|
542
|
+
state.created++;
|
|
543
|
+
}
|
|
544
|
+
}
|
|
545
|
+
this.notifySimulationConsumers();
|
|
546
|
+
}
|
|
547
|
+
catch (err) {
|
|
548
|
+
this.settleRequestAsError(state, err instanceof Error ? err : new Error(String(err)));
|
|
549
|
+
}
|
|
550
|
+
finally {
|
|
551
|
+
state.pendingPreparationJobs--;
|
|
552
|
+
this.checkRequestCompletion(state);
|
|
553
|
+
}
|
|
554
|
+
}
|
|
555
|
+
});
|
|
556
|
+
}
|
|
557
|
+
else {
|
|
558
|
+
// No principal index — enumerate all principals per account
|
|
559
|
+
for (const account of accountsForSearch) {
|
|
560
|
+
state.pendingPreparationJobs++;
|
|
561
|
+
this.preparationQueue.enqueue({
|
|
562
|
+
properties: {},
|
|
563
|
+
execute: async () => {
|
|
564
|
+
try {
|
|
565
|
+
if (state.settled)
|
|
566
|
+
return;
|
|
567
|
+
const principals = await collectClient.getAllPrincipalsInAccount(account);
|
|
568
|
+
for (const principal of principals) {
|
|
569
|
+
if (specificPrincipalSet.has(principal))
|
|
570
|
+
continue;
|
|
571
|
+
const skipFilter = !principalArnFilter || (0, iam_utils_1.isServicePrincipal)(principal);
|
|
572
|
+
for (const action of actions) {
|
|
573
|
+
if (!skipFilter &&
|
|
574
|
+
!(0, principalArnFilter_js_1.principalMatchesFilter)(principal, action, resourceAccount, principalArnFilter)) {
|
|
575
|
+
continue;
|
|
576
|
+
}
|
|
577
|
+
state.scenarios.enqueue({
|
|
578
|
+
resource,
|
|
579
|
+
action,
|
|
580
|
+
principal,
|
|
581
|
+
resourceAccount,
|
|
582
|
+
strictContextKeys: state.request.strictContextKeys,
|
|
583
|
+
collectDenyDetails: !!state.denyDetailsCallback
|
|
584
|
+
});
|
|
585
|
+
state.created++;
|
|
586
|
+
}
|
|
587
|
+
}
|
|
588
|
+
this.notifySimulationConsumers();
|
|
589
|
+
}
|
|
590
|
+
catch (err) {
|
|
591
|
+
this.settleRequestAsError(state, err instanceof Error ? err : new Error(String(err)));
|
|
592
|
+
}
|
|
593
|
+
finally {
|
|
594
|
+
state.pendingPreparationJobs--;
|
|
595
|
+
this.checkRequestCompletion(state);
|
|
596
|
+
}
|
|
597
|
+
}
|
|
598
|
+
});
|
|
599
|
+
}
|
|
600
|
+
}
|
|
601
|
+
// Enqueue specific principals from resource policy (iterate the Set to
|
|
602
|
+
// deduplicate — the same principal can appear in the list more than once
|
|
603
|
+
// when multiple statements reference it, e.g. an explicit Principal element
|
|
604
|
+
// and a StringEquals aws:PrincipalArn condition).
|
|
605
|
+
for (const principal of specificPrincipalSet) {
|
|
606
|
+
state.pendingPreparationJobs++;
|
|
607
|
+
this.preparationQueue.enqueue({
|
|
608
|
+
properties: {},
|
|
609
|
+
execute: async () => {
|
|
610
|
+
try {
|
|
611
|
+
if (state.settled)
|
|
612
|
+
return;
|
|
613
|
+
if ((0, iam_utils_1.isServicePrincipal)(principal)) {
|
|
614
|
+
for (const action of actions) {
|
|
615
|
+
state.scenarios.enqueue({
|
|
616
|
+
resource,
|
|
617
|
+
action,
|
|
618
|
+
principal,
|
|
619
|
+
resourceAccount,
|
|
620
|
+
strictContextKeys: state.request.strictContextKeys,
|
|
621
|
+
collectDenyDetails: !!state.denyDetailsCallback
|
|
622
|
+
});
|
|
623
|
+
state.created++;
|
|
624
|
+
}
|
|
625
|
+
}
|
|
626
|
+
else if ((0, iam_utils_1.isIamUserArn)(principal) ||
|
|
627
|
+
(0, iam_utils_1.isIamRoleArn)(principal) ||
|
|
628
|
+
(0, iam_utils_1.isAssumedRoleArn)(principal)) {
|
|
629
|
+
const principalExists = await collectClient.principalExists(principal);
|
|
630
|
+
if (!principalExists) {
|
|
631
|
+
state.principalsNotFound.push(principal);
|
|
632
|
+
}
|
|
633
|
+
else {
|
|
634
|
+
for (const action of actions) {
|
|
635
|
+
state.scenarios.enqueue({
|
|
636
|
+
resource,
|
|
637
|
+
action,
|
|
638
|
+
principal,
|
|
639
|
+
resourceAccount,
|
|
640
|
+
strictContextKeys: state.request.strictContextKeys,
|
|
641
|
+
collectDenyDetails: !!state.denyDetailsCallback
|
|
642
|
+
});
|
|
643
|
+
state.created++;
|
|
644
|
+
}
|
|
645
|
+
}
|
|
646
|
+
}
|
|
647
|
+
else {
|
|
648
|
+
state.principalsNotFound.push(principal);
|
|
649
|
+
}
|
|
650
|
+
this.notifySimulationConsumers();
|
|
651
|
+
}
|
|
652
|
+
catch (err) {
|
|
653
|
+
this.settleRequestAsError(state, err instanceof Error ? err : new Error(String(err)));
|
|
654
|
+
}
|
|
655
|
+
finally {
|
|
656
|
+
state.pendingPreparationJobs--;
|
|
657
|
+
this.checkRequestCompletion(state);
|
|
658
|
+
}
|
|
659
|
+
}
|
|
660
|
+
});
|
|
661
|
+
}
|
|
662
|
+
// All follow-up prep jobs have been enqueued. Mark scenarios as fully specified
|
|
663
|
+
// once the root prep and all follow-ups complete (tracked by pendingPreparationJobs).
|
|
664
|
+
state.allScenariosCreated = true;
|
|
665
|
+
// Notify consumers that scenarios may be available
|
|
666
|
+
this.notifySimulationConsumers();
|
|
667
|
+
}
|
|
668
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
669
|
+
// Simulation result handling
|
|
670
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
671
|
+
/**
|
|
672
|
+
* Handles a simulation result from a worker or the main thread runner.
|
|
673
|
+
* Routes the result to the correct request state and checks for completion.
|
|
674
|
+
*
|
|
675
|
+
* @param requestId - The ID of the request this result belongs to.
|
|
676
|
+
* @param result - The simulation job result.
|
|
677
|
+
*/
|
|
678
|
+
handleSimulationResult(requestId, result, denyDetailsCheckWillFollow = false) {
|
|
679
|
+
const state = this.requestStates.get(requestId);
|
|
680
|
+
if (!state)
|
|
681
|
+
return;
|
|
682
|
+
state.completed++;
|
|
683
|
+
if (denyDetailsCheckWillFollow) {
|
|
684
|
+
state.pendingDenyDetailsChecks++;
|
|
685
|
+
}
|
|
686
|
+
if (state.settled) {
|
|
687
|
+
// Request already settled (e.g., failed). Still count the result so
|
|
688
|
+
// the drain check can fire, but discard the actual data.
|
|
689
|
+
this.checkRequestCompletion(state);
|
|
690
|
+
return;
|
|
691
|
+
}
|
|
692
|
+
state.simulationCount++;
|
|
693
|
+
if (result.status === 'fulfilled' && result.value) {
|
|
694
|
+
state.allowed.push(result.value);
|
|
695
|
+
}
|
|
696
|
+
else if (result.status === 'rejected') {
|
|
697
|
+
console.error('Error running simulation:', result.reason);
|
|
698
|
+
state.simulationErrors.push(result);
|
|
699
|
+
}
|
|
700
|
+
this.checkRequestCompletion(state);
|
|
701
|
+
}
|
|
702
|
+
/**
|
|
703
|
+
* Handles a checkDenyDetails request from a worker thread. Looks up the
|
|
704
|
+
* request's denyDetailsCallback and responds.
|
|
705
|
+
*
|
|
706
|
+
* @param requestId - The ID of the request.
|
|
707
|
+
* @param checkId - The unique check ID for this deny-details round trip.
|
|
708
|
+
* @param lightAnalysis - The light analysis to pass to the callback.
|
|
709
|
+
* @param worker - The worker to respond to.
|
|
710
|
+
*/
|
|
711
|
+
handleCheckDenyDetails(requestId, checkId, lightAnalysis, worker) {
|
|
712
|
+
const state = this.requestStates.get(requestId);
|
|
713
|
+
const shouldInclude = state && !state.settled ? (state.denyDetailsCallback?.(lightAnalysis) ?? false) : false;
|
|
714
|
+
if (!shouldInclude && state) {
|
|
715
|
+
// No denyDetailsResult message will follow — decrement the counter
|
|
716
|
+
state.pendingDenyDetailsChecks--;
|
|
717
|
+
this.checkRequestCompletion(state);
|
|
718
|
+
}
|
|
719
|
+
worker.postMessage({
|
|
720
|
+
type: 'denyDetailsCheckResult',
|
|
721
|
+
checkId,
|
|
722
|
+
shouldInclude
|
|
723
|
+
});
|
|
724
|
+
}
|
|
725
|
+
/**
|
|
726
|
+
* Handles a deny details result from a worker thread. Decrements the
|
|
727
|
+
* pending deny-details counter and checks for request completion.
|
|
728
|
+
*
|
|
729
|
+
* @param requestId - The ID of the request.
|
|
730
|
+
* @param denyDetail - The deny detail to store.
|
|
731
|
+
*/
|
|
732
|
+
handleDenyDetailsResult(requestId, denyDetail) {
|
|
733
|
+
const state = this.requestStates.get(requestId);
|
|
734
|
+
if (!state)
|
|
735
|
+
return;
|
|
736
|
+
if (state.pendingDenyDetailsChecks > 0) {
|
|
737
|
+
state.pendingDenyDetailsChecks--;
|
|
738
|
+
}
|
|
739
|
+
if (!state.settled) {
|
|
740
|
+
state.denyDetails.push(denyDetail);
|
|
741
|
+
}
|
|
742
|
+
this.checkRequestCompletion(state);
|
|
743
|
+
}
|
|
744
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
745
|
+
// Request completion and settlement
|
|
746
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
747
|
+
/**
|
|
748
|
+
* Checks whether a request has completed all preparation and simulation work.
|
|
749
|
+
* If so, settles the request as successful.
|
|
750
|
+
*
|
|
751
|
+
* @param state - The request state to check.
|
|
752
|
+
*/
|
|
753
|
+
checkRequestCompletion(state) {
|
|
754
|
+
if (state.settled) {
|
|
755
|
+
this.checkRequestDrain(state);
|
|
756
|
+
return;
|
|
757
|
+
}
|
|
758
|
+
if (!state.allScenariosCreated)
|
|
759
|
+
return;
|
|
760
|
+
if (state.pendingPreparationJobs > 0)
|
|
761
|
+
return;
|
|
762
|
+
if (state.created !== state.completed)
|
|
763
|
+
return;
|
|
764
|
+
if (state.pendingDenyDetailsChecks > 0)
|
|
765
|
+
return;
|
|
766
|
+
// All work done — settle as success
|
|
767
|
+
if (state.simulationErrors.length > 0) {
|
|
768
|
+
this.settleRequestAsError(state, new Error(`Completed with ${state.simulationErrors.length} simulation errors. See previous logs.`));
|
|
769
|
+
}
|
|
770
|
+
else {
|
|
771
|
+
this.settleRequestAsSuccess(state);
|
|
772
|
+
}
|
|
773
|
+
}
|
|
774
|
+
/**
|
|
775
|
+
* Settles a request as successful: builds the WhoCanResponse, awaits
|
|
776
|
+
* onRequestSettled, removes the request from active state, and wakes
|
|
777
|
+
* the admission pump.
|
|
778
|
+
*
|
|
779
|
+
* @param state - The request state to settle.
|
|
780
|
+
*/
|
|
781
|
+
settleRequestAsSuccess(state) {
|
|
782
|
+
if (state.settled)
|
|
783
|
+
return;
|
|
784
|
+
state.settled = true;
|
|
785
|
+
const result = {
|
|
786
|
+
simulationCount: state.simulationCount,
|
|
787
|
+
allowed: state.allowed,
|
|
788
|
+
allAccountsChecked: state.allAccountsChecked,
|
|
789
|
+
accountsNotFound: state.accountsNotFound,
|
|
790
|
+
organizationsNotFound: state.organizationsNotFound,
|
|
791
|
+
organizationalUnitsNotFound: state.organizationalUnitsNotFound,
|
|
792
|
+
principalsNotFound: state.principalsNotFound,
|
|
793
|
+
denyDetails: state.denyDetailsCallback ? state.denyDetails : undefined
|
|
794
|
+
};
|
|
795
|
+
if (state.request.sort) {
|
|
796
|
+
(0, whoCan_js_1.sortWhoCanResults)(result);
|
|
797
|
+
}
|
|
798
|
+
void this.invokeSettledCallbackAndCleanup(state, {
|
|
799
|
+
status: 'fulfilled',
|
|
800
|
+
requestId: state.requestId,
|
|
801
|
+
request: state.request,
|
|
802
|
+
result
|
|
803
|
+
});
|
|
804
|
+
}
|
|
805
|
+
/**
|
|
806
|
+
* Settles a request as failed: invokes onRequestSettled with the error
|
|
807
|
+
* immediately, but keeps the request in active state until all in-flight
|
|
808
|
+
* work drains (created === completed). Late results for settled requests
|
|
809
|
+
* are discarded but still counted so the drain completes.
|
|
810
|
+
*
|
|
811
|
+
* @param state - The request state to settle.
|
|
812
|
+
* @param error - The error that caused the failure.
|
|
813
|
+
*/
|
|
814
|
+
settleRequestAsError(state, error) {
|
|
815
|
+
if (state.settled)
|
|
816
|
+
return;
|
|
817
|
+
state.settled = true;
|
|
818
|
+
// Await the callback (backpressure), then mark it done so checkRequestDrain
|
|
819
|
+
// can free the slot once all in-flight work also completes.
|
|
820
|
+
void (async () => {
|
|
821
|
+
await this.invokeSettledCallback({
|
|
822
|
+
status: 'rejected',
|
|
823
|
+
requestId: state.requestId,
|
|
824
|
+
request: state.request,
|
|
825
|
+
error
|
|
826
|
+
});
|
|
827
|
+
state.callbackInvoked = true;
|
|
828
|
+
this.checkRequestDrain(state);
|
|
829
|
+
})();
|
|
830
|
+
}
|
|
831
|
+
/**
|
|
832
|
+
* Invokes the onRequestSettled callback and accumulates any errors for
|
|
833
|
+
* later surfacing via waitForIdle.
|
|
834
|
+
*
|
|
835
|
+
* @param event - The settlement event to deliver.
|
|
836
|
+
*/
|
|
837
|
+
async invokeSettledCallback(event) {
|
|
838
|
+
try {
|
|
839
|
+
await this.config.onRequestSettled(event);
|
|
840
|
+
}
|
|
841
|
+
catch (err) {
|
|
842
|
+
this.settledCallbackErrors.push(err instanceof Error ? err : new Error(String(err)));
|
|
843
|
+
}
|
|
844
|
+
}
|
|
845
|
+
/**
 * Awaits the onRequestSettled callback, then removes the request from
 * active state and wakes the admission pump. Used for successful settlements
 * where all in-flight work is already complete (failed settlements instead
 * go through checkRequestDrain so late results can still drain).
 *
 * @param state - The request state being settled.
 * @param event - The settlement event to deliver.
 */
async invokeSettledCallbackAndCleanup(state, event) {
    // invokeSettledCallback swallows callback errors, so this never throws.
    await this.invokeSettledCallback(event);
    this.removeFromActiveState(state);
}
|
|
857
|
+
/**
|
|
858
|
+
* Checks whether a settled request has fully drained: the onRequestSettled
|
|
859
|
+
* callback has been awaited, all preparation jobs have finished, all
|
|
860
|
+
* simulation results have been received, and all deny-detail round trips
|
|
861
|
+
* have completed. Only then is the request removed from active state.
|
|
862
|
+
*
|
|
863
|
+
* @param state - The request state to check.
|
|
864
|
+
*/
|
|
865
|
+
checkRequestDrain(state) {
|
|
866
|
+
if (!state.settled)
|
|
867
|
+
return;
|
|
868
|
+
if (!state.callbackInvoked)
|
|
869
|
+
return;
|
|
870
|
+
if (state.pendingPreparationJobs > 0)
|
|
871
|
+
return;
|
|
872
|
+
if (state.created !== state.completed)
|
|
873
|
+
return;
|
|
874
|
+
if (state.pendingDenyDetailsChecks > 0)
|
|
875
|
+
return;
|
|
876
|
+
this.removeFromActiveState(state);
|
|
877
|
+
}
|
|
878
|
+
/**
|
|
879
|
+
* Removes a request from active state, wakes the admission pump to fill
|
|
880
|
+
* the freed slot, and checks if the processor is now idle.
|
|
881
|
+
*
|
|
882
|
+
* @param state - The request state to remove.
|
|
883
|
+
*/
|
|
884
|
+
removeFromActiveState(state) {
|
|
885
|
+
const idx = this.activeRequestOrder.indexOf(state.requestId);
|
|
886
|
+
if (idx !== -1) {
|
|
887
|
+
this.activeRequestOrder.splice(idx, 1);
|
|
888
|
+
}
|
|
889
|
+
this.requestStates.delete(state.requestId);
|
|
890
|
+
this.wakeAdmissionPump();
|
|
891
|
+
this.checkIdle();
|
|
892
|
+
}
|
|
893
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
894
|
+
// Idle checking
|
|
895
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
896
|
+
/**
|
|
897
|
+
* Returns true if the processor has no pending, active, or in-flight work.
|
|
898
|
+
*
|
|
899
|
+
* @returns true if fully idle.
|
|
900
|
+
*/
|
|
901
|
+
isIdle() {
|
|
902
|
+
return this.pendingRequests.length === 0 && this.activeRequestOrder.length === 0;
|
|
903
|
+
}
|
|
904
|
+
/**
|
|
905
|
+
* Checks whether the processor has become idle and resolves or rejects the
|
|
906
|
+
* waitForIdle promise if so.
|
|
907
|
+
*/
|
|
908
|
+
checkIdle() {
|
|
909
|
+
if (!this.isIdle())
|
|
910
|
+
return;
|
|
911
|
+
if (this.idleWaiters.length === 0)
|
|
912
|
+
return;
|
|
913
|
+
const waiters = this.idleWaiters.splice(0);
|
|
914
|
+
if (this.fatalError) {
|
|
915
|
+
for (const waiter of waiters) {
|
|
916
|
+
waiter.reject(this.fatalError);
|
|
917
|
+
}
|
|
918
|
+
}
|
|
919
|
+
else {
|
|
920
|
+
for (const waiter of waiters) {
|
|
921
|
+
waiter.resolve();
|
|
922
|
+
}
|
|
923
|
+
}
|
|
924
|
+
}
|
|
925
|
+
/**
|
|
926
|
+
* If any onRequestSettled callbacks threw, throws the first error.
|
|
927
|
+
* Called after waitForIdle resolves to surface callback errors.
|
|
928
|
+
*/
|
|
929
|
+
rejectIfSettledCallbackErrors() {
|
|
930
|
+
if (this.settledCallbackErrors.length > 0) {
|
|
931
|
+
const error = this.settledCallbackErrors[0];
|
|
932
|
+
this.settledCallbackErrors = [];
|
|
933
|
+
throw error;
|
|
934
|
+
}
|
|
935
|
+
}
|
|
936
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
937
|
+
// Worker failure handling
|
|
938
|
+
// ──────────────────────────────────────────────────────────────────────────
|
|
939
|
+
/**
|
|
940
|
+
* Handles an unexpected worker failure by marking the processor as dead,
|
|
941
|
+
* terminating remaining workers, and rejecting all active and pending requests.
|
|
942
|
+
*
|
|
943
|
+
* @param error - The error that caused the worker failure.
|
|
944
|
+
*/
|
|
945
|
+
handleWorkerFailure(error) {
|
|
946
|
+
this.workersDead = true;
|
|
947
|
+
this.isShutdown = true;
|
|
948
|
+
this.fatalError = error;
|
|
949
|
+
// Terminate remaining workers (fire-and-forget)
|
|
950
|
+
for (const worker of this.workers) {
|
|
951
|
+
worker.terminate().catch(() => { });
|
|
952
|
+
}
|
|
953
|
+
// Settle all active requests as failed
|
|
954
|
+
for (const requestId of [...this.activeRequestOrder]) {
|
|
955
|
+
const state = this.requestStates.get(requestId);
|
|
956
|
+
if (state && !state.settled) {
|
|
957
|
+
this.settleRequestAsError(state, error);
|
|
958
|
+
}
|
|
959
|
+
}
|
|
960
|
+
// Reject all pending requests
|
|
961
|
+
while (this.pendingRequests.length > 0) {
|
|
962
|
+
const submitted = this.pendingRequests.shift();
|
|
963
|
+
void this.config
|
|
964
|
+
.onRequestSettled({
|
|
965
|
+
status: 'rejected',
|
|
966
|
+
requestId: submitted.requestId,
|
|
967
|
+
request: submitted.request,
|
|
968
|
+
error
|
|
969
|
+
})
|
|
970
|
+
.catch(() => { });
|
|
971
|
+
}
|
|
972
|
+
// Reject all idle waiters
|
|
973
|
+
const waiters = this.idleWaiters.splice(0);
|
|
974
|
+
for (const waiter of waiters) {
|
|
975
|
+
waiter.reject(error);
|
|
976
|
+
}
|
|
977
|
+
}
|
|
978
|
+
}
|
|
979
|
+
// CommonJS export of the processor class (compiled build artifact; original
// TypeScript sources are referenced by the source map below).
exports.WhoCanProcessor = WhoCanProcessor;
//# sourceMappingURL=WhoCanProcessor.js.map
|