@juspay/yama 1.5.0 → 1.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/core/providers/BitbucketProvider.js +36 -23
- package/dist/features/CodeReviewer.d.ts +12 -0
- package/dist/features/CodeReviewer.js +183 -4
- package/dist/features/MultiInstanceProcessor.js +3 -2
- package/dist/types/index.d.ts +37 -0
- package/dist/types/index.js +65 -0
- package/dist/utils/Cache.d.ts +8 -2
- package/dist/utils/Cache.js +190 -10
- package/dist/utils/ParallelProcessing.d.ts +28 -0
- package/dist/utils/ParallelProcessing.js +108 -3
- package/dist/utils/RetryManager.d.ts +78 -0
- package/dist/utils/RetryManager.js +205 -0
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
|
@@ -1,3 +1,9 @@
|
|
|
1
|
+
## [1.5.1](https://github.com/juspay/yama/compare/v1.5.0...v1.5.1) (2025-09-24)
|
|
2
|
+
|
|
3
|
+
### Bug Fixes
|
|
4
|
+
|
|
5
|
+
- **allocation:** Added fix for batch token allocation ([11f7192](https://github.com/juspay/yama/commit/11f719257a75ba946c45612e336db69a17cf278d))
|
|
6
|
+
|
|
1
7
|
# [1.5.0](https://github.com/juspay/yama/compare/v1.4.1...v1.5.0) (2025-09-19)
|
|
2
8
|
|
|
3
9
|
### Features
|
|
@@ -5,6 +5,7 @@
|
|
|
5
5
|
import { ProviderError, } from "../../types/index.js";
|
|
6
6
|
import { logger } from "../../utils/Logger.js";
|
|
7
7
|
import { cache, Cache } from "../../utils/Cache.js";
|
|
8
|
+
import { RetryManager } from "../../utils/RetryManager.js";
|
|
8
9
|
export class BitbucketProvider {
|
|
9
10
|
apiClient;
|
|
10
11
|
branchHandlers;
|
|
@@ -85,14 +86,14 @@ export class BitbucketProvider {
|
|
|
85
86
|
throw new ProviderError("Branch name is required");
|
|
86
87
|
}
|
|
87
88
|
const cacheKey = Cache.keys.branchInfo(workspace, repository, branch);
|
|
88
|
-
return cache.
|
|
89
|
+
return cache.getOrSetResilient(cacheKey, async () => {
|
|
89
90
|
logger.debug(`Finding PR for branch: ${workspace}/${repository}@${branch}`);
|
|
90
|
-
const rawBranchData = await this.branchHandlers.handleGetBranch({
|
|
91
|
+
const rawBranchData = await RetryManager.withRetry(() => this.branchHandlers.handleGetBranch({
|
|
91
92
|
workspace,
|
|
92
93
|
repository,
|
|
93
94
|
branch_name: branch,
|
|
94
95
|
include_merged_prs: false,
|
|
95
|
-
});
|
|
96
|
+
}), `Find PR for branch ${workspace}/${repository}@${branch}`);
|
|
96
97
|
const branchData = this.parseMCPResponse(rawBranchData);
|
|
97
98
|
// Direct data extraction
|
|
98
99
|
if (branchData.open_pull_requests &&
|
|
@@ -131,13 +132,13 @@ export class BitbucketProvider {
|
|
|
131
132
|
throw new ProviderError("Pull request ID is required");
|
|
132
133
|
}
|
|
133
134
|
const cacheKey = Cache.keys.prInfo(workspace, repository, pullRequestId);
|
|
134
|
-
return cache.
|
|
135
|
+
return cache.getOrSetResilient(cacheKey, async () => {
|
|
135
136
|
logger.debug(`Getting PR details: ${workspace}/${repository}#${pullRequestId}`);
|
|
136
|
-
const rawPRDetails = await this.pullRequestHandlers.handleGetPullRequest({
|
|
137
|
+
const rawPRDetails = await RetryManager.withRetry(() => this.pullRequestHandlers.handleGetPullRequest({
|
|
137
138
|
workspace,
|
|
138
139
|
repository,
|
|
139
140
|
pull_request_id: pullRequestId,
|
|
140
|
-
});
|
|
141
|
+
}), `Get PR details ${workspace}/${repository}#${pullRequestId}`);
|
|
141
142
|
const prData = this.parseMCPResponse(rawPRDetails);
|
|
142
143
|
// Debug author data structure
|
|
143
144
|
logger.debug(`PR Details author data structure: ${JSON.stringify(prData.author, null, 2)}`);
|
|
@@ -175,7 +176,7 @@ export class BitbucketProvider {
|
|
|
175
176
|
const cacheKey = includePatterns && includePatterns.length === 1
|
|
176
177
|
? `file-diff:${workspace}:${repository}:${pullRequestId}:${includePatterns[0]}`
|
|
177
178
|
: Cache.keys.prDiff(workspace, repository, pullRequestId);
|
|
178
|
-
return cache.
|
|
179
|
+
return cache.getOrSetResilient(cacheKey, async () => {
|
|
179
180
|
logger.debug(`Getting PR diff: ${workspace}/${repository}#${pullRequestId}`);
|
|
180
181
|
if (includePatterns) {
|
|
181
182
|
logger.debug(`Include patterns: ${includePatterns.join(", ")}`);
|
|
@@ -191,7 +192,7 @@ export class BitbucketProvider {
|
|
|
191
192
|
if (includePatterns) {
|
|
192
193
|
args.include_patterns = includePatterns;
|
|
193
194
|
}
|
|
194
|
-
const rawDiff = await this.reviewHandlers.handleGetPullRequestDiff(args);
|
|
195
|
+
const rawDiff = await RetryManager.withRetry(() => this.reviewHandlers.handleGetPullRequestDiff(args), `Get PR diff ${workspace}/${repository}#${pullRequestId}`);
|
|
195
196
|
const diffData = this.parseMCPResponse(rawDiff);
|
|
196
197
|
return {
|
|
197
198
|
diff: diffData.diff || "",
|
|
@@ -207,18 +208,28 @@ export class BitbucketProvider {
|
|
|
207
208
|
async getFileContent(workspace, repository, filePath, branch) {
|
|
208
209
|
await this.initialize();
|
|
209
210
|
const cacheKey = Cache.keys.fileContent(workspace, repository, filePath, branch);
|
|
210
|
-
return cache.
|
|
211
|
+
return cache.getOrSetResilient(cacheKey, async () => {
|
|
211
212
|
logger.debug(`Getting file content: ${workspace}/${repository}/${filePath}@${branch}`);
|
|
212
|
-
const result = await this.fileHandlers.handleGetFileContent({
|
|
213
|
+
const result = await RetryManager.withRetry(() => this.fileHandlers.handleGetFileContent({
|
|
213
214
|
workspace,
|
|
214
215
|
repository,
|
|
215
216
|
file_path: filePath,
|
|
216
217
|
branch,
|
|
217
|
-
});
|
|
218
|
-
// Handle file content response
|
|
219
|
-
if (result.content &&
|
|
220
|
-
|
|
221
|
-
|
|
218
|
+
}), `Get file content ${workspace}/${repository}/${filePath}@${branch}`);
|
|
219
|
+
// Handle file content response with proper error handling for plain text files
|
|
220
|
+
if (result.content &&
|
|
221
|
+
result.content[0] &&
|
|
222
|
+
result.content[0].text) {
|
|
223
|
+
try {
|
|
224
|
+
const fileResponse = JSON.parse(result.content[0].text);
|
|
225
|
+
return fileResponse.content || "";
|
|
226
|
+
}
|
|
227
|
+
catch (parseError) {
|
|
228
|
+
// If JSON parsing fails, the content might be plain text (like .clinerules)
|
|
229
|
+
// Return the text content directly
|
|
230
|
+
logger.debug(`JSON parsing failed for ${filePath}, treating as plain text: ${parseError.message}`);
|
|
231
|
+
return result.content[0].text || "";
|
|
232
|
+
}
|
|
222
233
|
}
|
|
223
234
|
// Handle direct response format
|
|
224
235
|
return result.content || "";
|
|
@@ -230,14 +241,14 @@ export class BitbucketProvider {
|
|
|
230
241
|
async listDirectoryContent(workspace, repository, path, branch) {
|
|
231
242
|
await this.initialize();
|
|
232
243
|
const cacheKey = Cache.keys.directoryContent(workspace, repository, path, branch);
|
|
233
|
-
return cache.
|
|
244
|
+
return cache.getOrSetResilient(cacheKey, async () => {
|
|
234
245
|
logger.debug(`Listing directory: ${workspace}/${repository}/${path}@${branch}`);
|
|
235
|
-
const result = await this.fileHandlers.handleListDirectoryContent({
|
|
246
|
+
const result = await RetryManager.withRetry(() => this.fileHandlers.handleListDirectoryContent({
|
|
236
247
|
workspace,
|
|
237
248
|
repository,
|
|
238
249
|
path,
|
|
239
250
|
branch,
|
|
240
|
-
});
|
|
251
|
+
}), `List directory ${workspace}/${repository}/${path}@${branch}`);
|
|
241
252
|
const dirData = this.parseMCPResponse(result);
|
|
242
253
|
return dirData.contents || [];
|
|
243
254
|
}, 3600);
|
|
@@ -254,12 +265,12 @@ export class BitbucketProvider {
|
|
|
254
265
|
try {
|
|
255
266
|
logger.debug(`Updating PR description: ${workspace}/${repository}#${pullRequestId}`);
|
|
256
267
|
logger.debug(`Description length: ${description.length} characters`);
|
|
257
|
-
const result = await this.pullRequestHandlers.handleUpdatePullRequest({
|
|
268
|
+
const result = await RetryManager.withRetry(() => this.pullRequestHandlers.handleUpdatePullRequest({
|
|
258
269
|
workspace,
|
|
259
270
|
repository,
|
|
260
271
|
pull_request_id: pullRequestId,
|
|
261
272
|
description: description,
|
|
262
|
-
});
|
|
273
|
+
}), `Update PR description ${workspace}/${repository}#${pullRequestId}`);
|
|
263
274
|
// Log the raw MCP response
|
|
264
275
|
logger.debug(`Raw MCP update response: ${JSON.stringify(result, null, 2)}`);
|
|
265
276
|
const updateData = this.parseMCPResponse(result);
|
|
@@ -335,10 +346,12 @@ export class BitbucketProvider {
|
|
|
335
346
|
logger.debug(` Type: ${options.lineType || "CONTEXT"}`);
|
|
336
347
|
}
|
|
337
348
|
logger.debug(`🔍 MCP addComment args: ${JSON.stringify(args, null, 2)}`);
|
|
338
|
-
const result = await this.pullRequestHandlers.handleAddComment(args);
|
|
349
|
+
const result = await RetryManager.withRetry(() => this.pullRequestHandlers.handleAddComment(args), `Add comment to PR ${workspace}/${repository}#${pullRequestId}`);
|
|
339
350
|
// Parse response exactly like pr-police.js
|
|
340
351
|
let commentData;
|
|
341
|
-
if (result.content &&
|
|
352
|
+
if (result.content &&
|
|
353
|
+
result.content[0] &&
|
|
354
|
+
result.content[0].text) {
|
|
342
355
|
commentData = JSON.parse(result.content[0].text);
|
|
343
356
|
}
|
|
344
357
|
else {
|
|
@@ -361,7 +374,7 @@ export class BitbucketProvider {
|
|
|
361
374
|
pull_request_id: pullRequestId,
|
|
362
375
|
comment_text: `**File: ${options.filePath}**\n\n${commentText}`,
|
|
363
376
|
};
|
|
364
|
-
const fallbackResult = await this.pullRequestHandlers.handleAddComment(fallbackArgs);
|
|
377
|
+
const fallbackResult = await RetryManager.withRetry(() => this.pullRequestHandlers.handleAddComment(fallbackArgs), `Add fallback comment to PR ${workspace}/${repository}#${pullRequestId}`);
|
|
365
378
|
let fallbackData;
|
|
366
379
|
if (fallbackResult.content &&
|
|
367
380
|
fallbackResult.content[0] &&
|
|
@@ -108,6 +108,18 @@ export declare class CodeReviewer {
|
|
|
108
108
|
* Process batches serially (original implementation)
|
|
109
109
|
*/
|
|
110
110
|
private processSerially;
|
|
111
|
+
/**
|
|
112
|
+
* Pre-allocate tokens based on distribution strategy with proper integer arithmetic
|
|
113
|
+
*/
|
|
114
|
+
private preAllocateTokens;
|
|
115
|
+
/**
|
|
116
|
+
* Try weighted allocation for batches
|
|
117
|
+
*/
|
|
118
|
+
private tryWeightedAllocation;
|
|
119
|
+
/**
|
|
120
|
+
* Try equal allocation for batches
|
|
121
|
+
*/
|
|
122
|
+
private tryEqualAllocation;
|
|
111
123
|
/**
|
|
112
124
|
* Process a single batch with concurrency control
|
|
113
125
|
*/
|
|
@@ -922,7 +922,25 @@ ${recommendation}
|
|
|
922
922
|
// Initialize concurrency control
|
|
923
923
|
const semaphore = new Semaphore(optimalConcurrency);
|
|
924
924
|
const tokenBudget = new TokenBudgetManager(this.getSafeTokenLimit() * 0.8); // 80% for safety
|
|
925
|
-
|
|
925
|
+
// NEW: Pre-allocate tokens based on distribution strategy
|
|
926
|
+
const distributionStrategy = parallelConfig.tokenBudgetDistribution || "equal";
|
|
927
|
+
logger.info(`🎯 Using ${distributionStrategy} token distribution strategy for ${batches.length} batches`);
|
|
928
|
+
const tokenAllocations = this.preAllocateTokens(batches, tokenBudget, distributionStrategy);
|
|
929
|
+
if (!tokenAllocations) {
|
|
930
|
+
const totalRequired = batches.reduce((sum, b) => sum + b.estimatedTokens, 0);
|
|
931
|
+
const totalBudget = tokenBudget.getTotalBudget();
|
|
932
|
+
throw new Error(`Insufficient token budget: required ${totalRequired}, available ${totalBudget}. ` +
|
|
933
|
+
`Consider reducing batch count (current: ${batches.length}) or increasing token limit.`);
|
|
934
|
+
}
|
|
935
|
+
// Apply pre-allocated tokens to the budget manager
|
|
936
|
+
if (!tokenBudget.preAllocateAllBatches(tokenAllocations)) {
|
|
937
|
+
throw new Error("Failed to pre-allocate tokens for all batches");
|
|
938
|
+
}
|
|
939
|
+
logger.info(`🎯 Parallel processing: ${optimalConcurrency} concurrent batches, ${tokenBudget.getTotalBudget()} token budget (${distributionStrategy} distribution)`);
|
|
940
|
+
// Log allocation details
|
|
941
|
+
tokenAllocations.forEach((tokens, batchIndex) => {
|
|
942
|
+
logger.debug(`Batch ${batchIndex + 1}: ${tokens} tokens allocated`);
|
|
943
|
+
});
|
|
926
944
|
const batchResults = new Array(batches.length);
|
|
927
945
|
const allViolations = [];
|
|
928
946
|
const processingPromises = [];
|
|
@@ -1006,6 +1024,149 @@ ${recommendation}
|
|
|
1006
1024
|
logger.success(`🎯 Serial processing completed: ${allViolations.length} total violations from ${batches.length} batches in ${Math.round(totalTime / 1000)}s (avg ${avgBatchSize.toFixed(1)} files/batch)`);
|
|
1007
1025
|
return { violations: allViolations, batchResults };
|
|
1008
1026
|
}
|
|
1027
|
+
/**
|
|
1028
|
+
* Pre-allocate tokens based on distribution strategy with proper integer arithmetic
|
|
1029
|
+
*/
|
|
1030
|
+
preAllocateTokens(batches, tokenBudget, strategy) {
|
|
1031
|
+
// Ensure we're working with integer budget to avoid floating-point issues
|
|
1032
|
+
const totalBudget = Math.floor(tokenBudget.getTotalBudget());
|
|
1033
|
+
const allocations = new Map();
|
|
1034
|
+
if (strategy === "equal") {
|
|
1035
|
+
// Equal distribution: divide budget equally among all batches with proper remainder handling
|
|
1036
|
+
const baseTokens = Math.floor(totalBudget / batches.length);
|
|
1037
|
+
const remainder = totalBudget % batches.length;
|
|
1038
|
+
if (baseTokens < 1000) {
|
|
1039
|
+
// Minimum viable tokens per batch
|
|
1040
|
+
logger.error(`Equal distribution would give ${baseTokens} tokens per batch, which is insufficient`);
|
|
1041
|
+
return null;
|
|
1042
|
+
}
|
|
1043
|
+
let totalAllocated = 0;
|
|
1044
|
+
for (let i = 0; i < batches.length; i++) {
|
|
1045
|
+
// Distribute remainder to first few batches
|
|
1046
|
+
const tokens = baseTokens + (i < remainder ? 1 : 0);
|
|
1047
|
+
allocations.set(i, tokens);
|
|
1048
|
+
totalAllocated += tokens;
|
|
1049
|
+
}
|
|
1050
|
+
// Double-check that we haven't exceeded budget due to any calculation errors
|
|
1051
|
+
if (totalAllocated > totalBudget) {
|
|
1052
|
+
logger.error(`Equal distribution calculation error: ${totalAllocated} > ${totalBudget}`);
|
|
1053
|
+
// Adjust the last batch to fit within budget
|
|
1054
|
+
const lastBatchIndex = batches.length - 1;
|
|
1055
|
+
const lastBatchTokens = allocations.get(lastBatchIndex);
|
|
1056
|
+
const adjustment = totalAllocated - totalBudget;
|
|
1057
|
+
const newLastBatchTokens = lastBatchTokens - adjustment;
|
|
1058
|
+
if (newLastBatchTokens < 1000) {
|
|
1059
|
+
logger.error(`Adjustment would result in last batch having ${newLastBatchTokens} tokens, which is below the minimum threshold (1000). Aborting allocation.`);
|
|
1060
|
+
return null;
|
|
1061
|
+
}
|
|
1062
|
+
allocations.set(lastBatchIndex, newLastBatchTokens);
|
|
1063
|
+
totalAllocated = totalBudget;
|
|
1064
|
+
logger.warn(`Adjusted last batch by -${adjustment} tokens to fit budget`);
|
|
1065
|
+
}
|
|
1066
|
+
logger.info(`Equal distribution: ${baseTokens} tokens per batch for ${batches.length} batches`);
|
|
1067
|
+
logger.debug(`Pre-allocated ${totalAllocated} tokens across ${batches.length} batches (${totalBudget - totalAllocated} remaining)`);
|
|
1068
|
+
}
|
|
1069
|
+
else if (strategy === "weighted") {
|
|
1070
|
+
// Weighted distribution: try weighted first, automatically fallback to equal if needed
|
|
1071
|
+
logger.debug(`Attempting weighted distribution...`);
|
|
1072
|
+
const weightedResult = this.tryWeightedAllocation(batches, totalBudget);
|
|
1073
|
+
if (weightedResult) {
|
|
1074
|
+
// Weighted allocation succeeded
|
|
1075
|
+
weightedResult.forEach((tokens, batchIndex) => {
|
|
1076
|
+
allocations.set(batchIndex, tokens);
|
|
1077
|
+
});
|
|
1078
|
+
logger.info(`✅ Weighted distribution: optimal allocation successful`);
|
|
1079
|
+
logger.debug(`Pre-allocated ${Array.from(weightedResult.values()).reduce((sum, tokens) => sum + tokens, 0)} tokens across ${batches.length} batches`);
|
|
1080
|
+
}
|
|
1081
|
+
else {
|
|
1082
|
+
// Weighted allocation failed, automatically fallback to equal distribution
|
|
1083
|
+
logger.warn(`⚠️ Weighted distribution: insufficient budget for optimal allocation, falling back to equal distribution`);
|
|
1084
|
+
const equalResult = this.tryEqualAllocation(batches, totalBudget);
|
|
1085
|
+
if (!equalResult) {
|
|
1086
|
+
logger.error(`Weighted distribution: both optimal and equal allocation failed`);
|
|
1087
|
+
return null;
|
|
1088
|
+
}
|
|
1089
|
+
equalResult.forEach((tokens, batchIndex) => {
|
|
1090
|
+
allocations.set(batchIndex, tokens);
|
|
1091
|
+
});
|
|
1092
|
+
logger.info(`✅ Weighted distribution: equal allocation fallback successful`);
|
|
1093
|
+
logger.debug(`Pre-allocated ${Array.from(equalResult.values()).reduce((sum, tokens) => sum + tokens, 0)} tokens across ${batches.length} batches`);
|
|
1094
|
+
}
|
|
1095
|
+
}
|
|
1096
|
+
// Final validation with strict integer checking
|
|
1097
|
+
const totalAllocated = Array.from(allocations.values()).reduce((sum, tokens) => sum + tokens, 0);
|
|
1098
|
+
if (totalAllocated > totalBudget) {
|
|
1099
|
+
logger.error(`CRITICAL: Total allocation (${totalAllocated}) exceeds budget (${totalBudget}) - this should never happen`);
|
|
1100
|
+
logger.error(`Budget type: ${typeof totalBudget}, Allocation type: ${typeof totalAllocated}`);
|
|
1101
|
+
logger.error(`Individual allocations: ${Array.from(allocations.entries())
|
|
1102
|
+
.map(([i, tokens]) => `batch${i}:${tokens}`)
|
|
1103
|
+
.join(", ")}`);
|
|
1104
|
+
throw new Error(`Total allocation (${totalAllocated}) exceeds budget (${totalBudget}) - this should never happen`);
|
|
1105
|
+
}
|
|
1106
|
+
return allocations;
|
|
1107
|
+
}
|
|
1108
|
+
/**
|
|
1109
|
+
* Try weighted allocation for batches
|
|
1110
|
+
*/
|
|
1111
|
+
tryWeightedAllocation(batches, totalBudget) {
|
|
1112
|
+
const totalEstimated = batches.reduce((sum, batch) => sum + batch.estimatedTokens, 0);
|
|
1113
|
+
if (totalEstimated > totalBudget) {
|
|
1114
|
+
logger.debug(`Total estimated tokens (${totalEstimated}) exceed budget (${totalBudget})`);
|
|
1115
|
+
return null;
|
|
1116
|
+
}
|
|
1117
|
+
let totalAllocated = 0;
|
|
1118
|
+
const minTokensPerBatch = 1000;
|
|
1119
|
+
const allocations = new Map();
|
|
1120
|
+
for (let i = 0; i < batches.length; i++) {
|
|
1121
|
+
const batch = batches[i];
|
|
1122
|
+
const weight = batch.estimatedTokens / totalEstimated;
|
|
1123
|
+
const allocation = Math.floor(weight * totalBudget);
|
|
1124
|
+
const finalAllocation = Math.max(allocation, minTokensPerBatch);
|
|
1125
|
+
allocations.set(i, finalAllocation);
|
|
1126
|
+
totalAllocated += finalAllocation;
|
|
1127
|
+
}
|
|
1128
|
+
// Check if we exceeded budget due to minimum allocations
|
|
1129
|
+
if (totalAllocated > totalBudget) {
|
|
1130
|
+
logger.debug(`Weighted allocation with minimums (${totalAllocated}) exceeds budget (${totalBudget})`);
|
|
1131
|
+
return null;
|
|
1132
|
+
}
|
|
1133
|
+
return allocations;
|
|
1134
|
+
}
|
|
1135
|
+
/**
|
|
1136
|
+
* Try equal allocation for batches
|
|
1137
|
+
*/
|
|
1138
|
+
tryEqualAllocation(batches, totalBudget) {
|
|
1139
|
+
const baseTokens = Math.floor(totalBudget / batches.length);
|
|
1140
|
+
const remainder = totalBudget % batches.length;
|
|
1141
|
+
if (baseTokens < 1000) {
|
|
1142
|
+
// Minimum viable tokens per batch
|
|
1143
|
+
logger.debug(`Equal distribution would give ${baseTokens} tokens per batch, which is insufficient`);
|
|
1144
|
+
return null;
|
|
1145
|
+
}
|
|
1146
|
+
const allocations = new Map();
|
|
1147
|
+
let totalAllocated = 0;
|
|
1148
|
+
for (let i = 0; i < batches.length; i++) {
|
|
1149
|
+
// Distribute remainder to first few batches
|
|
1150
|
+
const tokens = baseTokens + (i < remainder ? 1 : 0);
|
|
1151
|
+
allocations.set(i, tokens);
|
|
1152
|
+
totalAllocated += tokens;
|
|
1153
|
+
}
|
|
1154
|
+
// Double-check that we haven't exceeded budget due to any calculation errors
|
|
1155
|
+
if (totalAllocated > totalBudget) {
|
|
1156
|
+
logger.debug(`Equal distribution calculation error: ${totalAllocated} > ${totalBudget}`);
|
|
1157
|
+
// Adjust the last batch to fit within budget
|
|
1158
|
+
const lastBatchIndex = batches.length - 1;
|
|
1159
|
+
const lastBatchTokens = allocations.get(lastBatchIndex);
|
|
1160
|
+
const adjustment = totalAllocated - totalBudget;
|
|
1161
|
+
const newLastBatchTokens = lastBatchTokens - adjustment;
|
|
1162
|
+
if (newLastBatchTokens < 1000) {
|
|
1163
|
+
logger.error(`Adjustment would result in last batch having ${newLastBatchTokens} tokens, which is below the minimum threshold (1000). Aborting allocation.`);
|
|
1164
|
+
return null;
|
|
1165
|
+
}
|
|
1166
|
+
allocations.set(lastBatchIndex, newLastBatchTokens);
|
|
1167
|
+
}
|
|
1168
|
+
return allocations;
|
|
1169
|
+
}
|
|
1009
1170
|
/**
|
|
1010
1171
|
* Process a single batch with concurrency control
|
|
1011
1172
|
*/
|
|
@@ -1013,9 +1174,22 @@ ${recommendation}
|
|
|
1013
1174
|
// Acquire semaphore permit
|
|
1014
1175
|
await semaphore.acquire();
|
|
1015
1176
|
try {
|
|
1016
|
-
//
|
|
1017
|
-
if (
|
|
1018
|
-
|
|
1177
|
+
// NEW: In pre-allocation mode, tokens are already allocated, just verify and mark as processing
|
|
1178
|
+
if (tokenBudget.isPreAllocationMode()) {
|
|
1179
|
+
const batchState = tokenBudget.getBatchState(batchIndex);
|
|
1180
|
+
if (batchState !== "pending") {
|
|
1181
|
+
throw new Error(`Batch ${batchIndex + 1} is not in pending state (current: ${batchState})`);
|
|
1182
|
+
}
|
|
1183
|
+
// Mark as processing (this is handled in allocateForBatch for pre-allocation mode)
|
|
1184
|
+
if (!tokenBudget.allocateForBatch(batchIndex, batch.estimatedTokens)) {
|
|
1185
|
+
throw new Error(`Failed to mark batch ${batchIndex + 1} as processing`);
|
|
1186
|
+
}
|
|
1187
|
+
}
|
|
1188
|
+
else {
|
|
1189
|
+
// Legacy mode: allocate tokens dynamically
|
|
1190
|
+
if (!tokenBudget.allocateForBatch(batchIndex, batch.estimatedTokens)) {
|
|
1191
|
+
throw new Error(`Insufficient token budget for batch ${batchIndex + 1}`);
|
|
1192
|
+
}
|
|
1019
1193
|
}
|
|
1020
1194
|
logger.info(`🔄 Processing batch ${batchIndex + 1}/${totalBatches} (${batch.files.length} files, parallel)`);
|
|
1021
1195
|
// Process the batch (existing logic)
|
|
@@ -1023,6 +1197,11 @@ ${recommendation}
|
|
|
1023
1197
|
logger.info(`✅ Batch ${batchIndex + 1} completed: ${result.violations.length} violations in ${Math.round(result.processingTime / 1000)}s`);
|
|
1024
1198
|
return result;
|
|
1025
1199
|
}
|
|
1200
|
+
catch (error) {
|
|
1201
|
+
// Mark batch as failed in token budget
|
|
1202
|
+
tokenBudget.markBatchFailed(batchIndex, error.message);
|
|
1203
|
+
throw error;
|
|
1204
|
+
}
|
|
1026
1205
|
finally {
|
|
1027
1206
|
// Always release resources
|
|
1028
1207
|
tokenBudget.releaseBatch(batchIndex);
|
|
@@ -237,8 +237,9 @@ export class MultiInstanceProcessor {
|
|
|
237
237
|
minLimit = Math.min(minLimit, instanceLimit);
|
|
238
238
|
}
|
|
239
239
|
// Total budget is the sum of all instance limits, but with safety margin
|
|
240
|
-
|
|
241
|
-
|
|
240
|
+
// Use Math.floor to ensure integer result and avoid floating-point precision issues
|
|
241
|
+
const totalBudget = Math.floor(instances.length * minLimit * 0.8); // 80% safety margin
|
|
242
|
+
logger.debug(`Calculated total token budget: ${totalBudget} (${instances.length} instances × ${minLimit} × 0.8, floored)`);
|
|
242
243
|
return totalBudget;
|
|
243
244
|
}
|
|
244
245
|
/**
|
package/dist/types/index.d.ts
CHANGED
|
@@ -569,6 +569,7 @@ export interface TokenBudgetManagerInterface {
|
|
|
569
569
|
getAvailableBudget(): number;
|
|
570
570
|
getTotalBudget(): number;
|
|
571
571
|
getUsedTokens(): number;
|
|
572
|
+
preAllocateAllBatches(allocations: Map<number, number>): boolean;
|
|
572
573
|
}
|
|
573
574
|
export declare class GuardianError extends Error {
|
|
574
575
|
code: string;
|
|
@@ -584,4 +585,40 @@ export declare class ProviderError extends GuardianError {
|
|
|
584
585
|
export declare class ValidationError extends GuardianError {
|
|
585
586
|
constructor(message: string, context?: any);
|
|
586
587
|
}
|
|
588
|
+
export declare enum CacheErrorCode {
|
|
589
|
+
CACHE_SYSTEM_FAILURE = "CACHE_SYSTEM_FAILURE",
|
|
590
|
+
CACHE_MEMORY_EXHAUSTED = "CACHE_MEMORY_EXHAUSTED",
|
|
591
|
+
CACHE_INITIALIZATION_FAILED = "CACHE_INITIALIZATION_FAILED",
|
|
592
|
+
CACHE_STORAGE_FULL = "CACHE_STORAGE_FULL",
|
|
593
|
+
CACHE_STORAGE_PERMISSION = "CACHE_STORAGE_PERMISSION",
|
|
594
|
+
CACHE_STORAGE_CORRUPTION = "CACHE_STORAGE_CORRUPTION",
|
|
595
|
+
CACHE_NETWORK_CONNECTION = "CACHE_NETWORK_CONNECTION",
|
|
596
|
+
CACHE_NETWORK_TIMEOUT = "CACHE_NETWORK_TIMEOUT",
|
|
597
|
+
CACHE_NETWORK_AUTH = "CACHE_NETWORK_AUTH",
|
|
598
|
+
CACHE_CONFIG_INVALID = "CACHE_CONFIG_INVALID",
|
|
599
|
+
CACHE_CONFIG_MISSING = "CACHE_CONFIG_MISSING",
|
|
600
|
+
CACHE_OPERATION_FAILED = "CACHE_OPERATION_FAILED",
|
|
601
|
+
CACHE_SERIALIZATION_ERROR = "CACHE_SERIALIZATION_ERROR",
|
|
602
|
+
CACHE_KEY_INVALID = "CACHE_KEY_INVALID"
|
|
603
|
+
}
|
|
604
|
+
export declare abstract class CacheError extends GuardianError {
|
|
605
|
+
operation?: string | undefined;
|
|
606
|
+
key?: string | undefined;
|
|
607
|
+
constructor(code: CacheErrorCode, message: string, operation?: string | undefined, key?: string | undefined, context?: any);
|
|
608
|
+
}
|
|
609
|
+
export declare class CacheSystemError extends CacheError {
|
|
610
|
+
constructor(message: string, operation?: string, key?: string, context?: any);
|
|
611
|
+
}
|
|
612
|
+
export declare class CacheStorageError extends CacheError {
|
|
613
|
+
constructor(code: CacheErrorCode | undefined, message: string, operation?: string, key?: string, context?: any);
|
|
614
|
+
}
|
|
615
|
+
export declare class CacheNetworkError extends CacheError {
|
|
616
|
+
constructor(code: CacheErrorCode | undefined, message: string, operation?: string, key?: string, context?: any);
|
|
617
|
+
}
|
|
618
|
+
export declare class CacheConfigurationError extends CacheError {
|
|
619
|
+
constructor(code: CacheErrorCode | undefined, message: string, operation?: string, key?: string, context?: any);
|
|
620
|
+
}
|
|
621
|
+
export declare class CacheOperationError extends CacheError {
|
|
622
|
+
constructor(code: CacheErrorCode | undefined, message: string, operation?: string, key?: string, context?: any);
|
|
623
|
+
}
|
|
587
624
|
//# sourceMappingURL=index.d.ts.map
|
package/dist/types/index.js
CHANGED
|
@@ -34,6 +34,71 @@ export class ValidationError extends GuardianError {
|
|
|
34
34
|
}
|
|
35
35
|
}
|
|
36
36
|
// ============================================================================
|
|
37
|
+
// Cache Error Types
|
|
38
|
+
// ============================================================================
|
|
39
|
+
export var CacheErrorCode;
|
|
40
|
+
(function (CacheErrorCode) {
|
|
41
|
+
// System-level cache errors
|
|
42
|
+
CacheErrorCode["CACHE_SYSTEM_FAILURE"] = "CACHE_SYSTEM_FAILURE";
|
|
43
|
+
CacheErrorCode["CACHE_MEMORY_EXHAUSTED"] = "CACHE_MEMORY_EXHAUSTED";
|
|
44
|
+
CacheErrorCode["CACHE_INITIALIZATION_FAILED"] = "CACHE_INITIALIZATION_FAILED";
|
|
45
|
+
// Storage-related errors
|
|
46
|
+
CacheErrorCode["CACHE_STORAGE_FULL"] = "CACHE_STORAGE_FULL";
|
|
47
|
+
CacheErrorCode["CACHE_STORAGE_PERMISSION"] = "CACHE_STORAGE_PERMISSION";
|
|
48
|
+
CacheErrorCode["CACHE_STORAGE_CORRUPTION"] = "CACHE_STORAGE_CORRUPTION";
|
|
49
|
+
// Network-related errors (for future Redis support)
|
|
50
|
+
CacheErrorCode["CACHE_NETWORK_CONNECTION"] = "CACHE_NETWORK_CONNECTION";
|
|
51
|
+
CacheErrorCode["CACHE_NETWORK_TIMEOUT"] = "CACHE_NETWORK_TIMEOUT";
|
|
52
|
+
CacheErrorCode["CACHE_NETWORK_AUTH"] = "CACHE_NETWORK_AUTH";
|
|
53
|
+
// Configuration errors
|
|
54
|
+
CacheErrorCode["CACHE_CONFIG_INVALID"] = "CACHE_CONFIG_INVALID";
|
|
55
|
+
CacheErrorCode["CACHE_CONFIG_MISSING"] = "CACHE_CONFIG_MISSING";
|
|
56
|
+
// Operation errors
|
|
57
|
+
CacheErrorCode["CACHE_OPERATION_FAILED"] = "CACHE_OPERATION_FAILED";
|
|
58
|
+
CacheErrorCode["CACHE_SERIALIZATION_ERROR"] = "CACHE_SERIALIZATION_ERROR";
|
|
59
|
+
CacheErrorCode["CACHE_KEY_INVALID"] = "CACHE_KEY_INVALID";
|
|
60
|
+
})(CacheErrorCode || (CacheErrorCode = {}));
|
|
61
|
+
export class CacheError extends GuardianError {
|
|
62
|
+
operation;
|
|
63
|
+
key;
|
|
64
|
+
constructor(code, message, operation, key, context) {
|
|
65
|
+
super(code, message, context);
|
|
66
|
+
this.operation = operation;
|
|
67
|
+
this.key = key;
|
|
68
|
+
this.name = "CacheError";
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
export class CacheSystemError extends CacheError {
|
|
72
|
+
constructor(message, operation, key, context) {
|
|
73
|
+
super(CacheErrorCode.CACHE_SYSTEM_FAILURE, message, operation, key, context);
|
|
74
|
+
this.name = "CacheSystemError";
|
|
75
|
+
}
|
|
76
|
+
}
|
|
77
|
+
export class CacheStorageError extends CacheError {
|
|
78
|
+
constructor(code = CacheErrorCode.CACHE_STORAGE_FULL, message, operation, key, context) {
|
|
79
|
+
super(code, message, operation, key, context);
|
|
80
|
+
this.name = "CacheStorageError";
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
export class CacheNetworkError extends CacheError {
|
|
84
|
+
constructor(code = CacheErrorCode.CACHE_NETWORK_CONNECTION, message, operation, key, context) {
|
|
85
|
+
super(code, message, operation, key, context);
|
|
86
|
+
this.name = "CacheNetworkError";
|
|
87
|
+
}
|
|
88
|
+
}
|
|
89
|
+
export class CacheConfigurationError extends CacheError {
|
|
90
|
+
constructor(code = CacheErrorCode.CACHE_CONFIG_INVALID, message, operation, key, context) {
|
|
91
|
+
super(code, message, operation, key, context);
|
|
92
|
+
this.name = "CacheConfigurationError";
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
export class CacheOperationError extends CacheError {
|
|
96
|
+
constructor(code = CacheErrorCode.CACHE_OPERATION_FAILED, message, operation, key, context) {
|
|
97
|
+
super(code, message, operation, key, context);
|
|
98
|
+
this.name = "CacheOperationError";
|
|
99
|
+
}
|
|
100
|
+
}
|
|
101
|
+
// ============================================================================
|
|
37
102
|
// Export all types - Main file, no re-exports needed
|
|
38
103
|
// ============================================================================
|
|
39
104
|
//# sourceMappingURL=index.js.map
|
package/dist/utils/Cache.d.ts
CHANGED
|
@@ -8,7 +8,7 @@ export declare class Cache implements ICache {
|
|
|
8
8
|
private statsData;
|
|
9
9
|
constructor(options?: CacheOptions);
|
|
10
10
|
/**
|
|
11
|
-
* Get value from cache
|
|
11
|
+
* Get value from cache with resilient error handling
|
|
12
12
|
*/
|
|
13
13
|
get<T>(key: string): T | undefined;
|
|
14
14
|
/**
|
|
@@ -39,15 +39,21 @@ export declare class Cache implements ICache {
|
|
|
39
39
|
misses: number;
|
|
40
40
|
keys: number;
|
|
41
41
|
size: number;
|
|
42
|
+
cacheErrors: number;
|
|
43
|
+
nonCacheErrors: number;
|
|
42
44
|
};
|
|
43
45
|
/**
|
|
44
46
|
* Get detailed cache statistics from node-cache
|
|
45
47
|
*/
|
|
46
48
|
getDetailedStats(): any;
|
|
47
49
|
/**
|
|
48
|
-
* Get or set pattern
|
|
50
|
+
* Get or set pattern with automatic fallback on cache failures
|
|
49
51
|
*/
|
|
50
52
|
getOrSet<T>(key: string, fetchFn: () => Promise<T>, ttl?: number): Promise<T>;
|
|
53
|
+
/**
|
|
54
|
+
* Resilient get or set pattern that bypasses cache entirely on cache system failures
|
|
55
|
+
*/
|
|
56
|
+
getOrSetResilient<T>(key: string, fetchFn: () => Promise<T>, ttl?: number): Promise<T>;
|
|
51
57
|
/**
|
|
52
58
|
* Cache with tags for group invalidation
|
|
53
59
|
*/
|
package/dist/utils/Cache.js
CHANGED
|
@@ -3,12 +3,146 @@
|
|
|
3
3
|
* Provides intelligent caching for PR data, file contents, and AI responses
|
|
4
4
|
*/
|
|
5
5
|
import NodeCache from "node-cache";
|
|
6
|
+
import { CacheError, } from "../types/index.js";
|
|
6
7
|
import { logger } from "./Logger.js";
|
|
8
|
+
/**
|
|
9
|
+
* Enhanced cache error detection utility
|
|
10
|
+
* Provides multi-layer error classification to avoid false positives
|
|
11
|
+
*/
|
|
12
|
+
class CacheErrorDetector {
|
|
13
|
+
/**
|
|
14
|
+
* Detect if an error is cache-related using multiple strategies
|
|
15
|
+
*/
|
|
16
|
+
static isCacheError(error, operation, key) {
|
|
17
|
+
// Strategy 1: Check error type/class (most reliable)
|
|
18
|
+
if (error instanceof CacheError) {
|
|
19
|
+
return true;
|
|
20
|
+
}
|
|
21
|
+
// Strategy 2: Check for specific cache error patterns in NodeCache
|
|
22
|
+
if (error instanceof Error) {
|
|
23
|
+
const errorMessage = error.message.toLowerCase();
|
|
24
|
+
const stackTrace = error.stack?.toLowerCase() || "";
|
|
25
|
+
// Check for NodeCache-specific error patterns
|
|
26
|
+
const nodeCachePatterns = [
|
|
27
|
+
/node_modules\/node-cache/,
|
|
28
|
+
/cache\.js:\d+/,
|
|
29
|
+
/nodecache/,
|
|
30
|
+
];
|
|
31
|
+
const isNodeCacheError = nodeCachePatterns.some((pattern) => pattern.test(stackTrace));
|
|
32
|
+
if (isNodeCacheError) {
|
|
33
|
+
return true;
|
|
34
|
+
}
|
|
35
|
+
// Strategy 3: Check for specific cache-related error messages (more targeted)
|
|
36
|
+
const cacheSpecificPatterns = [
|
|
37
|
+
/cache.*(?:full|exhausted|limit)/,
|
|
38
|
+
/memory.*(?:cache|allocation).*(?:failed|error)/,
|
|
39
|
+
/storage.*(?:cache|quota).*(?:exceeded|full)/,
|
|
40
|
+
/cache.*(?:initialization|setup).*(?:failed|error)/,
|
|
41
|
+
/ttl.*(?:invalid|expired)/,
|
|
42
|
+
/cache.*(?:key|value).*(?:invalid|malformed)/,
|
|
43
|
+
];
|
|
44
|
+
const hasCacheSpecificError = cacheSpecificPatterns.some((pattern) => pattern.test(errorMessage));
|
|
45
|
+
if (hasCacheSpecificError) {
|
|
46
|
+
return true;
|
|
47
|
+
}
|
|
48
|
+
// Strategy 4: Context-aware detection
|
|
49
|
+
if (operation && key) {
|
|
50
|
+
// If we're in a cache operation and get memory/storage errors, likely cache-related
|
|
51
|
+
const cacheOperations = [
|
|
52
|
+
"get",
|
|
53
|
+
"set",
|
|
54
|
+
"del",
|
|
55
|
+
"clear",
|
|
56
|
+
"has",
|
|
57
|
+
"getorset",
|
|
58
|
+
"getorsetresilient",
|
|
59
|
+
];
|
|
60
|
+
const isCacheOperation = cacheOperations.includes(operation.toLowerCase());
|
|
61
|
+
const contextualPatterns = [
|
|
62
|
+
/^out of memory$/,
|
|
63
|
+
/storage quota exceeded/,
|
|
64
|
+
/disk full/,
|
|
65
|
+
];
|
|
66
|
+
if (isCacheOperation &&
|
|
67
|
+
contextualPatterns.some((pattern) => pattern.test(errorMessage))) {
|
|
68
|
+
return true;
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
return false;
|
|
73
|
+
}
|
|
74
|
+
/**
|
|
75
|
+
* Classify cache error for better handling and logging
|
|
76
|
+
*/
|
|
77
|
+
static classifyError(error, operation, key) {
|
|
78
|
+
if (!this.isCacheError(error, operation, key)) {
|
|
79
|
+
return {
|
|
80
|
+
isCache: false,
|
|
81
|
+
category: "unknown",
|
|
82
|
+
confidence: "high",
|
|
83
|
+
reason: "Not identified as cache-related error",
|
|
84
|
+
};
|
|
85
|
+
}
|
|
86
|
+
if (error instanceof CacheError) {
|
|
87
|
+
const category = error.code.includes("STORAGE")
|
|
88
|
+
? "storage"
|
|
89
|
+
: error.code.includes("NETWORK")
|
|
90
|
+
? "network"
|
|
91
|
+
: error.code.includes("SYSTEM")
|
|
92
|
+
? "system"
|
|
93
|
+
: "operation";
|
|
94
|
+
return {
|
|
95
|
+
isCache: true,
|
|
96
|
+
category,
|
|
97
|
+
confidence: "high",
|
|
98
|
+
reason: `Explicit cache error: ${error.code}`,
|
|
99
|
+
};
|
|
100
|
+
}
|
|
101
|
+
if (error instanceof Error) {
|
|
102
|
+
const message = error.message.toLowerCase();
|
|
103
|
+
const stack = error.stack?.toLowerCase() || "";
|
|
104
|
+
// High confidence patterns
|
|
105
|
+
if (/node_modules\/node-cache/.test(stack)) {
|
|
106
|
+
return {
|
|
107
|
+
isCache: true,
|
|
108
|
+
category: "system",
|
|
109
|
+
confidence: "high",
|
|
110
|
+
reason: "NodeCache stack trace detected",
|
|
111
|
+
};
|
|
112
|
+
}
|
|
113
|
+
// Medium confidence patterns
|
|
114
|
+
if (/cache.*(?:full|exhausted)/.test(message)) {
|
|
115
|
+
return {
|
|
116
|
+
isCache: true,
|
|
117
|
+
category: "storage",
|
|
118
|
+
confidence: "medium",
|
|
119
|
+
reason: "Cache capacity error pattern",
|
|
120
|
+
};
|
|
121
|
+
}
|
|
122
|
+
if (/memory.*cache.*failed/.test(message)) {
|
|
123
|
+
return {
|
|
124
|
+
isCache: true,
|
|
125
|
+
category: "system",
|
|
126
|
+
confidence: "medium",
|
|
127
|
+
reason: "Memory allocation error in cache context",
|
|
128
|
+
};
|
|
129
|
+
}
|
|
130
|
+
}
|
|
131
|
+
return {
|
|
132
|
+
isCache: true,
|
|
133
|
+
category: "unknown",
|
|
134
|
+
confidence: "low",
|
|
135
|
+
reason: "Fallback detection",
|
|
136
|
+
};
|
|
137
|
+
}
|
|
138
|
+
}
|
|
7
139
|
export class Cache {
|
|
8
140
|
cache;
|
|
9
141
|
statsData = {
|
|
10
142
|
hits: 0,
|
|
11
143
|
misses: 0,
|
|
144
|
+
cacheErrors: 0,
|
|
145
|
+
nonCacheErrors: 0,
|
|
12
146
|
};
|
|
13
147
|
constructor(options = {}) {
|
|
14
148
|
const { ttl = 3600, // 1 hour default
|
|
@@ -33,18 +167,25 @@ export class Cache {
|
|
|
33
167
|
});
|
|
34
168
|
}
|
|
35
169
|
/**
|
|
36
|
-
* Get value from cache
|
|
170
|
+
* Get value from cache with resilient error handling
|
|
37
171
|
*/
|
|
38
172
|
get(key) {
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
173
|
+
try {
|
|
174
|
+
const value = this.cache.get(key);
|
|
175
|
+
if (value !== undefined) {
|
|
176
|
+
this.statsData.hits++;
|
|
177
|
+
logger.debug(`Cache HIT: ${key}`);
|
|
178
|
+
return value;
|
|
179
|
+
}
|
|
180
|
+
else {
|
|
181
|
+
this.statsData.misses++;
|
|
182
|
+
logger.debug(`Cache MISS: ${key}`);
|
|
183
|
+
return undefined;
|
|
184
|
+
}
|
|
44
185
|
}
|
|
45
|
-
|
|
186
|
+
catch (error) {
|
|
46
187
|
this.statsData.misses++;
|
|
47
|
-
logger.
|
|
188
|
+
logger.warn(`Cache GET error for ${key}, treating as miss:`, error);
|
|
48
189
|
return undefined;
|
|
49
190
|
}
|
|
50
191
|
}
|
|
@@ -105,6 +246,8 @@ export class Cache {
|
|
|
105
246
|
misses: this.statsData.misses,
|
|
106
247
|
keys: this.cache.keys().length,
|
|
107
248
|
size: this.cache.getStats().keys,
|
|
249
|
+
cacheErrors: this.statsData.cacheErrors,
|
|
250
|
+
nonCacheErrors: this.statsData.nonCacheErrors,
|
|
108
251
|
};
|
|
109
252
|
}
|
|
110
253
|
/**
|
|
@@ -114,9 +257,10 @@ export class Cache {
|
|
|
114
257
|
return this.cache.getStats();
|
|
115
258
|
}
|
|
116
259
|
/**
|
|
117
|
-
* Get or set pattern
|
|
260
|
+
* Get or set pattern with automatic fallback on cache failures
|
|
118
261
|
*/
|
|
119
262
|
async getOrSet(key, fetchFn, ttl) {
|
|
263
|
+
// Try to get from cache with resilient error handling
|
|
120
264
|
const cached = this.get(key);
|
|
121
265
|
if (cached !== undefined) {
|
|
122
266
|
return cached;
|
|
@@ -124,7 +268,13 @@ export class Cache {
|
|
|
124
268
|
try {
|
|
125
269
|
logger.debug(`Cache FETCH: ${key}`);
|
|
126
270
|
const value = await fetchFn();
|
|
127
|
-
|
|
271
|
+
// Try to cache the result, but don't fail if caching fails
|
|
272
|
+
try {
|
|
273
|
+
this.set(key, value, ttl);
|
|
274
|
+
}
|
|
275
|
+
catch (cacheError) {
|
|
276
|
+
logger.warn(`Cache SET failed for ${key}, continuing without cache:`, cacheError);
|
|
277
|
+
}
|
|
128
278
|
return value;
|
|
129
279
|
}
|
|
130
280
|
catch (error) {
|
|
@@ -132,6 +282,36 @@ export class Cache {
|
|
|
132
282
|
throw error;
|
|
133
283
|
}
|
|
134
284
|
}
|
|
285
|
+
/**
|
|
286
|
+
* Resilient get or set pattern that bypasses cache entirely on cache system failures
|
|
287
|
+
*/
|
|
288
|
+
async getOrSetResilient(key, fetchFn, ttl) {
|
|
289
|
+
try {
|
|
290
|
+
// Try normal cache flow first
|
|
291
|
+
return await this.getOrSet(key, fetchFn, ttl);
|
|
292
|
+
}
|
|
293
|
+
catch (error) {
|
|
294
|
+
// Use enhanced error detection to determine if this is a cache-related error
|
|
295
|
+
const errorClassification = CacheErrorDetector.classifyError(error, "getOrSet", key);
|
|
296
|
+
if (errorClassification.isCache) {
|
|
297
|
+
// Track cache error statistics
|
|
298
|
+
this.statsData.cacheErrors++;
|
|
299
|
+
logger.warn(`Cache system error detected for ${key} (${errorClassification.confidence} confidence: ${errorClassification.reason}), bypassing cache entirely`, {
|
|
300
|
+
error: error instanceof Error ? error.message : String(error),
|
|
301
|
+
category: errorClassification.category,
|
|
302
|
+
confidence: errorClassification.confidence,
|
|
303
|
+
key,
|
|
304
|
+
operation: "getOrSet",
|
|
305
|
+
});
|
|
306
|
+
// Bypass cache completely and just fetch the data
|
|
307
|
+
return await fetchFn();
|
|
308
|
+
}
|
|
309
|
+
// Track non-cache errors for debugging
|
|
310
|
+
this.statsData.nonCacheErrors++;
|
|
311
|
+
// Re-throw non-cache errors
|
|
312
|
+
throw error;
|
|
313
|
+
}
|
|
314
|
+
}
|
|
135
315
|
/**
|
|
136
316
|
* Cache with tags for group invalidation
|
|
137
317
|
*/
|
|
@@ -46,6 +46,9 @@ export declare class TokenBudgetManager implements TokenBudgetManagerInterface {
|
|
|
46
46
|
private usedTokens;
|
|
47
47
|
private batchAllocations;
|
|
48
48
|
private reservedTokens;
|
|
49
|
+
private preAllocationMode;
|
|
50
|
+
private preAllocatedBatches;
|
|
51
|
+
private batchStates;
|
|
49
52
|
constructor(totalBudget: number);
|
|
50
53
|
/**
|
|
51
54
|
* Allocate tokens for a specific batch
|
|
@@ -92,6 +95,31 @@ export declare class TokenBudgetManager implements TokenBudgetManagerInterface {
|
|
|
92
95
|
* Reset the budget manager (for testing or reuse)
|
|
93
96
|
*/
|
|
94
97
|
reset(): void;
|
|
98
|
+
/**
|
|
99
|
+
* Pre-allocate tokens for all batches upfront
|
|
100
|
+
* This ensures all batches have guaranteed token allocation before processing starts
|
|
101
|
+
*/
|
|
102
|
+
preAllocateAllBatches(allocations: Map<number, number>): boolean;
|
|
103
|
+
/**
|
|
104
|
+
* Mark a batch as failed and handle cleanup
|
|
105
|
+
*/
|
|
106
|
+
markBatchFailed(batchIndex: number, error?: string): void;
|
|
107
|
+
/**
|
|
108
|
+
* Get the current state of a batch
|
|
109
|
+
*/
|
|
110
|
+
getBatchState(batchIndex: number): "pending" | "processing" | "completed" | "failed" | undefined;
|
|
111
|
+
/**
|
|
112
|
+
* Check if pre-allocation mode is active
|
|
113
|
+
*/
|
|
114
|
+
isPreAllocationMode(): boolean;
|
|
115
|
+
/**
|
|
116
|
+
* Get all batch states for debugging
|
|
117
|
+
*/
|
|
118
|
+
getAllBatchStates(): Map<number, "pending" | "processing" | "completed" | "failed">;
|
|
119
|
+
/**
|
|
120
|
+
* Disable pre-allocation mode and clean up
|
|
121
|
+
*/
|
|
122
|
+
disablePreAllocationMode(): void;
|
|
95
123
|
/**
|
|
96
124
|
* Update the total budget (useful for dynamic adjustment)
|
|
97
125
|
*/
|
|
@@ -77,12 +77,18 @@ export class TokenBudgetManager {
|
|
|
77
77
|
usedTokens = 0;
|
|
78
78
|
batchAllocations = new Map();
|
|
79
79
|
reservedTokens = 0; // Tokens allocated but not yet used
|
|
80
|
+
// NEW: Pre-allocation mode tracking
|
|
81
|
+
preAllocationMode = false;
|
|
82
|
+
preAllocatedBatches = new Set();
|
|
83
|
+
batchStates = new Map();
|
|
80
84
|
constructor(totalBudget) {
|
|
81
85
|
if (totalBudget <= 0) {
|
|
82
86
|
throw new Error("Token budget must be greater than 0");
|
|
83
87
|
}
|
|
84
|
-
|
|
85
|
-
|
|
88
|
+
// Floor the budget to ensure integer arithmetic and avoid floating-point precision issues.
|
|
89
|
+
// The fractional part is discarded, so the budget is always rounded down.
|
|
90
|
+
this.totalBudget = Math.floor(totalBudget);
|
|
91
|
+
logger.debug(`TokenBudgetManager created with budget of ${this.totalBudget} tokens (original: ${totalBudget})`);
|
|
86
92
|
}
|
|
87
93
|
/**
|
|
88
94
|
* Allocate tokens for a specific batch
|
|
@@ -93,8 +99,31 @@ export class TokenBudgetManager {
|
|
|
93
99
|
logger.warn(`Invalid token estimate for batch ${batchIndex}: ${estimatedTokens}`);
|
|
94
100
|
return false;
|
|
95
101
|
}
|
|
96
|
-
//
|
|
102
|
+
// NEW: Handle pre-allocation mode
|
|
103
|
+
if (this.preAllocationMode && this.preAllocatedBatches.has(batchIndex)) {
|
|
104
|
+
// Check if batch is already being processed
|
|
105
|
+
const currentState = this.batchStates.get(batchIndex);
|
|
106
|
+
if (currentState === "processing") {
|
|
107
|
+
logger.warn(`Batch ${batchIndex} is already being processed`);
|
|
108
|
+
return false;
|
|
109
|
+
}
|
|
110
|
+
if (currentState !== "pending") {
|
|
111
|
+
logger.warn(`Batch ${batchIndex} is not in pending state (current: ${currentState})`);
|
|
112
|
+
return false;
|
|
113
|
+
}
|
|
114
|
+
// In pre-allocation mode, just mark batch as processing and return success
|
|
115
|
+
this.batchStates.set(batchIndex, "processing");
|
|
116
|
+
logger.debug(`Batch ${batchIndex} using pre-allocated tokens (${this.batchAllocations.get(batchIndex)} tokens)`);
|
|
117
|
+
return true;
|
|
118
|
+
}
|
|
119
|
+
// Check if we already have an allocation for this batch (non-pre-allocation mode)
|
|
97
120
|
if (this.batchAllocations.has(batchIndex)) {
|
|
121
|
+
// Check if batch is already being processed
|
|
122
|
+
const currentState = this.batchStates.get(batchIndex);
|
|
123
|
+
if (currentState === "processing") {
|
|
124
|
+
logger.warn(`Batch ${batchIndex} is already being processed`);
|
|
125
|
+
return false;
|
|
126
|
+
}
|
|
98
127
|
logger.warn(`Batch ${batchIndex} already has token allocation`);
|
|
99
128
|
return false;
|
|
100
129
|
}
|
|
@@ -108,6 +137,7 @@ export class TokenBudgetManager {
|
|
|
108
137
|
// Allocate the tokens
|
|
109
138
|
this.reservedTokens += estimatedTokens;
|
|
110
139
|
this.batchAllocations.set(batchIndex, estimatedTokens);
|
|
140
|
+
this.batchStates.set(batchIndex, "processing");
|
|
111
141
|
logger.debug(`Allocated ${estimatedTokens} tokens for batch ${batchIndex} ` +
|
|
112
142
|
`(${this.getAvailableBudget()} remaining)`);
|
|
113
143
|
return true;
|
|
@@ -122,10 +152,19 @@ export class TokenBudgetManager {
|
|
|
122
152
|
logger.warn(`No token allocation found for batch ${batchIndex}`);
|
|
123
153
|
return;
|
|
124
154
|
}
|
|
155
|
+
// Update batch state to completed only if not already failed
|
|
156
|
+
const currentState = this.batchStates.get(batchIndex);
|
|
157
|
+
if (currentState !== "failed") {
|
|
158
|
+
this.batchStates.set(batchIndex, "completed");
|
|
159
|
+
}
|
|
125
160
|
// Move from reserved to used (assuming the tokens were actually used)
|
|
126
161
|
this.reservedTokens -= allocated;
|
|
127
162
|
this.usedTokens += allocated;
|
|
128
163
|
this.batchAllocations.delete(batchIndex);
|
|
164
|
+
// Clean up pre-allocation tracking
|
|
165
|
+
if (this.preAllocationMode) {
|
|
166
|
+
this.preAllocatedBatches.delete(batchIndex);
|
|
167
|
+
}
|
|
129
168
|
logger.debug(`Released ${allocated} tokens from batch ${batchIndex} ` +
|
|
130
169
|
`(${this.getAvailableBudget()} now available)`);
|
|
131
170
|
}
|
|
@@ -182,6 +221,72 @@ export class TokenBudgetManager {
|
|
|
182
221
|
this.batchAllocations.clear();
|
|
183
222
|
logger.debug("TokenBudgetManager reset");
|
|
184
223
|
}
|
|
224
|
+
/**
|
|
225
|
+
* Pre-allocate tokens for all batches upfront
|
|
226
|
+
* This ensures all batches have guaranteed token allocation before processing starts
|
|
227
|
+
*/
|
|
228
|
+
preAllocateAllBatches(allocations) {
|
|
229
|
+
const totalRequired = Array.from(allocations.values()).reduce((sum, tokens) => sum + tokens, 0);
|
|
230
|
+
if (totalRequired > this.totalBudget) {
|
|
231
|
+
logger.error(`Pre-allocation failed: total required (${totalRequired}) exceeds budget (${this.totalBudget})`);
|
|
232
|
+
return false;
|
|
233
|
+
}
|
|
234
|
+
// Clear any existing allocations and reset state
|
|
235
|
+
this.batchAllocations.clear();
|
|
236
|
+
this.reservedTokens = 0;
|
|
237
|
+
this.batchStates.clear();
|
|
238
|
+
this.preAllocatedBatches.clear();
|
|
239
|
+
// Enable pre-allocation mode
|
|
240
|
+
this.preAllocationMode = true;
|
|
241
|
+
// Reserve all tokens upfront
|
|
242
|
+
allocations.forEach((tokens, batchIndex) => {
|
|
243
|
+
this.batchAllocations.set(batchIndex, tokens);
|
|
244
|
+
this.reservedTokens += tokens;
|
|
245
|
+
this.preAllocatedBatches.add(batchIndex);
|
|
246
|
+
this.batchStates.set(batchIndex, "pending");
|
|
247
|
+
});
|
|
248
|
+
logger.info(`Pre-allocated ${totalRequired} tokens across ${allocations.size} batches ` +
|
|
249
|
+
`(${this.getAvailableBudget()} remaining)`);
|
|
250
|
+
return true;
|
|
251
|
+
}
|
|
252
|
+
/**
|
|
253
|
+
* Mark a batch as failed and handle cleanup
|
|
254
|
+
*/
|
|
255
|
+
markBatchFailed(batchIndex, error) {
|
|
256
|
+
this.batchStates.set(batchIndex, "failed");
|
|
257
|
+
if (error) {
|
|
258
|
+
logger.debug(`Batch ${batchIndex} marked as failed: ${error}`);
|
|
259
|
+
}
|
|
260
|
+
else {
|
|
261
|
+
logger.debug(`Batch ${batchIndex} marked as failed`);
|
|
262
|
+
}
|
|
263
|
+
}
|
|
264
|
+
/**
|
|
265
|
+
* Get the current state of a batch
|
|
266
|
+
*/
|
|
267
|
+
getBatchState(batchIndex) {
|
|
268
|
+
return this.batchStates.get(batchIndex);
|
|
269
|
+
}
|
|
270
|
+
/**
|
|
271
|
+
* Check if pre-allocation mode is active
|
|
272
|
+
*/
|
|
273
|
+
isPreAllocationMode() {
|
|
274
|
+
return this.preAllocationMode;
|
|
275
|
+
}
|
|
276
|
+
/**
|
|
277
|
+
* Get all batch states for debugging
|
|
278
|
+
*/
|
|
279
|
+
getAllBatchStates() {
|
|
280
|
+
return new Map(this.batchStates);
|
|
281
|
+
}
|
|
282
|
+
/**
|
|
283
|
+
* Disable pre-allocation mode and clean up
|
|
284
|
+
*/
|
|
285
|
+
disablePreAllocationMode() {
|
|
286
|
+
this.preAllocationMode = false;
|
|
287
|
+
this.preAllocatedBatches.clear();
|
|
288
|
+
logger.debug("Pre-allocation mode disabled");
|
|
289
|
+
}
|
|
185
290
|
/**
|
|
186
291
|
* Update the total budget (useful for dynamic adjustment)
|
|
187
292
|
*/
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Retry Manager for Yama
|
|
3
|
+
* Provides intelligent retry logic with exponential backoff for handling transient failures
|
|
4
|
+
*/
|
|
5
|
+
export interface RetryOptions {
|
|
6
|
+
maxAttempts?: number;
|
|
7
|
+
baseDelayMs?: number;
|
|
8
|
+
maxDelayMs?: number;
|
|
9
|
+
backoffMultiplier?: number;
|
|
10
|
+
jitterMs?: number;
|
|
11
|
+
retryableErrors?: string[];
|
|
12
|
+
}
|
|
13
|
+
export interface RetryContext {
|
|
14
|
+
operation: string;
|
|
15
|
+
attempt: number;
|
|
16
|
+
maxAttempts: number;
|
|
17
|
+
lastError?: Error;
|
|
18
|
+
totalElapsed: number;
|
|
19
|
+
}
|
|
20
|
+
export declare class RetryManager {
|
|
21
|
+
private static readonly DEFAULT_OPTIONS;
|
|
22
|
+
/**
|
|
23
|
+
* Execute an operation with retry logic
|
|
24
|
+
*/
|
|
25
|
+
static withRetry<T>(operation: () => Promise<T>, context: string, options?: RetryOptions): Promise<T>;
|
|
26
|
+
/**
|
|
27
|
+
* Check if an error is retryable based on error patterns
|
|
28
|
+
*/
|
|
29
|
+
private static isRetryableError;
|
|
30
|
+
/**
|
|
31
|
+
* Calculate delay with exponential backoff and jitter
|
|
32
|
+
*/
|
|
33
|
+
private static calculateDelay;
|
|
34
|
+
/**
|
|
35
|
+
* Sleep for specified milliseconds
|
|
36
|
+
*/
|
|
37
|
+
private static sleep;
|
|
38
|
+
/**
|
|
39
|
+
* Create a retry wrapper function for a specific operation
|
|
40
|
+
*/
|
|
41
|
+
static createRetryWrapper<T extends any[], R>(fn: (...args: T) => Promise<R>, context: string, options?: RetryOptions): (...args: T) => Promise<R>;
|
|
42
|
+
/**
|
|
43
|
+
* Batch retry operations with individual retry logic
|
|
44
|
+
*/
|
|
45
|
+
static batchWithRetry<T>(operations: Array<{
|
|
46
|
+
fn: () => Promise<T>;
|
|
47
|
+
context: string;
|
|
48
|
+
}>, options?: RetryOptions & {
|
|
49
|
+
continueOnError?: boolean;
|
|
50
|
+
}): Promise<Array<{
|
|
51
|
+
success: boolean;
|
|
52
|
+
data?: T;
|
|
53
|
+
error?: Error;
|
|
54
|
+
context: string;
|
|
55
|
+
}>>;
|
|
56
|
+
/**
|
|
57
|
+
* Get retry statistics for monitoring
|
|
58
|
+
*/
|
|
59
|
+
static getRetryStats(results: Array<{
|
|
60
|
+
success: boolean;
|
|
61
|
+
context: string;
|
|
62
|
+
}>): {
|
|
63
|
+
total: number;
|
|
64
|
+
successful: number;
|
|
65
|
+
failed: number;
|
|
66
|
+
successRate: number;
|
|
67
|
+
failuresByContext: Record<string, number>;
|
|
68
|
+
};
|
|
69
|
+
/**
|
|
70
|
+
* Create a circuit breaker pattern (simple implementation)
|
|
71
|
+
*/
|
|
72
|
+
static createCircuitBreaker<T extends any[], R>(fn: (...args: T) => Promise<R>, context: string, options?: {
|
|
73
|
+
failureThreshold?: number;
|
|
74
|
+
recoveryTimeoutMs?: number;
|
|
75
|
+
retryOptions?: RetryOptions;
|
|
76
|
+
}): (...args: T) => Promise<R>;
|
|
77
|
+
}
|
|
78
|
+
//# sourceMappingURL=RetryManager.d.ts.map
|
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Retry Manager for Yama
|
|
3
|
+
* Provides intelligent retry logic with exponential backoff for handling transient failures
|
|
4
|
+
*/
|
|
5
|
+
import { logger } from "./Logger.js";
|
|
6
|
+
/**
 * Retry manager: retry-with-exponential-backoff, retry wrappers, batch
 * retry, aggregate stats and a simple circuit breaker.
 */
export class RetryManager {
    // Defaults merged underneath caller-supplied options in withRetry().
    static DEFAULT_OPTIONS = {
        maxAttempts: 3,
        baseDelayMs: 1000,
        maxDelayMs: 10000,
        backoffMultiplier: 2.0,
        jitterMs: 100,
        retryableErrors: [
            "provider_error",
            "network",
            "timeout",
            "connection",
            "econnreset",
            "etimedout",
            "enotfound",
            "econnrefused",
            "socket hang up",
            "request timeout",
            "service unavailable",
            "bad gateway",
            "gateway timeout",
            "temporary failure",
            "rate limit",
        ],
    };
    /**
     * Execute an operation with retry logic.
     * @param operation - async operation to run
     * @param context - label used in log messages
     * @param options - overrides for DEFAULT_OPTIONS
     * @returns the operation's result
     * @throws the last error when all attempts fail or the error is not retryable
     */
    static async withRetry(operation, context, options = {}) {
        const opts = { ...RetryManager.DEFAULT_OPTIONS, ...options };
        const startTime = Date.now();
        let lastError;
        for (let attempt = 1; attempt <= opts.maxAttempts; attempt++) {
            try {
                const result = await operation();
                if (attempt > 1) {
                    const elapsed = Date.now() - startTime;
                    logger.info(`${context} succeeded on attempt ${attempt} after ${elapsed}ms`);
                }
                return result;
            }
            catch (error) {
                lastError = error;
                const isLastAttempt = attempt === opts.maxAttempts;
                const isRetryable = RetryManager.isRetryableError(lastError, opts.retryableErrors);
                const elapsed = Date.now() - startTime;
                if (isLastAttempt || !isRetryable) {
                    if (isLastAttempt) {
                        logger.error(`${context} failed after ${opts.maxAttempts} attempts (${elapsed}ms total):`, lastError);
                    }
                    else {
                        logger.error(`${context} failed with non-retryable error:`, lastError);
                    }
                    throw lastError;
                }
                const delay = RetryManager.calculateDelay(attempt, opts);
                // FIX: thrown values are not guaranteed to be Error instances;
                // accessing .message on a thrown null/undefined would itself throw.
                const reason = lastError instanceof Error ? lastError.message : String(lastError);
                logger.warn(`${context} failed (attempt ${attempt}/${opts.maxAttempts}), retrying in ${delay}ms:`, reason);
                await RetryManager.sleep(delay);
            }
        }
        // This should never be reached, but TypeScript requires it
        throw (lastError ||
            new Error(`${context} failed after ${opts.maxAttempts} attempts`));
    }
    /**
     * Check if an error is retryable based on error patterns.
     * Matches case-insensitively against message, code and name.
     */
    static isRetryableError(error, retryablePatterns) {
        if (!error) {
            return false;
        }
        const errorMessage = error.message?.toLowerCase() || "";
        const errorCode = error.code?.toLowerCase() || "";
        const errorName = error.name?.toLowerCase() || "";
        // Check if any retryable pattern matches the error
        return retryablePatterns.some((pattern) => {
            const lowerPattern = pattern.toLowerCase();
            return (errorMessage.includes(lowerPattern) ||
                errorCode.includes(lowerPattern) ||
                errorName.includes(lowerPattern));
        });
    }
    /**
     * Calculate delay with exponential backoff and random jitter.
     */
    static calculateDelay(attempt, options) {
        // Exponential backoff: baseDelay * (multiplier ^ (attempt - 1))
        const exponentialDelay = options.baseDelayMs * Math.pow(options.backoffMultiplier, attempt - 1);
        // Apply maximum delay cap
        const cappedDelay = Math.min(exponentialDelay, options.maxDelayMs);
        // Add jitter to prevent thundering herd
        const jitter = Math.random() * options.jitterMs;
        return Math.floor(cappedDelay + jitter);
    }
    /**
     * Sleep for the specified number of milliseconds.
     */
    static sleep(ms) {
        return new Promise((resolve) => setTimeout(resolve, ms));
    }
    /**
     * Create a retry wrapper function for a specific operation.
     */
    static createRetryWrapper(fn, context, options = {}) {
        return async (...args) => {
            return RetryManager.withRetry(() => fn(...args), context, options);
        };
    }
    /**
     * Run operations sequentially, each with individual retry logic.
     * Failures are captured in the result list unless continueOnError is false.
     */
    static async batchWithRetry(operations, options = {}) {
        const { continueOnError = true, ...retryOptions } = options;
        const results = [];
        for (const { fn, context } of operations) {
            try {
                const data = await RetryManager.withRetry(fn, context, retryOptions);
                results.push({ success: true, data, context });
            }
            catch (error) {
                const err = error;
                results.push({ success: false, error: err, context });
                if (!continueOnError) {
                    throw error;
                }
            }
        }
        return results;
    }
    /**
     * Get aggregate retry statistics for monitoring.
     */
    static getRetryStats(results) {
        const total = results.length;
        const successful = results.filter((r) => r.success).length;
        const failed = total - successful;
        const successRate = total > 0 ? successful / total : 0;
        const failuresByContext = {};
        results
            .filter((r) => !r.success)
            .forEach((r) => {
            failuresByContext[r.context] = (failuresByContext[r.context] || 0) + 1;
        });
        return {
            total,
            successful,
            failed,
            successRate,
            failuresByContext,
        };
    }
    /**
     * Create a circuit breaker around fn (simple CLOSED/OPEN/HALF_OPEN machine).
     */
    static createCircuitBreaker(fn, context, options = {}) {
        const { failureThreshold = 5, recoveryTimeoutMs = 30000, retryOptions = {}, } = options;
        let failureCount = 0;
        let lastFailureTime = 0;
        let state = "CLOSED";
        return async (...args) => {
            const now = Date.now();
            // Check if we should attempt recovery
            if (state === "OPEN" && now - lastFailureTime > recoveryTimeoutMs) {
                state = "HALF_OPEN";
                logger.debug(`Circuit breaker for ${context} entering HALF_OPEN state`);
            }
            // Reject immediately if circuit is open
            if (state === "OPEN") {
                throw new Error(`Circuit breaker OPEN for ${context} (${failureCount} failures)`);
            }
            try {
                const result = await RetryManager.withRetry(() => fn(...args), context, retryOptions);
                // Success - reset circuit breaker
                if (state === "HALF_OPEN") {
                    state = "CLOSED";
                    failureCount = 0;
                    logger.info(`Circuit breaker for ${context} recovered to CLOSED state`);
                }
                return result;
            }
            catch (error) {
                failureCount++;
                // FIX: record when the failure actually happened, not the timestamp
                // captured before the (possibly long) retried call started —
                // otherwise the recovery window is shortened by the call's duration.
                lastFailureTime = Date.now();
                if (failureCount >= failureThreshold) {
                    state = "OPEN";
                    logger.error(`Circuit breaker OPEN for ${context} after ${failureCount} failures`);
                }
                throw error;
            }
        };
    }
}
|
|
205
|
+
//# sourceMappingURL=RetryManager.js.map
|