@juspay/yama 1.3.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,112 @@
+ /**
+ * Parallel Processing Utilities for Batch Processing
+ * Provides concurrency control and resource management for parallel batch execution
+ */
+ import { SemaphoreInterface, TokenBudgetManagerInterface } from "../types/index.js";
+ /**
+ * Semaphore for controlling concurrent access to resources
+ * Limits the number of concurrent operations that can run simultaneously
+ */
+ export declare class Semaphore implements SemaphoreInterface {
+ private permits;
+ private waiting;
+ constructor(permits: number);
+ /**
+ * Acquire a permit from the semaphore
+ * If no permits are available, the caller will wait until one becomes available
+ */
+ acquire(): Promise<void>;
+ /**
+ * Release a permit back to the semaphore
+ * This will allow waiting operations to proceed
+ */
+ release(): void;
+ /**
+ * Get the number of available permits
+ */
+ getAvailablePermits(): number;
+ /**
+ * Get the number of operations waiting for permits
+ */
+ getWaitingCount(): number;
+ /**
+ * Get semaphore status for debugging
+ */
+ getStatus(): {
+ available: number;
+ waiting: number;
+ };
+ }
+ /**
+ * Token Budget Manager for controlling AI token usage across parallel batches
+ * Ensures that the total token usage doesn't exceed the configured limits
+ */
+ export declare class TokenBudgetManager implements TokenBudgetManagerInterface {
+ private totalBudget;
+ private usedTokens;
+ private batchAllocations;
+ private reservedTokens;
+ constructor(totalBudget: number);
+ /**
+ * Allocate tokens for a specific batch
+ * Returns true if allocation was successful, false if insufficient budget
+ */
+ allocateForBatch(batchIndex: number, estimatedTokens: number): boolean;
+ /**
+ * Release tokens allocated to a batch
+ * This should be called when a batch completes (successfully or with error)
+ */
+ releaseBatch(batchIndex: number): void;
+ /**
+ * Get the available token budget (not yet allocated or used)
+ */
+ getAvailableBudget(): number;
+ /**
+ * Get the total token budget
+ */
+ getTotalBudget(): number;
+ /**
+ * Get the number of tokens actually used (completed batches)
+ */
+ getUsedTokens(): number;
+ /**
+ * Get the number of tokens reserved (allocated but not yet used)
+ */
+ getReservedTokens(): number;
+ /**
+ * Get the number of active batch allocations
+ */
+ getActiveBatches(): number;
+ /**
+ * Get detailed budget status for monitoring
+ */
+ getBudgetStatus(): {
+ total: number;
+ used: number;
+ reserved: number;
+ available: number;
+ activeBatches: number;
+ utilizationPercent: number;
+ };
+ /**
+ * Reset the budget manager (for testing or reuse)
+ */
+ reset(): void;
+ /**
+ * Update the total budget (useful for dynamic adjustment)
+ */
+ updateBudget(newBudget: number): void;
+ }
+ /**
+ * Factory function to create a Semaphore with validation
+ */
+ export declare function createSemaphore(permits: number): Semaphore;
+ /**
+ * Factory function to create a TokenBudgetManager with validation
+ */
+ export declare function createTokenBudgetManager(totalBudget: number): TokenBudgetManager;
+ /**
+ * Utility function to calculate optimal concurrency based on available resources
+ */
+ export declare function calculateOptimalConcurrency(totalBatches: number, maxConcurrent: number, averageTokensPerBatch: number, totalTokenBudget: number): number;
+ //# sourceMappingURL=ParallelProcessing.d.ts.map
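
The declarations above describe a conventional counting semaphore: acquire() resolves immediately while permits remain and otherwise queues the caller, and release() hands the permit to the next waiter. A minimal usage sketch in TypeScript follows; it assumes the utilities are re-exported from the package's public entry point (the diff does not show the export path), and processBatch is a hypothetical stand-in for yama's real batch worker.

import { createSemaphore } from "@juspay/yama"; // assumed export path

async function runBatchesWithLimit(
  batches: string[][],
  maxConcurrent: number,
  processBatch: (files: string[]) => Promise<void>, // hypothetical worker
): Promise<void> {
  const semaphore = createSemaphore(maxConcurrent);
  await Promise.all(
    batches.map(async (files) => {
      await semaphore.acquire(); // wait for a free slot
      try {
        await processBatch(files);
      } finally {
        semaphore.release(); // always return the permit, even on error
      }
    }),
  );
}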
@@ -0,0 +1,228 @@
+ /**
+ * Parallel Processing Utilities for Batch Processing
+ * Provides concurrency control and resource management for parallel batch execution
+ */
+ import { logger } from "./Logger.js";
+ /**
+ * Semaphore for controlling concurrent access to resources
+ * Limits the number of concurrent operations that can run simultaneously
+ */
+ export class Semaphore {
+ permits;
+ waiting = [];
+ constructor(permits) {
+ if (permits <= 0) {
+ throw new Error("Semaphore permits must be greater than 0");
+ }
+ this.permits = permits;
+ logger.debug(`Semaphore created with ${permits} permits`);
+ }
+ /**
+ * Acquire a permit from the semaphore
+ * If no permits are available, the caller will wait until one becomes available
+ */
+ async acquire() {
+ if (this.permits > 0) {
+ this.permits--;
+ logger.debug(`Semaphore permit acquired, ${this.permits} remaining`);
+ return;
+ }
+ logger.debug(`Semaphore permit requested, waiting in queue (${this.waiting.length} waiting)`);
+ return new Promise((resolve) => {
+ this.waiting.push(resolve);
+ });
+ }
+ /**
+ * Release a permit back to the semaphore
+ * This will allow waiting operations to proceed
+ */
+ release() {
+ this.permits++;
+ logger.debug(`Semaphore permit released, ${this.permits} available`);
+ if (this.waiting.length > 0) {
+ const resolve = this.waiting.shift();
+ this.permits--;
+ logger.debug(`Semaphore permit granted to waiting operation, ${this.permits} remaining`);
+ resolve();
+ }
+ }
+ /**
+ * Get the number of available permits
+ */
+ getAvailablePermits() {
+ return this.permits;
+ }
+ /**
+ * Get the number of operations waiting for permits
+ */
+ getWaitingCount() {
+ return this.waiting.length;
+ }
+ /**
+ * Get semaphore status for debugging
+ */
+ getStatus() {
+ return {
+ available: this.permits,
+ waiting: this.waiting.length,
+ };
+ }
+ }
+ /**
+ * Token Budget Manager for controlling AI token usage across parallel batches
+ * Ensures that the total token usage doesn't exceed the configured limits
+ */
+ export class TokenBudgetManager {
+ totalBudget;
+ usedTokens = 0;
+ batchAllocations = new Map();
+ reservedTokens = 0; // Tokens allocated but not yet used
+ constructor(totalBudget) {
+ if (totalBudget <= 0) {
+ throw new Error("Token budget must be greater than 0");
+ }
+ this.totalBudget = totalBudget;
+ logger.debug(`TokenBudgetManager created with budget of ${totalBudget} tokens`);
+ }
+ /**
+ * Allocate tokens for a specific batch
+ * Returns true if allocation was successful, false if insufficient budget
+ */
+ allocateForBatch(batchIndex, estimatedTokens) {
+ if (estimatedTokens <= 0) {
+ logger.warn(`Invalid token estimate for batch ${batchIndex}: ${estimatedTokens}`);
+ return false;
+ }
+ // Check if we already have an allocation for this batch
+ if (this.batchAllocations.has(batchIndex)) {
+ logger.warn(`Batch ${batchIndex} already has token allocation`);
+ return false;
+ }
+ // Check if we have enough budget
+ const totalAllocated = this.usedTokens + this.reservedTokens + estimatedTokens;
+ if (totalAllocated > this.totalBudget) {
+ logger.debug(`Insufficient token budget for batch ${batchIndex}: ` +
+ `need ${estimatedTokens}, available ${this.getAvailableBudget()}`);
+ return false;
+ }
+ // Allocate the tokens
+ this.reservedTokens += estimatedTokens;
+ this.batchAllocations.set(batchIndex, estimatedTokens);
+ logger.debug(`Allocated ${estimatedTokens} tokens for batch ${batchIndex} ` +
+ `(${this.getAvailableBudget()} remaining)`);
+ return true;
+ }
+ /**
+ * Release tokens allocated to a batch
+ * This should be called when a batch completes (successfully or with error)
+ */
+ releaseBatch(batchIndex) {
+ const allocated = this.batchAllocations.get(batchIndex);
+ if (!allocated) {
+ logger.warn(`No token allocation found for batch ${batchIndex}`);
+ return;
+ }
+ // Move from reserved to used (assuming the tokens were actually used)
+ this.reservedTokens -= allocated;
+ this.usedTokens += allocated;
+ this.batchAllocations.delete(batchIndex);
+ logger.debug(`Released ${allocated} tokens from batch ${batchIndex} ` +
+ `(${this.getAvailableBudget()} now available)`);
+ }
+ /**
+ * Get the available token budget (not yet allocated or used)
+ */
+ getAvailableBudget() {
+ return this.totalBudget - this.usedTokens - this.reservedTokens;
+ }
+ /**
+ * Get the total token budget
+ */
+ getTotalBudget() {
+ return this.totalBudget;
+ }
+ /**
+ * Get the number of tokens actually used (completed batches)
+ */
+ getUsedTokens() {
+ return this.usedTokens;
+ }
+ /**
+ * Get the number of tokens reserved (allocated but not yet used)
+ */
+ getReservedTokens() {
+ return this.reservedTokens;
+ }
+ /**
+ * Get the number of active batch allocations
+ */
+ getActiveBatches() {
+ return this.batchAllocations.size;
+ }
+ /**
+ * Get detailed budget status for monitoring
+ */
+ getBudgetStatus() {
+ const utilizationPercent = ((this.usedTokens + this.reservedTokens) / this.totalBudget) * 100;
+ return {
+ total: this.totalBudget,
+ used: this.usedTokens,
+ reserved: this.reservedTokens,
+ available: this.getAvailableBudget(),
+ activeBatches: this.batchAllocations.size,
+ utilizationPercent: Math.round(utilizationPercent * 100) / 100,
+ };
+ }
+ /**
+ * Reset the budget manager (for testing or reuse)
+ */
+ reset() {
+ this.usedTokens = 0;
+ this.reservedTokens = 0;
+ this.batchAllocations.clear();
+ logger.debug("TokenBudgetManager reset");
+ }
+ /**
+ * Update the total budget (useful for dynamic adjustment)
+ */
+ updateBudget(newBudget) {
+ if (newBudget <= 0) {
+ throw new Error("Token budget must be greater than 0");
+ }
+ const oldBudget = this.totalBudget;
+ this.totalBudget = newBudget;
+ logger.debug(`Token budget updated from ${oldBudget} to ${newBudget}`);
+ // Log warning if new budget is less than current usage
+ if (newBudget < this.usedTokens + this.reservedTokens) {
+ logger.warn(`New budget (${newBudget}) is less than current usage (${this.usedTokens + this.reservedTokens})`);
+ }
+ }
+ }
+ /**
+ * Factory function to create a Semaphore with validation
+ */
+ export function createSemaphore(permits) {
+ return new Semaphore(permits);
+ }
+ /**
+ * Factory function to create a TokenBudgetManager with validation
+ */
+ export function createTokenBudgetManager(totalBudget) {
+ return new TokenBudgetManager(totalBudget);
+ }
+ /**
+ * Utility function to calculate optimal concurrency based on available resources
+ */
+ export function calculateOptimalConcurrency(totalBatches, maxConcurrent, averageTokensPerBatch, totalTokenBudget) {
+ // Don't exceed the configured maximum
+ let optimal = Math.min(maxConcurrent, totalBatches);
+ // Don't exceed what the token budget can support
+ const tokenBasedLimit = Math.floor(totalTokenBudget / averageTokensPerBatch);
+ optimal = Math.min(optimal, tokenBasedLimit);
+ // Ensure at least 1
+ optimal = Math.max(1, optimal);
+ logger.debug(`Calculated optimal concurrency: ${optimal} ` +
+ `(max: ${maxConcurrent}, batches: ${totalBatches}, token-limited: ${tokenBasedLimit})`);
+ return optimal;
+ }
+ //# sourceMappingURL=ParallelProcessing.js.map
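
The implementation reserves tokens up front via allocateForBatch() and converts the reservation to usage in releaseBatch(), so a batch whose allocation fails can simply be deferred until budget frees up. A hedged sketch of how a caller might combine the budget manager with calculateOptimalConcurrency() follows; the token figures are illustrative rather than values yama actually uses, and the export path is assumed.

import {
  createTokenBudgetManager,
  calculateOptimalConcurrency,
} from "@juspay/yama"; // assumed export path

const budget = createTokenBudgetManager(120_000); // illustrative total budget
const perBatchEstimate = 8_000;                   // illustrative per-batch estimate

const concurrency = calculateOptimalConcurrency(
  10,                      // totalBatches
  4,                       // maxConcurrent (e.g. from config)
  perBatchEstimate,        // averageTokensPerBatch
  budget.getTotalBudget(), // totalTokenBudget
);

// Reserve before dispatching a batch; release when it settles.
if (budget.allocateForBatch(0, perBatchEstimate)) {
  try {
    // ... dispatch batch 0 to the AI provider, bounded by `concurrency` workers ...
  } finally {
    budget.releaseBatch(0); // moves the reservation into "used"
  }
}
console.log(budget.getBudgetStatus()); // { total, used, reserved, available, ... }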
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@juspay/yama",
- "version": "1.3.0",
+ "version": "1.4.0",
  "description": "Enterprise-grade Pull Request automation toolkit with AI-powered code review and description enhancement",
  "keywords": [
  "pr",
@@ -83,7 +83,7 @@
  "check:all": "npm run lint && npm run format --check && npm run validate && npm run validate:commit"
  },
  "dependencies": {
- "@juspay/neurolink": "^5.1.0",
+ "@juspay/neurolink": "^7.40.1",
  "@nexus2520/bitbucket-mcp-server": "^0.10.0",
  "chalk": "^4.1.2",
  "commander": "^11.0.0",
@@ -97,36 +97,36 @@
  "yaml": "^2.3.0"
  },
  "devDependencies": {
+ "@changesets/changelog-github": "^0.5.1",
+ "@changesets/cli": "^2.26.2",
+ "@eslint/js": "^9.0.0",
+ "@semantic-release/changelog": "^6.0.3",
+ "@semantic-release/commit-analyzer": "^13.0.0",
+ "@semantic-release/git": "^10.0.1",
+ "@semantic-release/github": "^11.0.0",
+ "@semantic-release/npm": "^12.0.1",
+ "@semantic-release/release-notes-generator": "^14.0.1",
  "@types/commander": "^2.12.5",
  "@types/inquirer": "^9.0.8",
  "@types/jest": "^29.0.0",
  "@types/lodash": "^4.14.0",
  "@types/node": "^20.0.0",
- "@eslint/js": "^9.0.0",
  "@typescript-eslint/eslint-plugin": "^8.0.0",
  "@typescript-eslint/parser": "^8.0.0",
  "eslint": "^9.0.0",
+ "husky": "^9.0.0",
  "jest": "^29.0.0",
+ "lint-staged": "^15.0.0",
+ "prettier": "^3.0.0",
+ "publint": "^0.3.0",
  "rimraf": "^5.0.0",
+ "semantic-release": "^24.0.0",
  "ts-jest": "^29.0.0",
  "ts-node": "^10.0.0",
  "ts-node-dev": "^2.0.0",
  "tsc-alias": "^1.8.0",
  "typedoc": "^0.25.0",
- "typescript": "^5.0.0",
- "@changesets/changelog-github": "^0.5.1",
- "@changesets/cli": "^2.26.2",
- "@semantic-release/changelog": "^6.0.3",
- "@semantic-release/commit-analyzer": "^13.0.0",
- "@semantic-release/git": "^10.0.1",
- "@semantic-release/github": "^11.0.0",
- "@semantic-release/npm": "^12.0.1",
- "@semantic-release/release-notes-generator": "^14.0.1",
- "semantic-release": "^24.0.0",
- "prettier": "^3.0.0",
- "publint": "^0.3.0",
- "husky": "^9.0.0",
- "lint-staged": "^15.0.0"
+ "typescript": "^5.0.0"
  },
  "peerDependencies": {
  "typescript": ">=4.5.0"
@@ -51,16 +51,54 @@ features:
  - "Performance bottlenecks"
  - "Error handling"
  - "Code quality"
-
+
  # NEW: Batch Processing Configuration
  batchProcessing:
  enabled: true # Enable batch processing for large PRs
  maxFilesPerBatch: 3 # Maximum files to process in each batch
  prioritizeSecurityFiles: true # Process security-sensitive files first
- parallelBatches: false # Process batches sequentially for reliability
+ parallelBatches: false # Keep for backward compatibility
  batchDelayMs: 1000 # Delay between batches in milliseconds
  singleRequestThreshold: 5 # Use single request for PRs with ≤5 files

+ # NEW: Parallel Processing Configuration
+ parallel:
+ enabled: true # Enable parallel processing by default
+ maxConcurrentBatches: 3 # Maximum concurrent batches
+ rateLimitStrategy: "fixed" # Options: fixed, adaptive
+ tokenBudgetDistribution: "equal" # Options: equal, weighted
+ failureHandling: "continue" # Options: continue, stop-all
+
+ # Multi-Instance Processing Configuration
+ multiInstance:
+ enabled: true # Enable multi-instance review
+ instanceCount: 2 # Number of instances to run in parallel
+ instances:
+ - name: "primary"
+ provider: "vertex"
+ model: "gemini-2.5-pro"
+ temperature: 0.3
+ weight: 1.0
+ - name: "secondary"
+ provider: "vertex"
+ model: "gemini-2.5-pro"
+ temperature: 0.1
+ weight: 1.0
+ deduplication:
+ enabled: true
+ similarityThreshold: 40 # Similarity percentage threshold (0-100)
+ maxCommentsToPost: 30
+ prioritizeBy: "severity"
+
+ # NEW: Semantic Comment Deduplication Configuration
+ semanticDeduplication:
+ enabled: true # Enable AI-powered semantic similarity analysis
+ similarityThreshold: 70 # Similarity percentage threshold (0-100)
+ batchSize: 15 # Number of violations to process per batch
+ timeout: "5m" # Timeout for similarity analysis
+ fallbackOnError: true # Return all violations if AI analysis fails
+ logMatches: true # Log detailed similarity match information
+
  # Description Enhancement Configuration
  descriptionEnhancement:
  enabled: true
@@ -154,8 +192,8 @@ monitoring:
  # Memory Bank Configuration
  memoryBank:
  enabled: true
- path: "memory-bank" # Primary path to look for memory bank files
- fallbackPaths: # Optional fallback paths if primary doesn't exist
+ path: "memory-bank" # Primary path to look for memory bank files
+ fallbackPaths: # Optional fallback paths if primary doesn't exist
  - "docs/memory-bank"
  - ".memory-bank"
  - "project-docs/context"