@juspay/yama 1.5.0 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
+ # [1.6.0](https://github.com/juspay/yama/compare/v1.5.1...v1.6.0) (2025-10-24)
+
+ ### Features
+
+ - added support for system prompt and fixed required section check in description enhancer ([c22d1ff](https://github.com/juspay/yama/commit/c22d1ff15a165379dece65145123433f7c1d6b98))
+
+ ## [1.5.1](https://github.com/juspay/yama/compare/v1.5.0...v1.5.1) (2025-09-24)
+
+ ### Bug Fixes
+
+ - **allocation:** Added fix for batch token allocation ([11f7192](https://github.com/juspay/yama/commit/11f719257a75ba946c45612e336db69a17cf278d))
+
  # [1.5.0](https://github.com/juspay/yama/compare/v1.4.1...v1.5.0) (2025-09-19)

  ### Features
@@ -59,7 +59,13 @@ export class Guardian {
  fallbackPaths: ["docs/memory-bank", ".memory-bank"],
  });
  this.codeReviewer = new CodeReviewer(this.bitbucketProvider, this.config.providers.ai, this.config.features.codeReview);
- this.descriptionEnhancer = new DescriptionEnhancer(this.bitbucketProvider, this.config.providers.ai);
+ this.descriptionEnhancer = new DescriptionEnhancer(this.bitbucketProvider, this.config.providers.ai, this.config.features.descriptionEnhancement);
+ logger.debug("Description Enhancement Configuration:");
+ logger.debug(` - Enabled: ${this.config.features.descriptionEnhancement.enabled}`);
+ logger.debug(` - Required Sections: ${this.config.features.descriptionEnhancement.requiredSections.length} (${this.config.features.descriptionEnhancement.requiredSections.map((s) => s.key).join(", ")})`);
+ logger.debug(` - Custom systemPrompt: ${this.config.features.descriptionEnhancement.systemPrompt ? "Yes" : "No (using default)"}`);
+ logger.debug(` - Custom enhancementInstructions: ${this.config.features.descriptionEnhancement.enhancementInstructions ? "Yes" : "No (using default)"}`);
+ logger.debug(` - outputTemplate: ${this.config.features.descriptionEnhancement.outputTemplate ? "Provided" : "Not provided"}`);
  this.initialized = true;
  logger.success("✅ Yama initialized successfully");
  }
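
The Guardian now passes `this.config.features.descriptionEnhancement` into `DescriptionEnhancer` and logs each option it finds. A minimal sketch of what that feature config could look like, inferred only from the fields the debug logging above reads (`enabled`, `requiredSections`, `systemPrompt`, `enhancementInstructions`, `outputTemplate`); the actual yama config schema may differ:

```ts
// Hypothetical shape, inferred from the fields read above — not the published schema.
interface RequiredSection {
  key: string;       // e.g. "changelog"
  name: string;      // e.g. "Changelog (Modules Modified)"
  required: boolean;
}

interface DescriptionEnhancementConfig {
  enabled: boolean;
  requiredSections: RequiredSection[];
  systemPrompt?: string;            // overrides the built-in system prompt when set
  enhancementInstructions?: string; // replaces the default prompt preamble when set
  outputTemplate?: string;
}

// Example instance with one required section and a custom system prompt.
const descriptionEnhancement: DescriptionEnhancementConfig = {
  enabled: true,
  requiredSections: [
    { key: "changelog", name: "Changelog (Modules Modified)", required: true },
  ],
  systemPrompt: "You are a release-notes editor. Return only the enhanced description.",
};
```
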
@@ -5,6 +5,7 @@
  import { ProviderError, } from "../../types/index.js";
  import { logger } from "../../utils/Logger.js";
  import { cache, Cache } from "../../utils/Cache.js";
+ import { RetryManager } from "../../utils/RetryManager.js";
  export class BitbucketProvider {
  apiClient;
  branchHandlers;
@@ -85,14 +86,14 @@ export class BitbucketProvider {
  throw new ProviderError("Branch name is required");
  }
  const cacheKey = Cache.keys.branchInfo(workspace, repository, branch);
- return cache.getOrSet(cacheKey, async () => {
+ return cache.getOrSetResilient(cacheKey, async () => {
  logger.debug(`Finding PR for branch: ${workspace}/${repository}@${branch}`);
- const rawBranchData = await this.branchHandlers.handleGetBranch({
+ const rawBranchData = await RetryManager.withRetry(() => this.branchHandlers.handleGetBranch({
  workspace,
  repository,
  branch_name: branch,
  include_merged_prs: false,
- });
+ }), `Find PR for branch ${workspace}/${repository}@${branch}`);
  const branchData = this.parseMCPResponse(rawBranchData);
  // Direct data extraction
  if (branchData.open_pull_requests &&
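
Every MCP handler call in this provider is now wrapped in `RetryManager.withRetry(fn, label)`, and cached lookups switch from `cache.getOrSet` to `cache.getOrSetResilient`. The diff does not include `utils/RetryManager.js` itself, so the following is only a hedged sketch of a wrapper with the same call shape (an async factory plus a human-readable label), assuming a bounded retry loop with exponential backoff:

```ts
// Sketch only — the real RetryManager may use different attempt counts,
// backoff timing, or error filtering.
class RetryManager {
  static async withRetry<T>(
    fn: () => Promise<T>,
    label: string,
    maxAttempts = 3,
    baseDelayMs = 500,
  ): Promise<T> {
    let lastError: unknown;
    for (let attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return await fn();
      } catch (error) {
        lastError = error;
        if (attempt < maxAttempts) {
          // Exponential backoff between attempts: 500ms, 1000ms, 2000ms, ...
          await new Promise((resolve) => setTimeout(resolve, baseDelayMs * 2 ** (attempt - 1)));
        }
      }
    }
    throw new Error(`${label} failed after ${maxAttempts} attempts: ${String(lastError)}`);
  }
}
```
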
@@ -131,13 +132,13 @@ export class BitbucketProvider {
  throw new ProviderError("Pull request ID is required");
  }
  const cacheKey = Cache.keys.prInfo(workspace, repository, pullRequestId);
- return cache.getOrSet(cacheKey, async () => {
+ return cache.getOrSetResilient(cacheKey, async () => {
  logger.debug(`Getting PR details: ${workspace}/${repository}#${pullRequestId}`);
- const rawPRDetails = await this.pullRequestHandlers.handleGetPullRequest({
+ const rawPRDetails = await RetryManager.withRetry(() => this.pullRequestHandlers.handleGetPullRequest({
  workspace,
  repository,
  pull_request_id: pullRequestId,
- });
+ }), `Get PR details ${workspace}/${repository}#${pullRequestId}`);
  const prData = this.parseMCPResponse(rawPRDetails);
  // Debug author data structure
  logger.debug(`PR Details author data structure: ${JSON.stringify(prData.author, null, 2)}`);
@@ -175,7 +176,7 @@ export class BitbucketProvider {
  const cacheKey = includePatterns && includePatterns.length === 1
  ? `file-diff:${workspace}:${repository}:${pullRequestId}:${includePatterns[0]}`
  : Cache.keys.prDiff(workspace, repository, pullRequestId);
- return cache.getOrSet(cacheKey, async () => {
+ return cache.getOrSetResilient(cacheKey, async () => {
  logger.debug(`Getting PR diff: ${workspace}/${repository}#${pullRequestId}`);
  if (includePatterns) {
  logger.debug(`Include patterns: ${includePatterns.join(", ")}`);
@@ -191,7 +192,7 @@ export class BitbucketProvider {
  if (includePatterns) {
  args.include_patterns = includePatterns;
  }
- const rawDiff = await this.reviewHandlers.handleGetPullRequestDiff(args);
+ const rawDiff = await RetryManager.withRetry(() => this.reviewHandlers.handleGetPullRequestDiff(args), `Get PR diff ${workspace}/${repository}#${pullRequestId}`);
  const diffData = this.parseMCPResponse(rawDiff);
  return {
  diff: diffData.diff || "",
@@ -207,18 +208,28 @@ export class BitbucketProvider {
  async getFileContent(workspace, repository, filePath, branch) {
  await this.initialize();
  const cacheKey = Cache.keys.fileContent(workspace, repository, filePath, branch);
- return cache.getOrSet(cacheKey, async () => {
+ return cache.getOrSetResilient(cacheKey, async () => {
  logger.debug(`Getting file content: ${workspace}/${repository}/${filePath}@${branch}`);
- const result = await this.fileHandlers.handleGetFileContent({
+ const result = await RetryManager.withRetry(() => this.fileHandlers.handleGetFileContent({
  workspace,
  repository,
  file_path: filePath,
  branch,
- });
- // Handle file content response directly (don't JSON parse)
- if (result.content && result.content[0] && result.content[0].text) {
- const fileResponse = JSON.parse(result.content[0].text);
- return fileResponse.content || "";
+ }), `Get file content ${workspace}/${repository}/${filePath}@${branch}`);
+ // Handle file content response with proper error handling for plain text files
+ if (result.content &&
+ result.content[0] &&
+ result.content[0].text) {
+ try {
+ const fileResponse = JSON.parse(result.content[0].text);
+ return fileResponse.content || "";
+ }
+ catch (parseError) {
+ // If JSON parsing fails, the content might be plain text (like .clinerules)
+ // Return the text content directly
+ logger.debug(`JSON parsing failed for ${filePath}, treating as plain text: ${parseError.message}`);
+ return result.content[0].text || "";
+ }
  }
  // Handle direct response format
  return result.content || "";
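
The rewritten `getFileContent` still prefers the JSON envelope returned by the MCP handler, but now falls back to returning the raw text when parsing fails (for plain-text files such as `.clinerules`). The core pattern, reduced to a standalone sketch:

```ts
// Parse-or-passthrough: use the JSON envelope's `content` field when present,
// otherwise treat the payload as plain text and return it verbatim.
function extractFileContent(rawText: string): string {
  try {
    const parsed = JSON.parse(rawText) as { content?: string };
    return parsed.content ?? "";
  } catch {
    return rawText; // not JSON — e.g. a plain-text .clinerules file
  }
}
```
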
@@ -230,14 +241,14 @@ export class BitbucketProvider {
  async listDirectoryContent(workspace, repository, path, branch) {
  await this.initialize();
  const cacheKey = Cache.keys.directoryContent(workspace, repository, path, branch);
- return cache.getOrSet(cacheKey, async () => {
+ return cache.getOrSetResilient(cacheKey, async () => {
  logger.debug(`Listing directory: ${workspace}/${repository}/${path}@${branch}`);
- const result = await this.fileHandlers.handleListDirectoryContent({
+ const result = await RetryManager.withRetry(() => this.fileHandlers.handleListDirectoryContent({
  workspace,
  repository,
  path,
  branch,
- });
+ }), `List directory ${workspace}/${repository}/${path}@${branch}`);
  const dirData = this.parseMCPResponse(result);
  return dirData.contents || [];
  }, 3600);
@@ -254,12 +265,12 @@ export class BitbucketProvider {
  try {
  logger.debug(`Updating PR description: ${workspace}/${repository}#${pullRequestId}`);
  logger.debug(`Description length: ${description.length} characters`);
- const result = await this.pullRequestHandlers.handleUpdatePullRequest({
+ const result = await RetryManager.withRetry(() => this.pullRequestHandlers.handleUpdatePullRequest({
  workspace,
  repository,
  pull_request_id: pullRequestId,
  description: description,
- });
+ }), `Update PR description ${workspace}/${repository}#${pullRequestId}`);
  // Log the raw MCP response
  logger.debug(`Raw MCP update response: ${JSON.stringify(result, null, 2)}`);
  const updateData = this.parseMCPResponse(result);
@@ -335,10 +346,12 @@ export class BitbucketProvider {
  logger.debug(` Type: ${options.lineType || "CONTEXT"}`);
  }
  logger.debug(`🔍 MCP addComment args: ${JSON.stringify(args, null, 2)}`);
- const result = await this.pullRequestHandlers.handleAddComment(args);
+ const result = await RetryManager.withRetry(() => this.pullRequestHandlers.handleAddComment(args), `Add comment to PR ${workspace}/${repository}#${pullRequestId}`);
  // Parse response exactly like pr-police.js
  let commentData;
- if (result.content && result.content[0] && result.content[0].text) {
+ if (result.content &&
+ result.content[0] &&
+ result.content[0].text) {
  commentData = JSON.parse(result.content[0].text);
  }
  else {
@@ -361,7 +374,7 @@ export class BitbucketProvider {
  pull_request_id: pullRequestId,
  comment_text: `**File: ${options.filePath}**\n\n${commentText}`,
  };
- const fallbackResult = await this.pullRequestHandlers.handleAddComment(fallbackArgs);
+ const fallbackResult = await RetryManager.withRetry(() => this.pullRequestHandlers.handleAddComment(fallbackArgs), `Add fallback comment to PR ${workspace}/${repository}#${pullRequestId}`);
  let fallbackData;
  if (fallbackResult.content &&
  fallbackResult.content[0] &&
@@ -108,6 +108,18 @@ export declare class CodeReviewer {
  * Process batches serially (original implementation)
  */
  private processSerially;
+ /**
+ * Pre-allocate tokens based on distribution strategy with proper integer arithmetic
+ */
+ private preAllocateTokens;
+ /**
+ * Try weighted allocation for batches
+ */
+ private tryWeightedAllocation;
+ /**
+ * Try equal allocation for batches
+ */
+ private tryEqualAllocation;
  /**
  * Process a single batch with concurrency control
  */
@@ -922,7 +922,25 @@ ${recommendation}
  // Initialize concurrency control
  const semaphore = new Semaphore(optimalConcurrency);
  const tokenBudget = new TokenBudgetManager(this.getSafeTokenLimit() * 0.8); // 80% for safety
- logger.info(`🎯 Parallel processing: ${optimalConcurrency} concurrent batches, ${tokenBudget.getTotalBudget()} token budget`);
+ // NEW: Pre-allocate tokens based on distribution strategy
+ const distributionStrategy = parallelConfig.tokenBudgetDistribution || "equal";
+ logger.info(`🎯 Using ${distributionStrategy} token distribution strategy for ${batches.length} batches`);
+ const tokenAllocations = this.preAllocateTokens(batches, tokenBudget, distributionStrategy);
+ if (!tokenAllocations) {
+ const totalRequired = batches.reduce((sum, b) => sum + b.estimatedTokens, 0);
+ const totalBudget = tokenBudget.getTotalBudget();
+ throw new Error(`Insufficient token budget: required ${totalRequired}, available ${totalBudget}. ` +
+ `Consider reducing batch count (current: ${batches.length}) or increasing token limit.`);
+ }
+ // Apply pre-allocated tokens to the budget manager
+ if (!tokenBudget.preAllocateAllBatches(tokenAllocations)) {
+ throw new Error("Failed to pre-allocate tokens for all batches");
+ }
+ logger.info(`🎯 Parallel processing: ${optimalConcurrency} concurrent batches, ${tokenBudget.getTotalBudget()} token budget (${distributionStrategy} distribution)`);
+ // Log allocation details
+ tokenAllocations.forEach((tokens, batchIndex) => {
+ logger.debug(`Batch ${batchIndex + 1}: ${tokens} tokens allocated`);
+ });
  const batchResults = new Array(batches.length);
  const allViolations = [];
  const processingPromises = [];
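
When `preAllocateTokens` cannot produce an allocation, the code above fails fast with a message built from the summed batch estimates. Illustrative arithmetic for that message (the numbers are made up):

```ts
// Three hypothetical batches against a 20_000-token budget.
const batches = [{ estimatedTokens: 12_000 }, { estimatedTokens: 8_000 }, { estimatedTokens: 5_000 }];
const totalRequired = batches.reduce((sum, b) => sum + b.estimatedTokens, 0); // 25000
const totalBudget = 20_000; // what tokenBudget.getTotalBudget() might report
console.log(
  `Insufficient token budget: required ${totalRequired}, available ${totalBudget}. ` +
    `Consider reducing batch count (current: ${batches.length}) or increasing token limit.`,
);
```
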
@@ -1006,6 +1024,149 @@ ${recommendation}
  logger.success(`🎯 Serial processing completed: ${allViolations.length} total violations from ${batches.length} batches in ${Math.round(totalTime / 1000)}s (avg ${avgBatchSize.toFixed(1)} files/batch)`);
  return { violations: allViolations, batchResults };
  }
+ /**
+ * Pre-allocate tokens based on distribution strategy with proper integer arithmetic
+ */
+ preAllocateTokens(batches, tokenBudget, strategy) {
+ // Ensure we're working with integer budget to avoid floating-point issues
+ const totalBudget = Math.floor(tokenBudget.getTotalBudget());
+ const allocations = new Map();
+ if (strategy === "equal") {
+ // Equal distribution: divide budget equally among all batches with proper remainder handling
+ const baseTokens = Math.floor(totalBudget / batches.length);
+ const remainder = totalBudget % batches.length;
+ if (baseTokens < 1000) {
+ // Minimum viable tokens per batch
+ logger.error(`Equal distribution would give ${baseTokens} tokens per batch, which is insufficient`);
+ return null;
+ }
+ let totalAllocated = 0;
+ for (let i = 0; i < batches.length; i++) {
+ // Distribute remainder to first few batches
+ const tokens = baseTokens + (i < remainder ? 1 : 0);
+ allocations.set(i, tokens);
+ totalAllocated += tokens;
+ }
+ // Double-check that we haven't exceeded budget due to any calculation errors
+ if (totalAllocated > totalBudget) {
+ logger.error(`Equal distribution calculation error: ${totalAllocated} > ${totalBudget}`);
+ // Adjust the last batch to fit within budget
+ const lastBatchIndex = batches.length - 1;
+ const lastBatchTokens = allocations.get(lastBatchIndex);
+ const adjustment = totalAllocated - totalBudget;
+ const newLastBatchTokens = lastBatchTokens - adjustment;
+ if (newLastBatchTokens < 1000) {
+ logger.error(`Adjustment would result in last batch having ${newLastBatchTokens} tokens, which is below the minimum threshold (1000). Aborting allocation.`);
+ return null;
+ }
+ allocations.set(lastBatchIndex, newLastBatchTokens);
+ totalAllocated = totalBudget;
+ logger.warn(`Adjusted last batch by -${adjustment} tokens to fit budget`);
+ }
+ logger.info(`Equal distribution: ${baseTokens} tokens per batch for ${batches.length} batches`);
+ logger.debug(`Pre-allocated ${totalAllocated} tokens across ${batches.length} batches (${totalBudget - totalAllocated} remaining)`);
+ }
+ else if (strategy === "weighted") {
+ // Weighted distribution: try weighted first, automatically fallback to equal if needed
+ logger.debug(`Attempting weighted distribution...`);
+ const weightedResult = this.tryWeightedAllocation(batches, totalBudget);
+ if (weightedResult) {
+ // Weighted allocation succeeded
+ weightedResult.forEach((tokens, batchIndex) => {
+ allocations.set(batchIndex, tokens);
+ });
+ logger.info(`✅ Weighted distribution: optimal allocation successful`);
+ logger.debug(`Pre-allocated ${Array.from(weightedResult.values()).reduce((sum, tokens) => sum + tokens, 0)} tokens across ${batches.length} batches`);
+ }
+ else {
+ // Weighted allocation failed, automatically fallback to equal distribution
+ logger.warn(`⚠️ Weighted distribution: insufficient budget for optimal allocation, falling back to equal distribution`);
+ const equalResult = this.tryEqualAllocation(batches, totalBudget);
+ if (!equalResult) {
+ logger.error(`Weighted distribution: both optimal and equal allocation failed`);
+ return null;
+ }
+ equalResult.forEach((tokens, batchIndex) => {
+ allocations.set(batchIndex, tokens);
+ });
+ logger.info(`✅ Weighted distribution: equal allocation fallback successful`);
+ logger.debug(`Pre-allocated ${Array.from(equalResult.values()).reduce((sum, tokens) => sum + tokens, 0)} tokens across ${batches.length} batches`);
+ }
+ }
+ // Final validation with strict integer checking
+ const totalAllocated = Array.from(allocations.values()).reduce((sum, tokens) => sum + tokens, 0);
+ if (totalAllocated > totalBudget) {
+ logger.error(`CRITICAL: Total allocation (${totalAllocated}) exceeds budget (${totalBudget}) - this should never happen`);
+ logger.error(`Budget type: ${typeof totalBudget}, Allocation type: ${typeof totalAllocated}`);
+ logger.error(`Individual allocations: ${Array.from(allocations.entries())
+ .map(([i, tokens]) => `batch${i}:${tokens}`)
+ .join(", ")}`);
+ throw new Error(`Total allocation (${totalAllocated}) exceeds budget (${totalBudget}) - this should never happen`);
+ }
+ return allocations;
+ }
+ /**
+ * Try weighted allocation for batches
+ */
+ tryWeightedAllocation(batches, totalBudget) {
+ const totalEstimated = batches.reduce((sum, batch) => sum + batch.estimatedTokens, 0);
+ if (totalEstimated > totalBudget) {
+ logger.debug(`Total estimated tokens (${totalEstimated}) exceed budget (${totalBudget})`);
+ return null;
+ }
+ let totalAllocated = 0;
+ const minTokensPerBatch = 1000;
+ const allocations = new Map();
+ for (let i = 0; i < batches.length; i++) {
+ const batch = batches[i];
+ const weight = batch.estimatedTokens / totalEstimated;
+ const allocation = Math.floor(weight * totalBudget);
+ const finalAllocation = Math.max(allocation, minTokensPerBatch);
+ allocations.set(i, finalAllocation);
+ totalAllocated += finalAllocation;
+ }
+ // Check if we exceeded budget due to minimum allocations
+ if (totalAllocated > totalBudget) {
+ logger.debug(`Weighted allocation with minimums (${totalAllocated}) exceeds budget (${totalBudget})`);
+ return null;
+ }
+ return allocations;
+ }
+ /**
+ * Try equal allocation for batches
+ */
+ tryEqualAllocation(batches, totalBudget) {
+ const baseTokens = Math.floor(totalBudget / batches.length);
+ const remainder = totalBudget % batches.length;
+ if (baseTokens < 1000) {
+ // Minimum viable tokens per batch
+ logger.debug(`Equal distribution would give ${baseTokens} tokens per batch, which is insufficient`);
+ return null;
+ }
+ const allocations = new Map();
+ let totalAllocated = 0;
+ for (let i = 0; i < batches.length; i++) {
+ // Distribute remainder to first few batches
+ const tokens = baseTokens + (i < remainder ? 1 : 0);
+ allocations.set(i, tokens);
+ totalAllocated += tokens;
+ }
+ // Double-check that we haven't exceeded budget due to any calculation errors
+ if (totalAllocated > totalBudget) {
+ logger.debug(`Equal distribution calculation error: ${totalAllocated} > ${totalBudget}`);
+ // Adjust the last batch to fit within budget
+ const lastBatchIndex = batches.length - 1;
+ const lastBatchTokens = allocations.get(lastBatchIndex);
+ const adjustment = totalAllocated - totalBudget;
+ const newLastBatchTokens = lastBatchTokens - adjustment;
+ if (newLastBatchTokens < 1000) {
+ logger.error(`Adjustment would result in last batch having ${newLastBatchTokens} tokens, which is below the minimum threshold (1000). Aborting allocation.`);
+ return null;
+ }
+ allocations.set(lastBatchIndex, newLastBatchTokens);
+ }
+ return allocations;
+ }
  /**
  * Process a single batch with concurrency control
  */
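
The equal-split branch above distributes the integer remainder to the first few batches so the allocations sum exactly to the budget. A worked example with illustrative numbers:

```ts
// A 10_000-token budget across 3 batches.
const totalBudget = 10_000;
const batchCount = 3;
const baseTokens = Math.floor(totalBudget / batchCount); // 3333
const remainder = totalBudget % batchCount;              // 1
const allocations = new Map<number, number>();
for (let i = 0; i < batchCount; i++) {
  // The first `remainder` batches absorb one extra token each.
  allocations.set(i, baseTokens + (i < remainder ? 1 : 0));
}
// allocations: 0 → 3334, 1 → 3333, 2 → 3333; the sum is exactly 10_000,
// so the defensive "exceeds budget" adjustment never triggers for this input.
console.log([...allocations.values()].reduce((sum, t) => sum + t, 0)); // 10000
```
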
@@ -1013,9 +1174,22 @@ ${recommendation}
  // Acquire semaphore permit
  await semaphore.acquire();
  try {
- // Check token budget
- if (!tokenBudget.allocateForBatch(batchIndex, batch.estimatedTokens)) {
- throw new Error(`Insufficient token budget for batch ${batchIndex + 1}`);
+ // NEW: In pre-allocation mode, tokens are already allocated, just verify and mark as processing
+ if (tokenBudget.isPreAllocationMode()) {
+ const batchState = tokenBudget.getBatchState(batchIndex);
+ if (batchState !== "pending") {
+ throw new Error(`Batch ${batchIndex + 1} is not in pending state (current: ${batchState})`);
+ }
+ // Mark as processing (this is handled in allocateForBatch for pre-allocation mode)
+ if (!tokenBudget.allocateForBatch(batchIndex, batch.estimatedTokens)) {
+ throw new Error(`Failed to mark batch ${batchIndex + 1} as processing`);
+ }
+ }
+ else {
+ // Legacy mode: allocate tokens dynamically
+ if (!tokenBudget.allocateForBatch(batchIndex, batch.estimatedTokens)) {
+ throw new Error(`Insufficient token budget for batch ${batchIndex + 1}`);
+ }
  }
  logger.info(`🔄 Processing batch ${batchIndex + 1}/${totalBatches} (${batch.files.length} files, parallel)`);
  // Process the batch (existing logic)
@@ -1023,6 +1197,11 @@ ${recommendation}
  logger.info(`✅ Batch ${batchIndex + 1} completed: ${result.violations.length} violations in ${Math.round(result.processingTime / 1000)}s`);
  return result;
  }
+ catch (error) {
+ // Mark batch as failed in token budget
+ tokenBudget.markBatchFailed(batchIndex, error.message);
+ throw error;
+ }
  finally {
  // Always release resources
  tokenBudget.releaseBatch(batchIndex);
@@ -2,15 +2,21 @@
  * Enhanced Description Enhancer - Optimized to work with Unified Context
  * Preserves all original functionality from pr-describe.js but optimized
  */
- import { EnhancementOptions, EnhancementResult, AIProviderConfig } from "../types/index.js";
+ import { EnhancementOptions, EnhancementResult, AIProviderConfig, DescriptionEnhancementConfig } from "../types/index.js";
  import { UnifiedContext } from "../core/ContextGatherer.js";
  import { BitbucketProvider } from "../core/providers/BitbucketProvider.js";
  export declare class DescriptionEnhancer {
  private neurolink;
  private bitbucketProvider;
  private aiConfig;
+ private enhancementConfig;
  private defaultRequiredSections;
- constructor(bitbucketProvider: BitbucketProvider, aiConfig: AIProviderConfig);
+ constructor(bitbucketProvider: BitbucketProvider, aiConfig: AIProviderConfig, enhancementConfig: DescriptionEnhancementConfig);
+ /**
+ * Get system prompt for description enhancement
+ * Uses config.systemPrompt if provided, otherwise uses default
+ */
+ private getSystemPrompt;
  /**
  * Enhance description using pre-gathered unified context (OPTIMIZED)
  */
@@ -60,5 +66,5 @@ export declare class DescriptionEnhancer {
  */
  getStats(): any;
  }
- export declare function createDescriptionEnhancer(bitbucketProvider: BitbucketProvider, aiConfig: AIProviderConfig): DescriptionEnhancer;
+ export declare function createDescriptionEnhancer(bitbucketProvider: BitbucketProvider, aiConfig: AIProviderConfig, enhancementConfig: DescriptionEnhancementConfig): DescriptionEnhancer;
  //# sourceMappingURL=DescriptionEnhancer.d.ts.map
@@ -8,6 +8,7 @@ export class DescriptionEnhancer {
  neurolink;
  bitbucketProvider;
  aiConfig;
+ enhancementConfig;
  defaultRequiredSections = [
  { key: "changelog", name: "Changelog (Modules Modified)", required: true },
  {
@@ -21,9 +22,33 @@ export class DescriptionEnhancer {
  required: true,
  },
  ];
- constructor(bitbucketProvider, aiConfig) {
+ constructor(bitbucketProvider, aiConfig, enhancementConfig) {
  this.bitbucketProvider = bitbucketProvider;
  this.aiConfig = aiConfig;
+ this.enhancementConfig = enhancementConfig;
+ }
+ /**
+ * Get system prompt for description enhancement
+ * Uses config.systemPrompt if provided, otherwise uses default
+ */
+ getSystemPrompt() {
+ const isCustomPrompt = !!this.enhancementConfig.systemPrompt;
+ if (isCustomPrompt) {
+ logger.debug("✓ Using custom systemPrompt from configuration");
+ logger.debug(`Custom prompt preview: ${this.enhancementConfig.systemPrompt?.substring(0, 100)}...`);
+ }
+ else {
+ logger.debug("Using default systemPrompt (no custom prompt configured)");
+ }
+ return (this.enhancementConfig.systemPrompt ||
+ `You are an Expert Technical Writer specializing in pull request documentation.
+ Focus on clarity, completeness, and helping reviewers understand the changes.
+
+ CRITICAL INSTRUCTION: Return ONLY the enhanced PR description content.
+ - DO NOT add meta-commentary like "No description provided" or "Here is the enhanced description"
+ - DO NOT add explanatory text about what you're doing
+ - START directly with the actual PR content (title, sections, etc.)
+ - If there's no existing description, just write the new sections without mentioning it`);
  }
  /**
  * Enhance description using pre-gathered unified context (OPTIMIZED)
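
`getSystemPrompt` gives precedence to a configured `systemPrompt` and only falls back to the built-in default when the option is absent (or an empty string, because of the `||`). A reduced sketch of that rule, with illustrative names rather than the package's exports:

```ts
// Illustrative names only — not the package's actual exports.
const DEFAULT_SYSTEM_PROMPT =
  "You are an Expert Technical Writer specializing in pull request documentation. " +
  "Return ONLY the enhanced PR description content.";

function resolveSystemPrompt(config: { systemPrompt?: string }): string {
  return config.systemPrompt || DEFAULT_SYSTEM_PROMPT;
}

resolveSystemPrompt({});                                     // → built-in default
resolveSystemPrompt({ systemPrompt: "Write terse notes." }); // → "Write terse notes."
```
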
@@ -34,7 +59,21 @@ export class DescriptionEnhancer {
  logger.phase("📝 Enhancing PR description...");
  logger.info(`Processing PR #${context.pr.id}: "${context.pr.title}"`);
  // Step 1: Analyze existing content and identify what needs enhancement
- const analysisResult = this.analyzeExistingContent(context.pr.description, options.customSections || this.defaultRequiredSections);
+ const sectionsToUse = options.customSections || this.defaultRequiredSections;
+ logger.debug(`Checking ${sectionsToUse.length} required sections: ${sectionsToUse.map((s) => s.key).join(", ")}`);
+ const analysisResult = this.analyzeExistingContent(context.pr.description, sectionsToUse);
+ const presentSections = analysisResult.requiredSections
+ .filter((s) => s.present)
+ .map((s) => s.key);
+ const missingSections = analysisResult.requiredSections
+ .filter((s) => !s.present)
+ .map((s) => s.key);
+ if (presentSections.length > 0) {
+ logger.debug(`✓ Present sections: ${presentSections.join(", ")}`);
+ }
+ if (missingSections.length > 0) {
+ logger.debug(`✗ Missing sections: ${missingSections.join(", ")}`);
+ }
  logger.info(`Content analysis: ${analysisResult.preservedContent.media.length} media items, ` +
  `${analysisResult.missingCount} missing sections`);
  // Step 2: Generate enhanced description using AI
@@ -135,15 +174,21 @@ export class DescriptionEnhancer {
  ],
  };
  return requiredSections.map((section) => {
- const patterns = sectionPatterns[section.key];
- const isPresent = patterns
- ? patterns.some((pattern) => pattern.test(description))
- : false;
+ let patterns = sectionPatterns[section.key];
+ if (!patterns) {
+ logger.debug(`No predefined pattern for section "${section.key}", using dynamic pattern based on name`);
+ const nameWords = section.name.split(/\s+/).filter((w) => w.length > 2); // Filter out short words like "Or", "Of"
+ const namePattern = new RegExp(`##.*?${nameWords.join(".*?")}`, "i");
+ const keyWords = section.key.split("_").filter((w) => w.length > 2);
+ const keyPattern = new RegExp(`##.*?${keyWords.join(".*?")}`, "i");
+ patterns = [namePattern, keyPattern];
+ }
+ const isPresent = patterns.some((pattern) => pattern.test(description));
  return {
  ...section,
  present: isPresent,
  content: isPresent
- ? this.extractSectionContent(description, patterns || [])
+ ? this.extractSectionContent(description, patterns)
  : "",
  };
  });
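
For custom sections that have no predefined detection pattern, the check now derives two regexes on the fly: one from the section name and one from the key. A worked example using a hypothetical section `{ key: "security_impact", name: "Security Impact Review" }`:

```ts
// Hypothetical custom section, used only to show the derived patterns.
const section = { key: "security_impact", name: "Security Impact Review" };

const nameWords = section.name.split(/\s+/).filter((w) => w.length > 2); // ["Security", "Impact", "Review"]
const namePattern = new RegExp(`##.*?${nameWords.join(".*?")}`, "i");    // /##.*?Security.*?Impact.*?Review/i

const keyWords = section.key.split("_").filter((w) => w.length > 2);     // ["security", "impact"]
const keyPattern = new RegExp(`##.*?${keyWords.join(".*?")}`, "i");      // /##.*?security.*?impact/i

const description = "## Security Impact Review\nNo new endpoints exposed.";
console.log([namePattern, keyPattern].some((p) => p.test(description))); // true
```
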
@@ -207,6 +252,7 @@ export class DescriptionEnhancer {
  try {
  const result = await this.neurolink.generate({
  input: { text: enhancementPrompt },
+ systemPrompt: this.getSystemPrompt(), // Use config or default system prompt
  provider: this.aiConfig.provider,
  model: this.aiConfig.model,
  temperature: this.aiConfig.temperature || 0.7,
@@ -224,6 +270,13 @@ export class DescriptionEnhancer {
  .replace(/^```markdown\s*/, "")
  .replace(/\s*```$/, "")
  .trim();
+ // Remove any meta-commentary that AI might have added
+ enhancedDescription = enhancedDescription
+ .replace(/^No description provided\.?\s*/i, "")
+ .replace(/^Here is the enhanced description:?\s*/i, "")
+ .replace(/^I will enhance.*?:\s*/i, "")
+ .replace(/^Enhanced description:?\s*/i, "")
+ .trim();
  if (!enhancedDescription) {
  throw new Error("AI generated empty description");
  }
@@ -231,7 +284,12 @@ export class DescriptionEnhancer {
  const finalValidation = this.validateRequiredSections(enhancedDescription, options.customSections || this.defaultRequiredSections);
  const stillMissing = finalValidation.filter((s) => !s.present);
  if (stillMissing.length > 0) {
- logger.warn(`Warning: ${stillMissing.length} required sections still missing after AI enhancement`);
+ const missingSectionNames = stillMissing.map((s) => s.key).join(", ");
+ logger.warn(`Warning: ${stillMissing.length} required sections still missing after AI enhancement: ${missingSectionNames}`);
+ logger.debug(`AI may not have added these sections or they don't match detection patterns`);
+ }
+ else {
+ logger.debug(`✓ All ${finalValidation.length} required sections are now present`);
  }
  return enhancedDescription;
  }
@@ -260,7 +318,15 @@ export class DescriptionEnhancer {
  diffInfo = `**Diff Strategy**: File-by-file analysis (${context.diffStrategy.fileCount} files)
  **Modified Files**: ${JSON.stringify(fileList, null, 2)}`;
  }
- return `You are an expert technical writer specializing in comprehensive PR descriptions.
+ const customInstructions = this.enhancementConfig.enhancementInstructions || "";
+ if (customInstructions) {
+ logger.debug("✓ Using custom enhancementInstructions from configuration");
+ logger.debug(`Instructions preview: ${customInstructions.substring(0, 80)}...`);
+ }
+ else {
+ logger.debug("Using default enhancementInstructions");
+ }
+ return `${customInstructions || "You are an expert technical writer specializing in comprehensive PR descriptions."}

  ## PR INFORMATION:
  **Title**: ${context.pr.title}
@@ -439,7 +505,7 @@ Generate the enhanced description now, ensuring ALL preservation requirements ar
  };
  }
  }
- export function createDescriptionEnhancer(bitbucketProvider, aiConfig) {
- return new DescriptionEnhancer(bitbucketProvider, aiConfig);
+ export function createDescriptionEnhancer(bitbucketProvider, aiConfig, enhancementConfig) {
+ return new DescriptionEnhancer(bitbucketProvider, aiConfig, enhancementConfig);
  }
  //# sourceMappingURL=DescriptionEnhancer.js.map
@@ -237,8 +237,9 @@ export class MultiInstanceProcessor {
  minLimit = Math.min(minLimit, instanceLimit);
  }
  // Total budget is the sum of all instance limits, but with safety margin
- const totalBudget = instances.length * minLimit * 0.8; // 80% safety margin
- logger.debug(`Calculated total token budget: ${totalBudget} (${instances.length} instances × ${minLimit} × 0.8)`);
+ // Use Math.floor to ensure integer result and avoid floating-point precision issues
+ const totalBudget = Math.floor(instances.length * minLimit * 0.8); // 80% safety margin
+ logger.debug(`Calculated total token budget: ${totalBudget} (${instances.length} instances × ${minLimit} × 0.8, floored)`);
  return totalBudget;
  }
  /**
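
Flooring the budget matters because multiplying by the 0.8 safety margin can leave a fractional token count, which would otherwise leak into the integer allocation math downstream. Illustrative numbers:

```ts
// 3 instances with a minimum per-instance limit of 33_333 tokens.
const instances = 3;
const minLimit = 33_333;
const raw = instances * minLimit * 0.8;  // ~79999.2 — not an integer
const totalBudget = Math.floor(raw);     // 79999 — keeps later allocation arithmetic exact
console.log(raw, totalBudget);
```
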