@juspay/yama 1.6.0 → 2.1.0

This diff shows the changes between publicly released versions of the package as published to a supported registry, and is provided for informational purposes only.
Files changed (79)
  1. package/.mcp-config.example.json +26 -0
  2. package/CHANGELOG.md +46 -0
  3. package/README.md +311 -685
  4. package/dist/cli/v2.cli.d.ts +13 -0
  5. package/dist/cli/v2.cli.js +359 -0
  6. package/dist/index.d.ts +12 -13
  7. package/dist/index.js +18 -19
  8. package/dist/v2/config/ConfigLoader.d.ts +50 -0
  9. package/dist/v2/config/ConfigLoader.js +205 -0
  10. package/dist/v2/config/DefaultConfig.d.ts +9 -0
  11. package/dist/v2/config/DefaultConfig.js +187 -0
  12. package/dist/v2/core/LearningOrchestrator.d.ts +65 -0
  13. package/dist/v2/core/LearningOrchestrator.js +499 -0
  14. package/dist/v2/core/MCPServerManager.d.ts +22 -0
  15. package/dist/v2/core/MCPServerManager.js +100 -0
  16. package/dist/v2/core/SessionManager.d.ts +72 -0
  17. package/dist/v2/core/SessionManager.js +200 -0
  18. package/dist/v2/core/YamaV2Orchestrator.d.ts +112 -0
  19. package/dist/v2/core/YamaV2Orchestrator.js +549 -0
  20. package/dist/v2/learning/FeedbackExtractor.d.ts +46 -0
  21. package/dist/v2/learning/FeedbackExtractor.js +237 -0
  22. package/dist/v2/learning/KnowledgeBaseManager.d.ts +91 -0
  23. package/dist/v2/learning/KnowledgeBaseManager.js +475 -0
  24. package/dist/v2/learning/types.d.ts +121 -0
  25. package/dist/v2/learning/types.js +15 -0
  26. package/dist/v2/prompts/EnhancementSystemPrompt.d.ts +8 -0
  27. package/dist/v2/prompts/EnhancementSystemPrompt.js +216 -0
  28. package/dist/v2/prompts/LangfusePromptManager.d.ts +48 -0
  29. package/dist/v2/prompts/LangfusePromptManager.js +144 -0
  30. package/dist/v2/prompts/LearningSystemPrompt.d.ts +11 -0
  31. package/dist/v2/prompts/LearningSystemPrompt.js +180 -0
  32. package/dist/v2/prompts/PromptBuilder.d.ts +45 -0
  33. package/dist/v2/prompts/PromptBuilder.js +257 -0
  34. package/dist/v2/prompts/ReviewSystemPrompt.d.ts +8 -0
  35. package/dist/v2/prompts/ReviewSystemPrompt.js +270 -0
  36. package/dist/v2/types/config.types.d.ts +141 -0
  37. package/dist/v2/types/config.types.js +5 -0
  38. package/dist/v2/types/mcp.types.d.ts +191 -0
  39. package/dist/v2/types/mcp.types.js +6 -0
  40. package/dist/v2/types/v2.types.d.ts +182 -0
  41. package/dist/v2/types/v2.types.js +42 -0
  42. package/dist/v2/utils/ObservabilityConfig.d.ts +22 -0
  43. package/dist/v2/utils/ObservabilityConfig.js +48 -0
  44. package/package.json +16 -10
  45. package/yama.config.example.yaml +259 -204
  46. package/dist/cli/index.d.ts +0 -12
  47. package/dist/cli/index.js +0 -538
  48. package/dist/core/ContextGatherer.d.ts +0 -110
  49. package/dist/core/ContextGatherer.js +0 -470
  50. package/dist/core/Guardian.d.ts +0 -81
  51. package/dist/core/Guardian.js +0 -480
  52. package/dist/core/providers/BitbucketProvider.d.ts +0 -105
  53. package/dist/core/providers/BitbucketProvider.js +0 -489
  54. package/dist/features/CodeReviewer.d.ts +0 -173
  55. package/dist/features/CodeReviewer.js +0 -1707
  56. package/dist/features/DescriptionEnhancer.d.ts +0 -70
  57. package/dist/features/DescriptionEnhancer.js +0 -511
  58. package/dist/features/MultiInstanceProcessor.d.ts +0 -74
  59. package/dist/features/MultiInstanceProcessor.js +0 -360
  60. package/dist/types/index.d.ts +0 -624
  61. package/dist/types/index.js +0 -104
  62. package/dist/utils/Cache.d.ts +0 -103
  63. package/dist/utils/Cache.js +0 -444
  64. package/dist/utils/ConfigManager.d.ts +0 -88
  65. package/dist/utils/ConfigManager.js +0 -602
  66. package/dist/utils/ContentSimilarityService.d.ts +0 -74
  67. package/dist/utils/ContentSimilarityService.js +0 -215
  68. package/dist/utils/ExactDuplicateRemover.d.ts +0 -77
  69. package/dist/utils/ExactDuplicateRemover.js +0 -361
  70. package/dist/utils/Logger.d.ts +0 -31
  71. package/dist/utils/Logger.js +0 -214
  72. package/dist/utils/MemoryBankManager.d.ts +0 -73
  73. package/dist/utils/MemoryBankManager.js +0 -310
  74. package/dist/utils/ParallelProcessing.d.ts +0 -140
  75. package/dist/utils/ParallelProcessing.js +0 -333
  76. package/dist/utils/ProviderLimits.d.ts +0 -58
  77. package/dist/utils/ProviderLimits.js +0 -143
  78. package/dist/utils/RetryManager.d.ts +0 -78
  79. package/dist/utils/RetryManager.js +0 -205
package/dist/core/ContextGatherer.js
@@ -1,470 +0,0 @@
- /**
- * Unified Context Gatherer - The foundation for all Yama operations
- * Gathers all necessary context once and reuses it across all operations
- */
- // NeuroLink will be dynamically imported
- import { ProviderError, } from "../types/index.js";
- import { logger } from "../utils/Logger.js";
- import { cache, Cache } from "../utils/Cache.js";
- import { createMemoryBankManager } from "../utils/MemoryBankManager.js";
- import { getProviderTokenLimit } from "../utils/ProviderLimits.js";
- export class ContextGatherer {
- neurolink;
- bitbucketProvider;
- aiConfig;
- memoryBankManager;
- startTime = 0;
- constructor(bitbucketProvider, aiConfig, memoryBankConfig) {
- this.bitbucketProvider = bitbucketProvider;
- this.aiConfig = aiConfig;
- this.memoryBankManager = createMemoryBankManager(memoryBankConfig, bitbucketProvider);
- }
- /**
- * Main context gathering method - used by all operations
- */
- async gatherContext(identifier, options = {}) {
- this.startTime = Date.now();
- const contextId = this.generateContextId(identifier);
- const cacheHits = [];
- logger.phase("🔍 Gathering unified context...");
- logger.info(`Target: ${identifier.workspace}/${identifier.repository}`);
- try {
- // Step 1: Find and get PR information
- const pr = await this.findAndGetPR(identifier, cacheHits, options.forceRefresh);
- const completeIdentifier = {
- ...identifier,
- pullRequestId: pr.id,
- };
- // Step 2: Gather project context (memory bank + clinerules)
- const projectContext = await this.gatherProjectContext(completeIdentifier, cacheHits, options.forceRefresh);
- // Step 3: Determine diff strategy based on file count and config
- const diffStrategy = this.determineDiffStrategy(pr.fileChanges || [], options.diffStrategyConfig);
- logger.info(`Diff strategy: ${diffStrategy.strategy} (${diffStrategy.reason})`);
- // Step 4: Get diff data based on strategy (if requested)
- let prDiff;
- let fileDiffs;
- if (options.includeDiff !== false) {
- if (diffStrategy.strategy === "whole") {
- prDiff = await this.getPRDiff(completeIdentifier, options.contextLines || 3, options.excludePatterns || ["*.lock", "*.svg"], cacheHits, options.forceRefresh);
- }
- else {
- fileDiffs = await this.getFileByFileDiffs(completeIdentifier, pr.fileChanges || [], options.contextLines || 3, options.excludePatterns || ["*.lock", "*.svg"], cacheHits, options.forceRefresh);
- }
- }
- const gatheringDuration = Date.now() - this.startTime;
- const context = {
- pr,
- identifier: completeIdentifier,
- projectContext,
- diffStrategy,
- prDiff,
- fileDiffs,
- contextId,
- gatheredAt: new Date().toISOString(),
- cacheHits,
- gatheringDuration,
- };
- logger.success(`Context gathered in ${Math.round(gatheringDuration / 1000)}s ` +
- `(${cacheHits.length} cache hits, ${diffStrategy.fileCount} files, ${diffStrategy.estimatedSize})`);
- // Cache the complete context for reuse
- this.cacheContext(context);
- return context;
- }
- catch (error) {
- logger.error(`Context gathering failed: ${error.message}`);
- throw new ProviderError(`Failed to gather context: ${error.message}`);
- }
- }
- /**
- * Step 1: Find PR and get detailed information
- */
- async findAndGetPR(identifier, cacheHits, forceRefresh = false) {
- logger.debug("Step 1: Finding and getting PR information...");
- // If PR ID is provided, get details directly
- if (identifier.pullRequestId) {
- const cacheKey = Cache.keys.prInfo(identifier.workspace, identifier.repository, identifier.pullRequestId);
- if (!forceRefresh && cache.has(cacheKey)) {
- cacheHits.push("pr-details");
- }
- return cache.getOrSet(cacheKey, async () => {
- logger.debug(`Getting PR details: ${identifier.workspace}/${identifier.repository}#${identifier.pullRequestId}`);
- return await this.bitbucketProvider.getPRDetails(identifier);
- }, 1800);
- }
- // If branch is provided, find PR first
- if (identifier.branch) {
- const branchCacheKey = Cache.keys.branchInfo(identifier.workspace, identifier.repository, identifier.branch);
- if (!forceRefresh && cache.has(branchCacheKey)) {
- cacheHits.push("branch-pr-lookup");
- }
- const prInfo = await cache.getOrSet(branchCacheKey, async () => {
- logger.debug(`Finding PR for branch: ${identifier.workspace}/${identifier.repository}@${identifier.branch}`);
- return await this.bitbucketProvider.findPRForBranch(identifier);
- }, 3600);
- // Now get full PR details
- const detailsCacheKey = Cache.keys.prInfo(identifier.workspace, identifier.repository, prInfo.id);
- if (!forceRefresh && cache.has(detailsCacheKey)) {
- cacheHits.push("pr-details-from-branch");
- }
- return cache.getOrSet(detailsCacheKey, async () => {
- return await this.bitbucketProvider.getPRDetails({
- ...identifier,
- pullRequestId: prInfo.id,
- });
- }, 1800);
- }
- throw new ProviderError("Either pullRequestId or branch must be provided");
- }
- /**
- * Step 2: Gather project context (memory bank + clinerules)
- */
- async gatherProjectContext(identifier, cacheHits, forceRefresh = false) {
- logger.debug("Step 2: Gathering project context...");
- const cacheKey = Cache.keys.projectContext(identifier.workspace, identifier.repository, identifier.branch || "main");
- if (!forceRefresh && cache.has(cacheKey)) {
- cacheHits.push("project-context");
- }
- return cache.getOrSet(cacheKey, async () => {
- try {
- // Use MemoryBankManager to get memory bank files
- const memoryBankResult = await this.memoryBankManager.getMemoryBankFiles(identifier, forceRefresh);
- if (memoryBankResult.files.length === 0) {
- logger.debug("No memory bank files found");
- return {
- memoryBank: {
- summary: "No project context available",
- projectContext: "None",
- patterns: "None",
- standards: "None",
- },
- clinerules: "",
- filesProcessed: 0,
- };
- }
- // Convert MemoryBankFile[] to Record<string, string> for AI processing
- const fileContents = {};
- memoryBankResult.files.forEach((file) => {
- fileContents[file.name] = file.content;
- });
- logger.debug(`✓ Loaded ${memoryBankResult.files.length} memory bank files from ${memoryBankResult.resolvedPath}${memoryBankResult.fallbackUsed ? " (fallback)" : ""}`);
- // Get .clinerules file
- let clinerules = "";
- try {
- clinerules = await this.bitbucketProvider.getFileContent(identifier.workspace, identifier.repository, ".clinerules", identifier.branch || "main");
- logger.debug("✓ Got .clinerules content");
- }
- catch (error) {
- logger.debug(`Could not read .clinerules: ${error.message}`);
- }
- // Parse and summarize with AI
- const contextData = await this.parseProjectContextWithAI(fileContents, clinerules);
- return {
- memoryBank: {
- summary: `Project Context: ${contextData.projectContext}
- Patterns: ${contextData.patterns}
- Standards: ${contextData.standards}`,
- projectContext: contextData.projectContext,
- patterns: contextData.patterns,
- standards: contextData.standards,
- },
- clinerules,
- filesProcessed: memoryBankResult.filesProcessed,
- };
- }
- catch (error) {
- logger.debug(`Failed to gather project context: ${error.message}`);
- return {
- memoryBank: {
- summary: "Context gathering failed",
- projectContext: "Failed to load",
- patterns: "Failed to load",
- standards: "Failed to load",
- },
- clinerules: "",
- filesProcessed: 0,
- };
- }
- }, 7200);
- }
- /**
- * Get safe token limit based on AI provider using shared utility
- */
- getSafeTokenLimit() {
- const provider = this.aiConfig.provider || "auto";
- const configuredTokens = this.aiConfig.maxTokens;
- // Use conservative limits for ContextGatherer (safer for large context processing)
- const providerLimit = getProviderTokenLimit(provider, true);
- // Use the smaller of configured tokens or provider limit
- if (configuredTokens && configuredTokens > 0) {
- const safeLimit = Math.min(configuredTokens, providerLimit);
- logger.debug(`Token limit: configured=${configuredTokens}, provider=${providerLimit}, using=${safeLimit}`);
- return safeLimit;
- }
- logger.debug(`Token limit: using provider default=${providerLimit} for ${provider}`);
- return providerLimit;
- }
- /**
- * Parse project context with AI
- */
- async parseProjectContextWithAI(fileContents, clinerules) {
- const prompt = `Parse and summarize these memory bank files and .clinerules:
-
- Memory Bank Files: ${JSON.stringify(fileContents, null, 2)}
-
- .clinerules Content: ${clinerules}
-
- Extract and summarize the content and return ONLY this JSON format:
- {
- "success": true,
- "projectContext": "Summary of project purpose, architecture, key components...",
- "patterns": "Summary of coding patterns, best practices, conventions...",
- "standards": "Summary of quality standards, review criteria..."
- }`;
- try {
- // Initialize NeuroLink with eval-based dynamic import
- if (!this.neurolink) {
- const { NeuroLink } = await import("@juspay/neurolink");
- this.neurolink = new NeuroLink();
- }
- // Context for project analysis
- const aiContext = {
- operation: "project-context-analysis",
- fileCount: Object.keys(fileContents).length,
- hasClinerules: !!clinerules,
- analysisType: "memory-bank-synthesis",
- };
- // Get safe token limit based on provider
- const safeMaxTokens = this.getSafeTokenLimit();
- logger.debug(`Using AI provider: ${this.aiConfig.provider || "auto"}`);
- logger.debug(`Configured maxTokens: ${this.aiConfig.maxTokens}`);
- logger.debug(`Safe maxTokens limit: ${safeMaxTokens}`);
- const result = await this.neurolink.generate({
- input: { text: prompt },
- systemPrompt: "You are an Expert Project Analyst. Synthesize project context from documentation and configuration files to help AI understand the codebase architecture, patterns, and business domain.",
- provider: this.aiConfig.provider,
- model: this.aiConfig.model,
- temperature: 0.3,
- maxTokens: safeMaxTokens, // Use provider-aware safe token limit
- timeout: "10m", // Allow longer processing for quality
- context: aiContext,
- enableAnalytics: this.aiConfig.enableAnalytics || true,
- enableEvaluation: false, // Not needed for context synthesis
- });
- // Log context analysis
- if (result.analytics) {
- logger.debug(`Context Analysis - Files: ${Object.keys(fileContents).length}, Provider: ${result.provider}`);
- }
- // Modern NeuroLink returns { content: string }
- const response = this.parseAIResponse(result);
- if (response.success) {
- return {
- projectContext: response.projectContext || "None",
- patterns: response.patterns || "None",
- standards: response.standards || "None",
- };
- }
- throw new Error("AI parsing failed");
- }
- catch (error) {
- logger.warn(`AI context parsing failed, using fallback: ${error.message}`);
- return {
- projectContext: "AI parsing unavailable",
- patterns: "Standard patterns assumed",
- standards: "Standard quality requirements",
- };
- }
- }
- /**
- * Step 3: Determine optimal diff strategy
- */
- determineDiffStrategy(fileChanges, config) {
- const fileCount = fileChanges.length;
- // Get threshold values from config or use defaults
- const wholeDiffMaxFiles = config?.thresholds?.wholeDiffMaxFiles ?? 2;
- // Note: fileByFileMinFiles is currently same as wholeDiffMaxFiles + 1
- // but kept separate for future flexibility
- // Check if force strategy is configured
- if (config?.forceStrategy && config.forceStrategy !== "auto") {
- return {
- strategy: config.forceStrategy,
- reason: `Forced by configuration`,
- fileCount,
- estimatedSize: this.estimateDiffSize(fileCount),
- };
- }
- // Determine strategy based on thresholds
- let strategy = "whole";
- let reason = "";
- if (fileCount === 0) {
- strategy = "whole";
- reason = "No files to analyze";
- }
- else if (fileCount <= wholeDiffMaxFiles) {
- strategy = "whole";
- reason = `${fileCount} file(s) ≤ ${wholeDiffMaxFiles} (threshold), using whole diff`;
- }
- else {
- strategy = "file-by-file";
- reason = `${fileCount} file(s) > ${wholeDiffMaxFiles} (threshold), using file-by-file`;
- }
- return {
- strategy,
- reason,
- fileCount,
- estimatedSize: this.estimateDiffSize(fileCount),
- };
- }
- /**
- * Estimate diff size based on file count
- */
- estimateDiffSize(fileCount) {
- if (fileCount === 0) {
- return "0 KB";
- }
- if (fileCount <= 2) {
- return "Small (~5-20 KB)";
- }
- if (fileCount <= 5) {
- return "Small (~10-50 KB)";
- }
- if (fileCount <= 20) {
- return "Medium (~50-200 KB)";
- }
- if (fileCount <= 50) {
- return "Large (~200-500 KB)";
- }
- return "Very Large (>500 KB)";
- }
- /**
- * Get whole PR diff
- */
- async getPRDiff(identifier, contextLines, excludePatterns, cacheHits, forceRefresh = false) {
- logger.debug("Getting whole PR diff...");
- const cacheKey = Cache.keys.prDiff(identifier.workspace, identifier.repository, identifier.pullRequestId);
- if (!forceRefresh && cache.has(cacheKey)) {
- cacheHits.push("pr-diff");
- }
- return cache.getOrSet(cacheKey, async () => {
- return await this.bitbucketProvider.getPRDiff(identifier, contextLines, excludePatterns);
- }, 1800);
- }
- /**
- * Get file-by-file diffs for large changesets
- */
- async getFileByFileDiffs(identifier, fileChanges, contextLines, excludePatterns, cacheHits, forceRefresh = false) {
- logger.debug(`Getting file-by-file diffs for ${fileChanges.length} files...`);
- const fileDiffs = new Map();
- // Filter out excluded files
- const filteredFiles = fileChanges.filter((file) => !excludePatterns.some((pattern) => new RegExp(pattern.replace(/\*/g, ".*")).test(file)));
- logger.debug(`Processing ${filteredFiles.length} files after exclusions`);
- // Process files in batches for better performance
- const batchSize = 5;
- for (let i = 0; i < filteredFiles.length; i += batchSize) {
- const batch = filteredFiles.slice(i, i + batchSize);
- const batchPromises = batch.map(async (file) => {
- const fileCacheKey = `file-diff:${identifier.workspace}:${identifier.repository}:${identifier.pullRequestId}:${file}`;
- if (!forceRefresh && cache.has(fileCacheKey)) {
- cacheHits.push(`file-diff-${file}`);
- }
- return cache.getOrSet(fileCacheKey, async () => {
- // Use include_patterns to get diff for just this file
- const fileDiff = await this.bitbucketProvider.getPRDiff(identifier, contextLines, excludePatterns, [file]);
- return fileDiff.diff;
- }, 1800);
- });
- const batchResults = await Promise.all(batchPromises);
- batch.forEach((file, index) => {
- fileDiffs.set(file, batchResults[index]);
- });
- // Small delay between batches to avoid overwhelming the API
- if (i + batchSize < filteredFiles.length) {
- await new Promise((resolve) => setTimeout(resolve, 500));
- }
- }
- logger.debug(`✓ Got diffs for ${fileDiffs.size} files`);
- return fileDiffs;
- }
- /**
- * Cache the complete context for reuse
- */
- cacheContext(context) {
- const contextCacheKey = `context:${context.contextId}`;
- cache.set(contextCacheKey, context, 1800); // 30 minutes
- // Tag it for easy invalidation
- cache.setWithTags(contextCacheKey, context, [
- `workspace:${context.identifier.workspace}`,
- `repository:${context.identifier.repository}`,
- `pr:${context.identifier.pullRequestId}`,
- ], 1800);
- }
- /**
- * Get cached context if available
- */
- async getCachedContext(identifier) {
- const contextId = this.generateContextId(identifier);
- const contextCacheKey = `context:${contextId}`;
- const cached = cache.get(contextCacheKey);
- if (cached) {
- logger.debug(`✓ Using cached context: ${contextId}`);
- return cached;
- }
- return null;
- }
- /**
- * Invalidate context cache for a specific PR
- */
- invalidateContext(identifier) {
- cache.invalidateTag(`pr:${identifier.pullRequestId}`);
- cache.invalidateTag(`workspace:${identifier.workspace}`);
- logger.debug(`Context cache invalidated for PR ${identifier.pullRequestId}`);
- }
- /**
- * Generate unique context ID
- */
- generateContextId(identifier) {
- const parts = [
- identifier.workspace,
- identifier.repository,
- identifier.pullRequestId || identifier.branch || "unknown",
- ];
- return Buffer.from(parts.join(":"))
- .toString("base64")
- .replace(/[+/=]/g, "")
- .substring(0, 16);
- }
- /**
- * Parse AI response utility
- */
- parseAIResponse(result) {
- try {
- const responseText = result.content || result.text || result.response || "";
- if (!responseText) {
- return { success: false, error: "Empty response" };
- }
- // Find JSON in response
- const jsonMatch = responseText.match(/\{[\s\S]*\}/);
- if (jsonMatch) {
- return JSON.parse(jsonMatch[0]);
- }
- return { success: false, error: "No JSON found" };
- }
- catch (error) {
- return { success: false, error: error.message };
- }
- }
- /**
- * Get gathering statistics
- */
- getStats() {
- return {
- lastGatheringDuration: this.startTime ? Date.now() - this.startTime : 0,
- cacheStats: cache.stats(),
- cacheHitRatio: cache.getHitRatio(),
- };
- }
- }
- // Export factory function
- export function createContextGatherer(bitbucketProvider, aiConfig, memoryBankConfig) {
- return new ContextGatherer(bitbucketProvider, aiConfig, memoryBankConfig);
- }
- //# sourceMappingURL=ContextGatherer.js.map
package/dist/core/Guardian.d.ts
@@ -1,81 +0,0 @@
- /**
- * Yama - Unified orchestrator class
- * The main class that coordinates all operations using shared context
- */
- import { GuardianConfig, OperationOptions, ProcessResult, StreamUpdate, StreamOptions, ReviewOptions, EnhancementOptions } from "../types/index.js";
- export declare class Guardian {
- private config;
- private bitbucketProvider;
- private contextGatherer;
- private codeReviewer;
- private descriptionEnhancer;
- private neurolink;
- private initialized;
- private logger;
- constructor(config?: Partial<GuardianConfig>);
- /**
- * Initialize Guardian with configuration
- */
- initialize(configPath?: string): Promise<void>;
- /**
- * Main method: Process PR with multiple operations using unified context
- */
- processPR(options: OperationOptions): Promise<ProcessResult>;
- /**
- * Streaming version of processPR for real-time updates
- */
- processPRStream(options: OperationOptions, _streamOptions?: StreamOptions): AsyncIterableIterator<StreamUpdate>;
- /**
- * Gather unified context (cached and reusable)
- */
- private gatherUnifiedContext;
- /**
- * Execute individual operation using shared context
- */
- private executeOperation;
- /**
- * Execute code review using shared context
- */
- private executeCodeReview;
- /**
- * Execute description enhancement using shared context
- */
- private executeDescriptionEnhancement;
- /**
- * Individual operation methods for backwards compatibility
- */
- /**
- * Code review operation (standalone)
- */
- reviewCode(options: ReviewOptions): Promise<any>;
- /**
- * Description enhancement operation (standalone)
- */
- enhanceDescription(options: EnhancementOptions): Promise<any>;
- /**
- * Health check for all components
- */
- healthCheck(): Promise<{
- healthy: boolean;
- components: any;
- }>;
- /**
- * Get comprehensive statistics
- */
- getStats(): any;
- /**
- * Clear all caches
- */
- clearCache(): void;
- /**
- * Ensure Guardian is initialized
- */
- private ensureInitialized;
- /**
- * Shutdown Guardian gracefully
- */
- shutdown(): Promise<void>;
- }
- export declare function createGuardian(config?: Partial<GuardianConfig>): Guardian;
- export declare const guardian: Guardian;
- //# sourceMappingURL=Guardian.d.ts.map