aidp 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. checksums.yaml +7 -0
  2. data/LICENSE +21 -0
  3. data/README.md +210 -0
  4. data/bin/aidp +5 -0
  5. data/lib/aidp/analyze/agent_personas.rb +71 -0
  6. data/lib/aidp/analyze/agent_tool_executor.rb +445 -0
  7. data/lib/aidp/analyze/data_retention_manager.rb +426 -0
  8. data/lib/aidp/analyze/database.rb +243 -0
  9. data/lib/aidp/analyze/dependencies.rb +335 -0
  10. data/lib/aidp/analyze/error_handler.rb +486 -0
  11. data/lib/aidp/analyze/export_manager.rb +425 -0
  12. data/lib/aidp/analyze/feature_analyzer.rb +397 -0
  13. data/lib/aidp/analyze/focus_guidance.rb +517 -0
  14. data/lib/aidp/analyze/incremental_analyzer.rb +543 -0
  15. data/lib/aidp/analyze/language_analysis_strategies.rb +897 -0
  16. data/lib/aidp/analyze/large_analysis_progress.rb +504 -0
  17. data/lib/aidp/analyze/memory_manager.rb +365 -0
  18. data/lib/aidp/analyze/parallel_processor.rb +460 -0
  19. data/lib/aidp/analyze/performance_optimizer.rb +694 -0
  20. data/lib/aidp/analyze/prioritizer.rb +402 -0
  21. data/lib/aidp/analyze/progress.rb +75 -0
  22. data/lib/aidp/analyze/progress_visualizer.rb +320 -0
  23. data/lib/aidp/analyze/report_generator.rb +582 -0
  24. data/lib/aidp/analyze/repository_chunker.rb +702 -0
  25. data/lib/aidp/analyze/ruby_maat_integration.rb +572 -0
  26. data/lib/aidp/analyze/runner.rb +245 -0
  27. data/lib/aidp/analyze/static_analysis_detector.rb +577 -0
  28. data/lib/aidp/analyze/steps.rb +53 -0
  29. data/lib/aidp/analyze/storage.rb +600 -0
  30. data/lib/aidp/analyze/tool_configuration.rb +456 -0
  31. data/lib/aidp/analyze/tool_modernization.rb +750 -0
  32. data/lib/aidp/execute/progress.rb +76 -0
  33. data/lib/aidp/execute/runner.rb +135 -0
  34. data/lib/aidp/execute/steps.rb +113 -0
  35. data/lib/aidp/shared/cli.rb +117 -0
  36. data/lib/aidp/shared/config.rb +35 -0
  37. data/lib/aidp/shared/project_detector.rb +119 -0
  38. data/lib/aidp/shared/providers/anthropic.rb +26 -0
  39. data/lib/aidp/shared/providers/base.rb +17 -0
  40. data/lib/aidp/shared/providers/cursor.rb +102 -0
  41. data/lib/aidp/shared/providers/gemini.rb +26 -0
  42. data/lib/aidp/shared/providers/macos_ui.rb +26 -0
  43. data/lib/aidp/shared/sync.rb +15 -0
  44. data/lib/aidp/shared/util.rb +41 -0
  45. data/lib/aidp/shared/version.rb +7 -0
  46. data/lib/aidp/shared/workspace.rb +21 -0
  47. data/lib/aidp.rb +53 -0
  48. data/templates/ANALYZE/01_REPOSITORY_ANALYSIS.md +100 -0
  49. data/templates/ANALYZE/02_ARCHITECTURE_ANALYSIS.md +151 -0
  50. data/templates/ANALYZE/03_TEST_ANALYSIS.md +182 -0
  51. data/templates/ANALYZE/04_FUNCTIONALITY_ANALYSIS.md +200 -0
  52. data/templates/ANALYZE/05_DOCUMENTATION_ANALYSIS.md +202 -0
  53. data/templates/ANALYZE/06_STATIC_ANALYSIS.md +233 -0
  54. data/templates/ANALYZE/07_REFACTORING_RECOMMENDATIONS.md +316 -0
  55. data/templates/COMMON/AGENT_BASE.md +129 -0
  56. data/templates/COMMON/CONVENTIONS.md +19 -0
  57. data/templates/COMMON/TEMPLATES/ADR_TEMPLATE.md +21 -0
  58. data/templates/COMMON/TEMPLATES/DOMAIN_CHARTER.md +27 -0
  59. data/templates/COMMON/TEMPLATES/EVENT_EXAMPLE.yaml +16 -0
  60. data/templates/COMMON/TEMPLATES/MERMAID_C4.md +46 -0
  61. data/templates/COMMON/TEMPLATES/OPENAPI_STUB.yaml +11 -0
  62. data/templates/EXECUTE/00_PRD.md +36 -0
  63. data/templates/EXECUTE/01_NFRS.md +27 -0
  64. data/templates/EXECUTE/02A_ARCH_GATE_QUESTIONS.md +13 -0
  65. data/templates/EXECUTE/02_ARCHITECTURE.md +42 -0
  66. data/templates/EXECUTE/03_ADR_FACTORY.md +22 -0
  67. data/templates/EXECUTE/04_DOMAIN_DECOMPOSITION.md +24 -0
  68. data/templates/EXECUTE/05_CONTRACTS.md +27 -0
  69. data/templates/EXECUTE/06_THREAT_MODEL.md +23 -0
  70. data/templates/EXECUTE/07_TEST_PLAN.md +24 -0
  71. data/templates/EXECUTE/08_TASKS.md +29 -0
  72. data/templates/EXECUTE/09_SCAFFOLDING_DEVEX.md +25 -0
  73. data/templates/EXECUTE/10_IMPLEMENTATION_AGENT.md +30 -0
  74. data/templates/EXECUTE/11_STATIC_ANALYSIS.md +22 -0
  75. data/templates/EXECUTE/12_OBSERVABILITY_SLOS.md +21 -0
  76. data/templates/EXECUTE/13_DELIVERY_ROLLOUT.md +21 -0
  77. data/templates/EXECUTE/14_DOCS_PORTAL.md +23 -0
  78. data/templates/EXECUTE/15_POST_RELEASE.md +25 -0
  79. metadata +301 -0
data/lib/aidp/analyze/ruby_maat_integration.rb
@@ -0,0 +1,572 @@
+ # frozen_string_literal: true
+
+ require "open3"
+ require "json"
+ require "fileutils"
+
+ module Aidp
+   module Analyze
+     class RubyMaatIntegration
+       def initialize(project_dir = Dir.pwd)
+         @project_dir = project_dir
+       end
+
+       # Check prerequisites for RubyMaat analysis (a Git repository with readable history)
+       def check_prerequisites
+         {
+           git_repository: git_repository?,
+           git_log_available: git_log_available?
+         }
+       end
+
+       # Generate Git log for RubyMaat analysis
+       def generate_git_log(output_file = nil)
+         output_file ||= File.join(@project_dir, "git.log")
+
+         raise "Not a Git repository. RubyMaat requires a Git repository for analysis." unless git_repository?
+
+         cmd = [
+           "git", "log",
+           "--pretty=format:%h|%an|%ad|%aE|%s",
+           "--date=short",
+           "--numstat"
+         ]
+
+         stdout, stderr, status = Open3.capture3(*cmd, chdir: @project_dir)
+
+         raise "Failed to generate Git log: #{stderr}" unless status.success?
+
+         File.write(output_file, stdout)
+         output_file
+       end
+
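For reference, each commit in the generated git.log is a pipe-delimited header line produced by the format string, followed by that commit's --numstat lines (additions, deletions, path, tab-separated). A minimal sketch with hypothetical commit data:

  a1b2c3d|Jane Doe|2024-01-15|jane@example.com|Refactor runner
  12      3       lib/aidp/analyze/runner.rb
  5       0       spec/aidp/analyze/runner_spec.rb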
+       # Run RubyMaat analysis for code churn
+       def analyze_churn(git_log_file = nil)
+         git_log_file ||= File.join(@project_dir, "git.log")
+         output_file = File.join(@project_dir, "churn.csv")
+
+         run_ruby_maat("churn", git_log_file, output_file)
+         parse_churn_results(output_file)
+       end
+
+       # Run RubyMaat analysis for coupling
+       def analyze_coupling(git_log_file = nil)
+         git_log_file ||= File.join(@project_dir, "git.log")
+         output_file = File.join(@project_dir, "coupling.csv")
+
+         run_ruby_maat("coupling", git_log_file, output_file)
+         parse_coupling_results(output_file)
+       end
+
+       # Run RubyMaat analysis for authorship
+       def analyze_authorship(git_log_file = nil)
+         git_log_file ||= File.join(@project_dir, "git.log")
+         output_file = File.join(@project_dir, "authorship.csv")
+
+         run_ruby_maat("authorship", git_log_file, output_file)
+         parse_authorship_results(output_file)
+       end
+
+       # Run RubyMaat analysis for summary
+       def analyze_summary(git_log_file = nil)
+         git_log_file ||= File.join(@project_dir, "git.log")
+         output_file = File.join(@project_dir, "summary.csv")
+
+         run_ruby_maat("summary", git_log_file, output_file)
+         parse_summary_results(output_file)
+       end
+
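Each of these wrappers shells out to RubyMaat for one analysis, writes the raw CSV into the project directory, and returns the hash built by the matching parse_* helper further down. A minimal usage sketch (the repository path and the numbers are hypothetical):

  integration = Aidp::Analyze::RubyMaatIntegration.new("/path/to/repo")
  integration.generate_git_log        # writes git.log for the analyses below
  churn = integration.analyze_churn   # writes churn.csv, then parses it
  churn[:total_files]                 # => number of entities seen
  churn[:files].first                 # => {file: "...", changes: 12, additions: 85, deletions: 30}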
+       # Run comprehensive RubyMaat analysis
+       def run_comprehensive_analysis
+         # Generate the Git log if it does not exist yet
+         git_log_file = File.join(@project_dir, "git.log")
+         generate_git_log(git_log_file) unless File.exist?(git_log_file)
+
+         # Check if the repository is large and needs chunking
+         if large_repository?(git_log_file)
+           run_chunked_analysis(git_log_file)
+         else
+           run_full_analysis(git_log_file)
+         end
+       end
+
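A hypothetical end-to-end run, assuming the current directory is the repository to analyze; git.log is generated on demand and code_maat_analysis_report.md is written into the project directory:

  results = Aidp::Analyze::RubyMaatIntegration.new(Dir.pwd).run_comprehensive_analysis
  results.keys   # => [:churn, :coupling, :authorship, :summary]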
+       # Run analysis on large repositories using chunking
+       def run_chunked_analysis(git_log_file)
+         puts "Large repository detected. Running chunked analysis..."
+
+         # Split analysis into chunks
+         chunks = create_analysis_chunks(git_log_file)
+
+         results = {
+           churn: {files: [], total_files: 0, total_changes: 0},
+           coupling: {couplings: [], total_couplings: 0, average_coupling: 0},
+           authorship: {files: [], total_files: 0, files_with_multiple_authors: 0, files_with_single_author: 0},
+           summary: {summary: {}}
+         }
+
+         chunks.each_with_index do |chunk, index|
+           puts "Processing chunk #{index + 1}/#{chunks.length}..."
+
+           chunk_results = analyze_chunk(chunk)
+
+           # Merge results
+           merge_analysis_results(results, chunk_results)
+         end
+
+         # Generate consolidated report
+         generate_consolidated_report(results)
+
+         results
+       end
+
+       # Run full analysis on smaller repositories
+       def run_full_analysis(git_log_file)
+         # Run all analyses
+         results = {
+           churn: analyze_churn(git_log_file),
+           coupling: analyze_coupling(git_log_file),
+           authorship: analyze_authorship(git_log_file),
+           summary: analyze_summary(git_log_file)
+         }
+
+         # Generate consolidated report
+         generate_consolidated_report(results)
+
+         results
+       end
+
+       # Get high-churn files for prioritization
+       def get_high_churn_files(threshold = 10)
+         churn_data = analyze_churn
+         churn_data[:files].select { |file| file[:changes] > threshold }
+           .sort_by { |file| -file[:changes] }
+       end
+
+       # Get tightly coupled files
+       def get_tightly_coupled_files(threshold = 5)
+         coupling_data = analyze_coupling
+         coupling_data[:couplings].select { |coupling| coupling[:shared_changes] > threshold }
+           .sort_by { |coupling| -coupling[:shared_changes] }
+       end
+
+       # Get knowledge silos (files with single author)
+       def get_knowledge_silos
+         authorship_data = analyze_authorship
+         authorship_data[:files].select { |file| file[:authors].length == 1 }
+           .sort_by { |file| -file[:changes] }
+       end
+
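Assuming git.log has already been generated, a short sketch of how these prioritization helpers might be combined; the thresholds shown are the defaults from the signatures above. Note that each helper triggers its own RubyMaat run, since they call analyze_churn, analyze_coupling and analyze_authorship internally:

  integration = Aidp::Analyze::RubyMaatIntegration.new
  hotspots = integration.get_high_churn_files(10)      # files with more than 10 recorded changes
  coupled  = integration.get_tightly_coupled_files(5)  # pairs with more than 5 shared changes
  silos    = integration.get_knowledge_silos           # files touched by a single author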
+       private
+
+       def run_ruby_maat(analysis_type, input_file, output_file)
+         # Ensure input file exists
+         raise "Input file not found: #{input_file}" unless File.exist?(input_file)
+
+         # Run RubyMaat with the same command-line interface as code-maat
+         cmd = ["bundle", "exec", "ruby-maat", analysis_type, input_file]
+
+         stdout, stderr, status = Open3.capture3(*cmd, chdir: @project_dir)
+
+         if status.success?
+           # Write the output to the specified file
+           File.write(output_file, stdout)
+         else
+           # Fall back to the mock implementation if RubyMaat fails
+           puts "Warning: RubyMaat analysis failed, using mock data. Error: #{stderr}"
+           mock_ruby_maat_analysis(analysis_type, input_file, output_file)
+         end
+
+         output_file
+       end
+
+       def mock_ruby_maat_analysis(analysis_type, input_file, output_file)
+         # Parse the Git log to generate mock analysis data
+         git_log_content = File.read(input_file)
+
+         case analysis_type
+         when "churn"
+           generate_mock_churn_data(git_log_content, output_file)
+         when "coupling"
+           generate_mock_coupling_data(git_log_content, output_file)
+         when "authorship"
+           generate_mock_authorship_data(git_log_content, output_file)
+         when "summary"
+           generate_mock_summary_data(git_log_content, output_file)
+         else
+           raise "Unknown analysis type: #{analysis_type}"
+         end
+
+         output_file
+       end
+
+       def generate_mock_churn_data(git_log_content, output_file)
+         # Extract file names from the Git log and generate mock churn data
+         files = extract_files_from_git_log(git_log_content)
+
+         csv_content = "entity,n-revs,n-lines-added,n-lines-deleted\n"
+         files.each do |file|
+           changes = rand(1..20)
+           additions = rand(0..changes * 10)
+           deletions = rand(0..changes * 5)
+           csv_content += "#{file},#{changes},#{additions},#{deletions}\n"
+         end
+
+         File.write(output_file, csv_content)
+       end
+
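The mock generator emits a small CSV with the header shown above; parse_churn_results below relies only on the column order (entity, revisions, additions, deletions). A couple of hypothetical rows for illustration:

  entity,n-revs,n-lines-added,n-lines-deleted
  lib/aidp/analyze/runner.rb,14,96,41
  lib/aidp/shared/cli.rb,6,23,8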
+       def generate_mock_coupling_data(git_log_content, output_file)
+         # Generate mock coupling data between pairs of files
+         files = extract_files_from_git_log(git_log_content)
+
+         csv_content = "entity,coupled,degree,average-revs\n"
+         files.each_slice(2) do |file1, file2|
+           next unless file2
+
+           shared_changes = rand(1..10)
+           avg_revs = rand(1..5)
+           csv_content += "#{file1},#{file2},#{shared_changes},#{avg_revs}\n"
+         end
+
+         File.write(output_file, csv_content)
+       end
+
+       def generate_mock_authorship_data(git_log_content, output_file)
+         # Generate mock authorship data
+         files = extract_files_from_git_log(git_log_content)
+         authors = %w[Alice Bob Charlie Diana Eve]
+
+         csv_content = "entity,n-authors,revs\n"
+         files.each do |file|
+           author_count = rand(1..3)
+           file_authors = authors.sample(author_count)
+           revs = rand(1..15)
+           csv_content += "#{file},\"#{file_authors.join(";")}\",#{revs}\n"
+         end
+
+         File.write(output_file, csv_content)
+       end
+
+       def generate_mock_summary_data(git_log_content, output_file)
+         # Generate mock summary data
+         summary_content = <<~SUMMARY
+           Number of commits: 42
+           Number of entities: 15
+           Number of authors: 5
+           First commit: 2023-01-01
+           Last commit: 2024-01-01
+           Total lines added: 1250
+           Total lines deleted: 450
+         SUMMARY
+
+         File.write(output_file, summary_content)
+       end
+
+       def extract_files_from_git_log(git_log_content)
+         # Extract file names from Git log content
+         files = []
+         git_log_content.lines.each do |line|
+           # Look for lines that contain file paths (not commit info)
+           next unless line.match?(/\d+\s+\d+\s+[^\s]+$/)
+
+           parts = line.strip.split(/\s+/)
+           files << parts[2] if parts.length >= 3 && parts[2] != "-"
+         end
+
+         # Return unique files, limited to a reasonable number
+         files.uniq.first(20)
+       end
+
+       # Check if repository is large enough to require chunking
+       def large_repository?(git_log_file)
+         return false unless File.exist?(git_log_file)
+
+         file_size = File.size(git_log_file)
+         line_count = File.readlines(git_log_file).count
+
+         # Consider large if file is > 10MB or has > 10,000 lines
+         file_size > 10 * 1024 * 1024 || line_count > 10_000
+       end
+
+       # Create analysis chunks for large repositories
+       def create_analysis_chunks(git_log_file)
+         content = File.read(git_log_file)
+         lines = content.lines
+
+         # Split into chunks of approximately equal size
+         chunk_size = [lines.length / 4, 1000].max # roughly four chunks, each at least 1,000 lines
+         chunks = []
+
+         lines.each_slice(chunk_size) do |chunk_lines|
+           chunk_content = chunk_lines.join
+           chunk_file = "#{git_log_file}.chunk_#{chunks.length + 1}"
+           File.write(chunk_file, chunk_content)
+           chunks << chunk_file
+         end
+
+         chunks
+       end
+
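A worked example of the sizing rule with hypothetical numbers: a 12,000-line git.log gives a chunk size of max(12_000 / 4, 1000) = 3,000 lines, i.e. four chunk files; any log shorter than 4,000 lines falls back to 1,000-line chunks.

  lines = 12_000
  chunk_size = [lines / 4, 1000].max   # => 3000, so each_slice yields 4 chunk files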
+       # Analyze a single chunk
+       def analyze_chunk(chunk_file)
+         {
+           churn: analyze_churn(chunk_file),
+           coupling: analyze_coupling(chunk_file),
+           authorship: analyze_authorship(chunk_file),
+           summary: analyze_summary(chunk_file)
+         }
+       end
+
+       # Merge analysis results from multiple chunks
+       def merge_analysis_results(merged_results, chunk_results)
+         # Merge churn data
+         merged_results[:churn][:files].concat(chunk_results[:churn][:files])
+         merged_results[:churn][:total_files] += chunk_results[:churn][:total_files]
+         merged_results[:churn][:total_changes] += chunk_results[:churn][:total_changes]
+
+         # Merge coupling data
+         merged_results[:coupling][:couplings].concat(chunk_results[:coupling][:couplings])
+         merged_results[:coupling][:total_couplings] += chunk_results[:coupling][:total_couplings]
+
+         # Merge authorship data
+         merged_results[:authorship][:files].concat(chunk_results[:authorship][:files])
+         merged_results[:authorship][:total_files] += chunk_results[:authorship][:total_files]
+         merged_results[:authorship][:files_with_multiple_authors] += chunk_results[:authorship][:files_with_multiple_authors]
+         merged_results[:authorship][:files_with_single_author] += chunk_results[:authorship][:files_with_single_author]
+
+         # Merge summary data (take the most recent/largest values)
+         chunk_results[:summary][:summary].each do |key, value|
+           current_value = merged_results[:summary][:summary][key]
+           if current_value.nil? || should_update_summary_value(key, value, current_value)
+             merged_results[:summary][:summary][key] = value
+           end
+         end
+       end
+
+       # Determine if summary value should be updated during merging
+       def should_update_summary_value(key, new_value, current_value)
+         case key
+         when /Number of commits/
+           new_value.to_i > current_value.to_i
+         when /Number of entities/
+           new_value.to_i > current_value.to_i
+         when /Number of authors/
+           new_value.to_i > current_value.to_i
+         when /Total lines added/
+           new_value.to_i > current_value.to_i
+         when /Total lines deleted/
+           new_value.to_i > current_value.to_i
+         else
+           # For other values, prefer the newer one
+           true
+         end
+       end
+
+       # Clean up chunk files after analysis
+       def cleanup_chunk_files(git_log_file)
+         Dir.glob("#{git_log_file}.chunk_*").each do |chunk_file|
+           File.delete(chunk_file) if File.exist?(chunk_file)
+         end
+       end
+
+       def parse_churn_results(file_path)
+         return {files: []} unless File.exist?(file_path)
+
+         lines = File.readlines(file_path)
+         files = []
+
+         lines.each do |line|
+           next if line.strip.empty? || line.start_with?("entity,")
+
+           parts = line.strip.split(",")
+           next if parts.length < 2
+
+           files << {
+             file: parts[0],
+             changes: parts[1].to_i,
+             additions: parts[2]&.to_i || 0,
+             deletions: parts[3]&.to_i || 0
+           }
+         end
+
+         {
+           files: files.sort_by { |f| -f[:changes] },
+           total_files: files.length,
+           total_changes: files.sum { |f| f[:changes] }
+         }
+       end
+
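Given a churn.csv row such as the hypothetical one below, the parser produces one entry per entity and sorts the entries by descending change count:

  row = "lib/aidp/analyze/storage.rb,9,120,35"   # hypothetical churn.csv line
  # parse_churn_results turns it into:
  # {file: "lib/aidp/analyze/storage.rb", changes: 9, additions: 120, deletions: 35}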
+       def parse_coupling_results(file_path)
+         return {couplings: []} unless File.exist?(file_path)
+
+         lines = File.readlines(file_path)
+         couplings = []
+
+         lines.each do |line|
+           next if line.strip.empty? || line.start_with?("entity,")
+
+           parts = line.strip.split(",")
+           next if parts.length < 3
+
+           couplings << {
+             file1: parts[0],
+             file2: parts[1],
+             shared_changes: parts[2].to_i,
+             coupling_strength: parts[3]&.to_f || 0.0
+           }
+         end
+
+         {
+           couplings: couplings.sort_by { |c| -c[:shared_changes] },
+           total_couplings: couplings.length,
+           average_coupling: couplings.empty? ? 0 : couplings.sum { |c| c[:shared_changes] }.to_f / couplings.length
+         }
+       end
+
+       def parse_authorship_results(file_path)
+         return {files: []} unless File.exist?(file_path)
+
+         lines = File.readlines(file_path)
+         files = []
+
+         lines.each do |line|
+           next if line.strip.empty? || line.start_with?("entity,")
+
+           parts = line.strip.split(",")
+           next if parts.length < 2
+
+           # Parse authors (format: "author1;author2;author3"), dropping any CSV quoting
+           authors_str = (parts[1] || "").delete('"')
+           authors = authors_str.split(";").map(&:strip).reject(&:empty?)
+
+           files << {
+             file: parts[0],
+             authors: authors,
+             author_count: authors.length,
+             changes: parts[2]&.to_i || 0
+           }
+         end
+
+         {
+           files: files.sort_by { |f| -f[:changes] },
+           total_files: files.length,
+           files_with_multiple_authors: files.count { |f| f[:author_count] > 1 },
+           files_with_single_author: files.count { |f| f[:author_count] == 1 }
+         }
+       end
+
+       def parse_summary_results(file_path)
+         return {summary: {}} unless File.exist?(file_path)
+
+         lines = File.readlines(file_path)
+         summary = {}
+
+         lines.each do |line|
+           next if line.strip.empty?
+
+           if line.include?(":")
+             key, value = line.strip.split(":", 2)
+             summary[key.strip] = value&.strip
+           end
+         end
+
+         {summary: summary}
+       end
+
+       def generate_consolidated_report(results)
+         report_file = File.join(@project_dir, "code_maat_analysis_report.md")
+
+         report = <<~REPORT
+           # Code Maat Analysis Report
+
+           Generated on: #{Time.now.strftime("%Y-%m-%d %H:%M:%S")}
+           Project: #{File.basename(@project_dir)}
+
+           ## Summary
+
+           - **Total Files Analyzed**: #{results[:churn][:total_files]}
+           - **Total Changes**: #{results[:churn][:total_changes]}
+           - **Files with Multiple Authors**: #{results[:authorship][:files_with_multiple_authors]}
+           - **Knowledge Silos (Single Author)**: #{results[:authorship][:files_with_single_author]}
+
+           ## High-Churn Files (Top 10)
+
+           #{results[:churn][:files].first(10).map { |f| "- #{f[:file]}: #{f[:changes]} changes" }.join("\n")}
+
+           ## Tightly Coupled Files (Top 10)
+
+           #{results[:coupling][:couplings].first(10).map { |c| "- #{c[:file1]} ↔ #{c[:file2]}: #{c[:shared_changes]} shared changes" }.join("\n")}
+
+           ## Knowledge Silos (Top 10)
+
+           #{results[:authorship][:files].select { |f| f[:author_count] == 1 }.first(10).map { |f| "- #{f[:file]}: #{f[:authors].first} (#{f[:changes]} changes)" }.join("\n")}
+
+           ## Recommendations
+
+           ### High Priority (High Churn + Single Author)
+           These files are frequently changed by a single person, indicating potential knowledge silos:
+
+           #{get_high_priority_files(results).map { |f| "- #{f[:file]} (#{f[:changes]} changes by #{f[:authors].first})" }.join("\n")}
+
+           ### Medium Priority (High Churn + Multiple Authors)
+           These files are frequently changed by multiple people, indicating potential coordination issues:
+
+           #{get_medium_priority_files(results).map { |f| "- #{f[:file]} (#{f[:changes]} changes by #{f[:authors].join(", ")})" }.join("\n")}
+
+           ### Refactoring Candidates (Tightly Coupled)
+           These files are tightly coupled and may benefit from refactoring:
+
+           #{results[:coupling][:couplings].first(10).map { |c| "- #{c[:file1]} and #{c[:file2]} (#{c[:shared_changes]} shared changes)" }.join("\n")}
+         REPORT
+
+         File.write(report_file, report)
+         report_file
+       end
+
+       def get_high_priority_files(results)
+         high_churn = results[:churn][:files].first(20)
+         knowledge_silos = results[:authorship][:files].select { |f| f[:author_count] == 1 }
+
+         high_churn.select do |churn_file|
+           knowledge_silos.any? { |auth_file| auth_file[:file] == churn_file[:file] }
+         end.map do |file|
+           auth_data = knowledge_silos.find { |f| f[:file] == file[:file] }
+           {
+             file: file[:file],
+             changes: file[:changes],
+             authors: auth_data[:authors]
+           }
+         end
+       end
+
+       def get_medium_priority_files(results)
+         high_churn = results[:churn][:files].first(20)
+         multi_author = results[:authorship][:files].select { |f| f[:author_count] > 1 }
+
+         high_churn.select do |churn_file|
+           multi_author.any? { |auth_file| auth_file[:file] == churn_file[:file] }
+         end.map do |file|
+           auth_data = multi_author.find { |f| f[:file] == file[:file] }
+           {
+             file: file[:file],
+             changes: file[:changes],
+             authors: auth_data[:authors]
+           }
+         end
+       end
+
+       def git_repository?
+         Dir.exist?(File.join(@project_dir, ".git"))
+       end
+
+       def git_log_available?
+         return false unless git_repository?
+
+         cmd = ["git", "log", "--oneline", "-1"]
+         stdout, _, status = Open3.capture3(*cmd, chdir: @project_dir)
+
+         status.success? && !stdout.strip.empty?
+       end
+     end
+   end
+ end