gitflow-analytics 1.0.3__py3-none-any.whl → 1.3.11__py3-none-any.whl

This diff covers publicly released package versions published to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (116)
  1. gitflow_analytics/_version.py +1 -1
  2. gitflow_analytics/classification/__init__.py +31 -0
  3. gitflow_analytics/classification/batch_classifier.py +752 -0
  4. gitflow_analytics/classification/classifier.py +464 -0
  5. gitflow_analytics/classification/feature_extractor.py +725 -0
  6. gitflow_analytics/classification/linguist_analyzer.py +574 -0
  7. gitflow_analytics/classification/model.py +455 -0
  8. gitflow_analytics/cli.py +4158 -350
  9. gitflow_analytics/cli_rich.py +198 -48
  10. gitflow_analytics/config/__init__.py +43 -0
  11. gitflow_analytics/config/errors.py +261 -0
  12. gitflow_analytics/config/loader.py +905 -0
  13. gitflow_analytics/config/profiles.py +264 -0
  14. gitflow_analytics/config/repository.py +124 -0
  15. gitflow_analytics/config/schema.py +444 -0
  16. gitflow_analytics/config/validator.py +154 -0
  17. gitflow_analytics/config.py +44 -508
  18. gitflow_analytics/core/analyzer.py +1209 -98
  19. gitflow_analytics/core/cache.py +1337 -29
  20. gitflow_analytics/core/data_fetcher.py +1285 -0
  21. gitflow_analytics/core/identity.py +363 -14
  22. gitflow_analytics/core/metrics_storage.py +526 -0
  23. gitflow_analytics/core/progress.py +372 -0
  24. gitflow_analytics/core/schema_version.py +269 -0
  25. gitflow_analytics/extractors/ml_tickets.py +1100 -0
  26. gitflow_analytics/extractors/story_points.py +8 -1
  27. gitflow_analytics/extractors/tickets.py +749 -11
  28. gitflow_analytics/identity_llm/__init__.py +6 -0
  29. gitflow_analytics/identity_llm/analysis_pass.py +231 -0
  30. gitflow_analytics/identity_llm/analyzer.py +464 -0
  31. gitflow_analytics/identity_llm/models.py +76 -0
  32. gitflow_analytics/integrations/github_integration.py +175 -11
  33. gitflow_analytics/integrations/jira_integration.py +461 -24
  34. gitflow_analytics/integrations/orchestrator.py +124 -1
  35. gitflow_analytics/metrics/activity_scoring.py +322 -0
  36. gitflow_analytics/metrics/branch_health.py +470 -0
  37. gitflow_analytics/metrics/dora.py +379 -20
  38. gitflow_analytics/models/database.py +843 -53
  39. gitflow_analytics/pm_framework/__init__.py +115 -0
  40. gitflow_analytics/pm_framework/adapters/__init__.py +50 -0
  41. gitflow_analytics/pm_framework/adapters/jira_adapter.py +1845 -0
  42. gitflow_analytics/pm_framework/base.py +406 -0
  43. gitflow_analytics/pm_framework/models.py +211 -0
  44. gitflow_analytics/pm_framework/orchestrator.py +652 -0
  45. gitflow_analytics/pm_framework/registry.py +333 -0
  46. gitflow_analytics/qualitative/__init__.py +9 -10
  47. gitflow_analytics/qualitative/chatgpt_analyzer.py +259 -0
  48. gitflow_analytics/qualitative/classifiers/__init__.py +3 -3
  49. gitflow_analytics/qualitative/classifiers/change_type.py +518 -244
  50. gitflow_analytics/qualitative/classifiers/domain_classifier.py +272 -165
  51. gitflow_analytics/qualitative/classifiers/intent_analyzer.py +321 -222
  52. gitflow_analytics/qualitative/classifiers/llm/__init__.py +35 -0
  53. gitflow_analytics/qualitative/classifiers/llm/base.py +193 -0
  54. gitflow_analytics/qualitative/classifiers/llm/batch_processor.py +383 -0
  55. gitflow_analytics/qualitative/classifiers/llm/cache.py +479 -0
  56. gitflow_analytics/qualitative/classifiers/llm/cost_tracker.py +435 -0
  57. gitflow_analytics/qualitative/classifiers/llm/openai_client.py +403 -0
  58. gitflow_analytics/qualitative/classifiers/llm/prompts.py +373 -0
  59. gitflow_analytics/qualitative/classifiers/llm/response_parser.py +287 -0
  60. gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +607 -0
  61. gitflow_analytics/qualitative/classifiers/risk_analyzer.py +215 -189
  62. gitflow_analytics/qualitative/core/__init__.py +4 -4
  63. gitflow_analytics/qualitative/core/llm_fallback.py +239 -235
  64. gitflow_analytics/qualitative/core/nlp_engine.py +157 -148
  65. gitflow_analytics/qualitative/core/pattern_cache.py +214 -192
  66. gitflow_analytics/qualitative/core/processor.py +381 -248
  67. gitflow_analytics/qualitative/enhanced_analyzer.py +2236 -0
  68. gitflow_analytics/qualitative/example_enhanced_usage.py +420 -0
  69. gitflow_analytics/qualitative/models/__init__.py +7 -7
  70. gitflow_analytics/qualitative/models/schemas.py +155 -121
  71. gitflow_analytics/qualitative/utils/__init__.py +4 -4
  72. gitflow_analytics/qualitative/utils/batch_processor.py +136 -123
  73. gitflow_analytics/qualitative/utils/cost_tracker.py +142 -140
  74. gitflow_analytics/qualitative/utils/metrics.py +172 -158
  75. gitflow_analytics/qualitative/utils/text_processing.py +146 -104
  76. gitflow_analytics/reports/__init__.py +100 -0
  77. gitflow_analytics/reports/analytics_writer.py +539 -14
  78. gitflow_analytics/reports/base.py +648 -0
  79. gitflow_analytics/reports/branch_health_writer.py +322 -0
  80. gitflow_analytics/reports/classification_writer.py +924 -0
  81. gitflow_analytics/reports/cli_integration.py +427 -0
  82. gitflow_analytics/reports/csv_writer.py +1676 -212
  83. gitflow_analytics/reports/data_models.py +504 -0
  84. gitflow_analytics/reports/database_report_generator.py +427 -0
  85. gitflow_analytics/reports/example_usage.py +344 -0
  86. gitflow_analytics/reports/factory.py +499 -0
  87. gitflow_analytics/reports/formatters.py +698 -0
  88. gitflow_analytics/reports/html_generator.py +1116 -0
  89. gitflow_analytics/reports/interfaces.py +489 -0
  90. gitflow_analytics/reports/json_exporter.py +2770 -0
  91. gitflow_analytics/reports/narrative_writer.py +2287 -158
  92. gitflow_analytics/reports/story_point_correlation.py +1144 -0
  93. gitflow_analytics/reports/weekly_trends_writer.py +389 -0
  94. gitflow_analytics/training/__init__.py +5 -0
  95. gitflow_analytics/training/model_loader.py +377 -0
  96. gitflow_analytics/training/pipeline.py +550 -0
  97. gitflow_analytics/tui/__init__.py +1 -1
  98. gitflow_analytics/tui/app.py +129 -126
  99. gitflow_analytics/tui/screens/__init__.py +3 -3
  100. gitflow_analytics/tui/screens/analysis_progress_screen.py +188 -179
  101. gitflow_analytics/tui/screens/configuration_screen.py +154 -178
  102. gitflow_analytics/tui/screens/loading_screen.py +100 -110
  103. gitflow_analytics/tui/screens/main_screen.py +89 -72
  104. gitflow_analytics/tui/screens/results_screen.py +305 -281
  105. gitflow_analytics/tui/widgets/__init__.py +2 -2
  106. gitflow_analytics/tui/widgets/data_table.py +67 -69
  107. gitflow_analytics/tui/widgets/export_modal.py +76 -76
  108. gitflow_analytics/tui/widgets/progress_widget.py +41 -46
  109. gitflow_analytics-1.3.11.dist-info/METADATA +1015 -0
  110. gitflow_analytics-1.3.11.dist-info/RECORD +122 -0
  111. gitflow_analytics-1.0.3.dist-info/METADATA +0 -490
  112. gitflow_analytics-1.0.3.dist-info/RECORD +0 -62
  113. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.11.dist-info}/WHEEL +0 -0
  114. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.11.dist-info}/entry_points.txt +0 -0
  115. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.11.dist-info}/licenses/LICENSE +0 -0
  116. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.11.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,427 @@
+"""Database-backed report generator for GitFlow Analytics.
+
+WHY: This module generates reports directly from the SQLite database,
+providing fast retrieval and consistent formatting for daily metrics
+and trend analysis.
+"""
+
+import logging
+from datetime import date, datetime, timedelta
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple
+
+from ..core.metrics_storage import DailyMetricsStorage
+
+logger = logging.getLogger(__name__)
+
+
+class DatabaseReportGenerator:
+    """Generate reports directly from database-stored daily metrics.
+
+    WHY: Database-backed reporting provides fast, consistent report generation
+    with built-in trend analysis and classification insights.
+    """
+
+    def __init__(self, metrics_storage: DailyMetricsStorage):
+        """Initialize database report generator.
+
+        Args:
+            metrics_storage: DailyMetricsStorage instance for data access
+        """
+        self.storage = metrics_storage
+        logger.info("Initialized database report generator")
+
+    def generate_qualitative_report(
+        self,
+        start_date: date,
+        end_date: date,
+        output_path: Path,
+        developer_ids: Optional[List[str]] = None,
+        project_keys: Optional[List[str]] = None
+    ) -> Dict[str, Any]:
+        """Generate comprehensive qualitative analysis report.
+
+        WHY: Provides detailed insights into development patterns,
+        classification trends, and team productivity based on stored metrics.
+
+        Args:
+            start_date: Report start date
+            end_date: Report end date
+            output_path: Path to write report file
+            developer_ids: Optional filter by specific developers
+            project_keys: Optional filter by specific projects
+
+        Returns:
+            Dict with report metadata and statistics
+        """
+        logger.info(f"Generating qualitative report for {start_date} to {end_date}")
+
+        # Gather data
+        daily_metrics = self.storage.get_date_range_metrics(
+            start_date, end_date, developer_ids, project_keys
+        )
+
+        classification_summary = self.storage.get_classification_summary(
+            start_date, end_date
+        )
+
+        trends = self.storage.calculate_weekly_trends(start_date, end_date)
+
+        # Generate report content
+        report_content = self._build_qualitative_report_content(
+            daily_metrics, classification_summary, trends, start_date, end_date
+        )
+
+        # Write report to file
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+        with open(output_path, 'w', encoding='utf-8') as f:
+            f.write(report_content)
+
+        # Calculate report statistics
+        report_stats = {
+            'start_date': start_date.isoformat(),
+            'end_date': end_date.isoformat(),
+            'total_days': (end_date - start_date).days + 1,
+            'unique_developers': len(set(m['developer_id'] for m in daily_metrics)),
+            'unique_projects': len(set(m['project_key'] for m in daily_metrics)),
+            'total_commits': sum(m['total_commits'] for m in daily_metrics),
+            'total_records': len(daily_metrics),
+            'trends_calculated': len(trends),
+            'output_file': str(output_path),
+            'generated_at': datetime.utcnow().isoformat()
+        }
+
+        logger.info(f"Generated qualitative report with {report_stats['total_records']} records")
+        return report_stats
+
+    def _build_qualitative_report_content(
+        self,
+        daily_metrics: List[Dict[str, Any]],
+        classification_summary: Dict[str, Dict[str, int]],
+        trends: Dict[Tuple[str, str], Dict[str, float]],
+        start_date: date,
+        end_date: date
+    ) -> str:
+        """Build the complete qualitative report content.
+
+        WHY: Structures the report with clear sections for executive summary,
+        detailed analysis, and actionable insights based on database metrics.
+        """
+        lines = []
+
+        # Header
+        lines.extend([
+            "# GitFlow Analytics - Qualitative Report",
+            f"**Report Period:** {start_date.isoformat()} to {end_date.isoformat()}",
+            f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
+            "",
+            "---",
+            ""
+        ])
+
+        # Executive Summary
+        lines.extend(self._build_executive_summary(daily_metrics, classification_summary))
+
+        # Team Analysis
+        lines.extend(self._build_team_analysis(classification_summary['by_developer']))
+
+        # Project Analysis
+        lines.extend(self._build_project_analysis(classification_summary['by_project']))
+
+        # Weekly Trends Analysis
+        lines.extend(self._build_trends_analysis(trends, daily_metrics))
+
+        # Classification Insights
+        lines.extend(self._build_classification_insights(daily_metrics))
+
+        # Recommendations
+        lines.extend(self._build_recommendations(daily_metrics, classification_summary, trends))
+
+        return "\n".join(lines)
+
+    def _build_executive_summary(
+        self,
+        daily_metrics: List[Dict[str, Any]],
+        classification_summary: Dict[str, Dict[str, int]]
+    ) -> List[str]:
+        """Build executive summary section."""
+        if not daily_metrics:
+            return ["## Executive Summary", "", "No data available for the selected period.", ""]
+
+        total_commits = sum(m['total_commits'] for m in daily_metrics)
+        unique_developers = len(set(m['developer_id'] for m in daily_metrics))
+        unique_projects = len(set(m['project_key'] for m in daily_metrics))
+
+        # Calculate top categories
+        category_totals = {}
+        for metrics in daily_metrics:
+            for category in ['feature', 'bug_fix', 'refactor', 'documentation', 'maintenance']:
+                field = f"{category}_commits"
+                category_totals[category] = category_totals.get(category, 0) + metrics.get(field, 0)
+
+        top_category = max(category_totals, key=category_totals.get) if category_totals else "feature"
+        top_category_count = category_totals.get(top_category, 0)
+        top_category_pct = (top_category_count / total_commits * 100) if total_commits > 0 else 0
+
+        # Top contributor
+        dev_commits = {}
+        for metrics in daily_metrics:
+            dev_name = metrics['developer_name']
+            dev_commits[dev_name] = dev_commits.get(dev_name, 0) + metrics['total_commits']
+
+        top_contributor = max(dev_commits, key=dev_commits.get) if dev_commits else "Unknown"
+        top_contributor_commits = dev_commits.get(top_contributor, 0)
+        top_contributor_pct = (top_contributor_commits / total_commits * 100) if total_commits > 0 else 0
+
+        return [
+            "## Executive Summary",
+            "",
+            f"- **Total Activity:** {total_commits:,} commits across {unique_developers} developers and {unique_projects} projects",
+            f"- **Primary Focus:** {top_category.replace('_', ' ').title()} development ({top_category_count} commits, {top_category_pct:.1f}%)",
+            f"- **Top Contributor:** {top_contributor} ({top_contributor_commits} commits, {top_contributor_pct:.1f}%)",
+            f"- **Average Daily Activity:** {total_commits / max(1, len(set(m['date'] for m in daily_metrics))):.1f} commits per day",
+            "",
+        ]
+
+    def _build_team_analysis(self, developer_summary: Dict[str, Dict[str, int]]) -> List[str]:
+        """Build team analysis section."""
+        if not developer_summary:
+            return ["## Team Analysis", "", "No developer data available.", ""]
+
+        lines = ["## Team Analysis", ""]
+
+        # Sort developers by total commits
+        sorted_devs = sorted(
+            developer_summary.items(),
+            key=lambda x: x[1]['total'],
+            reverse=True
+        )
+
+        for dev_name, stats in sorted_devs[:10]:  # Top 10 developers
+            total = stats['total']
+            features = stats['features']
+            bugs = stats['bug_fixes']
+            refactors = stats['refactors']
+
+            feature_pct = (features / total * 100) if total > 0 else 0
+            bug_pct = (bugs / total * 100) if total > 0 else 0
+            refactor_pct = (refactors / total * 100) if total > 0 else 0
+
+            lines.extend([
+                f"### {dev_name}",
+                f"- **Total Commits:** {total}",
+                f"- **Features:** {features} ({feature_pct:.1f}%)",
+                f"- **Bug Fixes:** {bugs} ({bug_pct:.1f}%)",
+                f"- **Refactoring:** {refactors} ({refactor_pct:.1f}%)",
+                ""
+            ])
+
+        return lines
+
+    def _build_project_analysis(self, project_summary: Dict[str, Dict[str, int]]) -> List[str]:
+        """Build project analysis section."""
+        if not project_summary:
+            return ["## Project Analysis", "", "No project data available.", ""]
+
+        lines = ["## Project Analysis", ""]
+
+        # Sort projects by total commits
+        sorted_projects = sorted(
+            project_summary.items(),
+            key=lambda x: x[1]['total'],
+            reverse=True
+        )
+
+        for project_key, stats in sorted_projects:
+            total = stats['total']
+            features = stats['features']
+            bugs = stats['bug_fixes']
+            refactors = stats['refactors']
+
+            feature_pct = (features / total * 100) if total > 0 else 0
+            bug_pct = (bugs / total * 100) if total > 0 else 0
+            refactor_pct = (refactors / total * 100) if total > 0 else 0
+
+            lines.extend([
+                f"### {project_key}",
+                f"- **Total Commits:** {total}",
+                f"- **Features:** {features} ({feature_pct:.1f}%)",
+                f"- **Bug Fixes:** {bugs} ({bug_pct:.1f}%)",
+                f"- **Refactoring:** {refactors} ({refactor_pct:.1f}%)",
+                ""
+            ])
+
+        return lines
+
+    def _build_trends_analysis(
+        self,
+        trends: Dict[Tuple[str, str], Dict[str, float]],
+        daily_metrics: List[Dict[str, Any]]
+    ) -> List[str]:
+        """Build weekly trends analysis section."""
+        lines = ["## Weekly Trends Analysis", ""]
+
+        if not trends:
+            return lines + ["No trend data available (requires at least 2 weeks of data).", ""]
+
+        # Group trends by developer
+        dev_trends = {}
+        for (dev_id, project_key), trend_data in trends.items():
+            if dev_id not in dev_trends:
+                dev_trends[dev_id] = {}
+            dev_trends[dev_id][project_key] = trend_data
+
+        # Get developer names mapping
+        dev_names = {}
+        for metrics in daily_metrics:
+            dev_names[metrics['developer_id']] = metrics['developer_name']
+
+        for dev_id, project_trends in dev_trends.items():
+            dev_name = dev_names.get(dev_id, dev_id)
+            lines.extend([f"### {dev_name}", ""])
+
+            for project_key, trend_data in project_trends.items():
+                total_change = trend_data['total_commits_change']
+                feature_change = trend_data['feature_commits_change']
+                bug_change = trend_data['bug_fix_commits_change']
+                refactor_change = trend_data['refactor_commits_change']
+
+                # Format trend direction
+                def format_change(change: float) -> str:
+                    if change > 5:
+                        return f"+{change:.1f}% ⬆️"
+                    elif change < -5:
+                        return f"{change:.1f}% ⬇️"
+                    else:
+                        return f"{change:+.1f}% →"
+
+                lines.extend([
+                    f"**{project_key}:**",
+                    f"- Total Commits: {format_change(total_change)}",
+                    f"- Features: {format_change(feature_change)}",
+                    f"- Bug Fixes: {format_change(bug_change)}",
+                    f"- Refactoring: {format_change(refactor_change)}",
+                    ""
+                ])
+
+        return lines
+
+    def _build_classification_insights(self, daily_metrics: List[Dict[str, Any]]) -> List[str]:
+        """Build classification insights section."""
+        lines = ["## Classification Insights", ""]
+
+        if not daily_metrics:
+            return lines + ["No classification data available.", ""]
+
+        # Calculate overall classification distribution
+        total_commits = sum(m['total_commits'] for m in daily_metrics)
+
+        category_totals = {}
+        for metrics in daily_metrics:
+            for category in ['feature', 'bug_fix', 'refactor', 'documentation',
+                             'maintenance', 'test', 'style', 'build', 'other']:
+                field = f"{category}_commits"
+                category_totals[category] = category_totals.get(category, 0) + metrics.get(field, 0)
+
+        lines.append("### Overall Distribution")
+        lines.append("")
+
+        for category, count in sorted(category_totals.items(), key=lambda x: x[1], reverse=True):
+            if count > 0:
+                percentage = (count / total_commits * 100) if total_commits > 0 else 0
+                category_name = category.replace('_', ' ').title()
+                lines.append(f"- **{category_name}:** {count} commits ({percentage:.1f}%)")
+
+        lines.append("")
+
+        # Ticket tracking insights
+        total_tracked = sum(m['tracked_commits'] for m in daily_metrics)
+        total_untracked = sum(m['untracked_commits'] for m in daily_metrics)
+        total_ticket_commits = total_tracked + total_untracked
+
+        if total_ticket_commits > 0:
+            tracking_rate = (total_tracked / total_ticket_commits * 100)
+            lines.extend([
+                "### Ticket Tracking",
+                "",
+                f"- **Tracked Commits:** {total_tracked} ({tracking_rate:.1f}%)",
+                f"- **Untracked Commits:** {total_untracked} ({100 - tracking_rate:.1f}%)",
+                ""
+            ])
+
+        return lines
+
+    def _build_recommendations(
+        self,
+        daily_metrics: List[Dict[str, Any]],
+        classification_summary: Dict[str, Dict[str, int]],
+        trends: Dict[Tuple[str, str], Dict[str, float]]
+    ) -> List[str]:
+        """Build recommendations section."""
+        lines = ["## Recommendations", ""]
+
+        recommendations = []
+
+        # Ticket tracking recommendations
+        total_tracked = sum(m['tracked_commits'] for m in daily_metrics)
+        total_untracked = sum(m['untracked_commits'] for m in daily_metrics)
+        total_commits = total_tracked + total_untracked
+
+        if total_commits > 0:
+            tracking_rate = (total_tracked / total_commits * 100)
+            if tracking_rate < 70:
+                recommendations.append(
+                    f"**Improve Ticket Tracking:** Only {tracking_rate:.1f}% of commits are linked to tickets. "
+                    "Consider implementing commit message templates or pre-commit hooks."
+                )
+
+        # Classification balance recommendations
+        total_feature = sum(m['feature_commits'] for m in daily_metrics)
+        total_bugs = sum(m['bug_fix_commits'] for m in daily_metrics)
+        total_refactor = sum(m['refactor_commits'] for m in daily_metrics)
+        total_class_commits = total_feature + total_bugs + total_refactor
+
+        if total_class_commits > 0:
+            bug_ratio = (total_bugs / total_class_commits * 100)
+            if bug_ratio > 40:
+                recommendations.append(
+                    f"**High Bug Fix Activity:** {bug_ratio:.1f}% of development time is spent on bug fixes. "
+                    "Consider investing in code quality improvements and testing."
+                )
+
+            refactor_ratio = (total_refactor / total_class_commits * 100)
+            if refactor_ratio < 10:
+                recommendations.append(
+                    f"**Low Refactoring Activity:** Only {refactor_ratio:.1f}% of commits are refactoring. "
+                    "Regular refactoring helps maintain code quality and reduces technical debt."
+                )
+
+        # Trend-based recommendations
+        declining_developers = []
+        for (dev_id, project_key), trend_data in trends.items():
+            if trend_data['total_commits_change'] < -20:
+                # Find developer name
+                dev_name = "Unknown"
+                for metrics in daily_metrics:
+                    if metrics['developer_id'] == dev_id:
+                        dev_name = metrics['developer_name']
+                        break
+                declining_developers.append(dev_name)
+
+        if declining_developers:
+            dev_list = ", ".join(set(declining_developers))
+            recommendations.append(
+                f"**Monitor Developer Activity:** The following developers show declining activity: {dev_list}. "
+                "Consider checking for blockers or workload balance issues."
+            )
+
+        # Output recommendations
+        if recommendations:
+            for i, rec in enumerate(recommendations, 1):
+                lines.append(f"{i}. {rec}")
+                lines.append("")
+        else:
+            lines.append("No specific recommendations at this time. Overall development patterns look healthy.")
+            lines.append("")
+
+        return lines
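
For orientation, here is a minimal usage sketch of the new `DatabaseReportGenerator` API introduced in this diff. Only the constructor and the `generate_qualitative_report` signature are taken from the code above; the `DailyMetricsStorage(...)` construction and the database path are assumptions, since that class is defined in `gitflow_analytics/core/metrics_storage.py` and its interface is not shown here.

```python
# Hypothetical sketch, not part of the package diff.
# Assumption: DailyMetricsStorage can be constructed from a database path.
from datetime import date
from pathlib import Path

from gitflow_analytics.core.metrics_storage import DailyMetricsStorage
from gitflow_analytics.reports.database_report_generator import DatabaseReportGenerator

# Open the stored daily metrics (path and constructor signature assumed).
storage = DailyMetricsStorage(Path(".gitflow-cache/daily_metrics.db"))
generator = DatabaseReportGenerator(storage)

# Signature as shown in the diff: date range, output path, optional filters.
stats = generator.generate_qualitative_report(
    start_date=date(2024, 1, 1),
    end_date=date(2024, 3, 31),
    output_path=Path("reports/qualitative_report.md"),
    developer_ids=None,
    project_keys=None,
)
print(f"{stats['total_commits']} commits summarized -> {stats['output_file']}")
```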