gitflow-analytics 1.0.1__py3-none-any.whl → 1.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. gitflow_analytics/__init__.py +11 -11
  2. gitflow_analytics/_version.py +2 -2
  3. gitflow_analytics/classification/__init__.py +31 -0
  4. gitflow_analytics/classification/batch_classifier.py +752 -0
  5. gitflow_analytics/classification/classifier.py +464 -0
  6. gitflow_analytics/classification/feature_extractor.py +725 -0
  7. gitflow_analytics/classification/linguist_analyzer.py +574 -0
  8. gitflow_analytics/classification/model.py +455 -0
  9. gitflow_analytics/cli.py +4490 -378
  10. gitflow_analytics/cli_rich.py +503 -0
  11. gitflow_analytics/config/__init__.py +43 -0
  12. gitflow_analytics/config/errors.py +261 -0
  13. gitflow_analytics/config/loader.py +904 -0
  14. gitflow_analytics/config/profiles.py +264 -0
  15. gitflow_analytics/config/repository.py +124 -0
  16. gitflow_analytics/config/schema.py +441 -0
  17. gitflow_analytics/config/validator.py +154 -0
  18. gitflow_analytics/config.py +44 -398
  19. gitflow_analytics/core/analyzer.py +1320 -172
  20. gitflow_analytics/core/branch_mapper.py +132 -132
  21. gitflow_analytics/core/cache.py +1554 -175
  22. gitflow_analytics/core/data_fetcher.py +1193 -0
  23. gitflow_analytics/core/identity.py +571 -185
  24. gitflow_analytics/core/metrics_storage.py +526 -0
  25. gitflow_analytics/core/progress.py +372 -0
  26. gitflow_analytics/core/schema_version.py +269 -0
  27. gitflow_analytics/extractors/base.py +13 -11
  28. gitflow_analytics/extractors/ml_tickets.py +1100 -0
  29. gitflow_analytics/extractors/story_points.py +77 -59
  30. gitflow_analytics/extractors/tickets.py +841 -89
  31. gitflow_analytics/identity_llm/__init__.py +6 -0
  32. gitflow_analytics/identity_llm/analysis_pass.py +231 -0
  33. gitflow_analytics/identity_llm/analyzer.py +464 -0
  34. gitflow_analytics/identity_llm/models.py +76 -0
  35. gitflow_analytics/integrations/github_integration.py +258 -87
  36. gitflow_analytics/integrations/jira_integration.py +572 -123
  37. gitflow_analytics/integrations/orchestrator.py +206 -82
  38. gitflow_analytics/metrics/activity_scoring.py +322 -0
  39. gitflow_analytics/metrics/branch_health.py +470 -0
  40. gitflow_analytics/metrics/dora.py +542 -179
  41. gitflow_analytics/models/database.py +986 -59
  42. gitflow_analytics/pm_framework/__init__.py +115 -0
  43. gitflow_analytics/pm_framework/adapters/__init__.py +50 -0
  44. gitflow_analytics/pm_framework/adapters/jira_adapter.py +1845 -0
  45. gitflow_analytics/pm_framework/base.py +406 -0
  46. gitflow_analytics/pm_framework/models.py +211 -0
  47. gitflow_analytics/pm_framework/orchestrator.py +652 -0
  48. gitflow_analytics/pm_framework/registry.py +333 -0
  49. gitflow_analytics/qualitative/__init__.py +29 -0
  50. gitflow_analytics/qualitative/chatgpt_analyzer.py +259 -0
  51. gitflow_analytics/qualitative/classifiers/__init__.py +13 -0
  52. gitflow_analytics/qualitative/classifiers/change_type.py +742 -0
  53. gitflow_analytics/qualitative/classifiers/domain_classifier.py +506 -0
  54. gitflow_analytics/qualitative/classifiers/intent_analyzer.py +535 -0
  55. gitflow_analytics/qualitative/classifiers/llm/__init__.py +35 -0
  56. gitflow_analytics/qualitative/classifiers/llm/base.py +193 -0
  57. gitflow_analytics/qualitative/classifiers/llm/batch_processor.py +383 -0
  58. gitflow_analytics/qualitative/classifiers/llm/cache.py +479 -0
  59. gitflow_analytics/qualitative/classifiers/llm/cost_tracker.py +435 -0
  60. gitflow_analytics/qualitative/classifiers/llm/openai_client.py +403 -0
  61. gitflow_analytics/qualitative/classifiers/llm/prompts.py +373 -0
  62. gitflow_analytics/qualitative/classifiers/llm/response_parser.py +287 -0
  63. gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +607 -0
  64. gitflow_analytics/qualitative/classifiers/risk_analyzer.py +438 -0
  65. gitflow_analytics/qualitative/core/__init__.py +13 -0
  66. gitflow_analytics/qualitative/core/llm_fallback.py +657 -0
  67. gitflow_analytics/qualitative/core/nlp_engine.py +382 -0
  68. gitflow_analytics/qualitative/core/pattern_cache.py +479 -0
  69. gitflow_analytics/qualitative/core/processor.py +673 -0
  70. gitflow_analytics/qualitative/enhanced_analyzer.py +2236 -0
  71. gitflow_analytics/qualitative/example_enhanced_usage.py +420 -0
  72. gitflow_analytics/qualitative/models/__init__.py +25 -0
  73. gitflow_analytics/qualitative/models/schemas.py +306 -0
  74. gitflow_analytics/qualitative/utils/__init__.py +13 -0
  75. gitflow_analytics/qualitative/utils/batch_processor.py +339 -0
  76. gitflow_analytics/qualitative/utils/cost_tracker.py +345 -0
  77. gitflow_analytics/qualitative/utils/metrics.py +361 -0
  78. gitflow_analytics/qualitative/utils/text_processing.py +285 -0
  79. gitflow_analytics/reports/__init__.py +100 -0
  80. gitflow_analytics/reports/analytics_writer.py +550 -18
  81. gitflow_analytics/reports/base.py +648 -0
  82. gitflow_analytics/reports/branch_health_writer.py +322 -0
  83. gitflow_analytics/reports/classification_writer.py +924 -0
  84. gitflow_analytics/reports/cli_integration.py +427 -0
  85. gitflow_analytics/reports/csv_writer.py +1700 -216
  86. gitflow_analytics/reports/data_models.py +504 -0
  87. gitflow_analytics/reports/database_report_generator.py +427 -0
  88. gitflow_analytics/reports/example_usage.py +344 -0
  89. gitflow_analytics/reports/factory.py +499 -0
  90. gitflow_analytics/reports/formatters.py +698 -0
  91. gitflow_analytics/reports/html_generator.py +1116 -0
  92. gitflow_analytics/reports/interfaces.py +489 -0
  93. gitflow_analytics/reports/json_exporter.py +2770 -0
  94. gitflow_analytics/reports/narrative_writer.py +2289 -158
  95. gitflow_analytics/reports/story_point_correlation.py +1144 -0
  96. gitflow_analytics/reports/weekly_trends_writer.py +389 -0
  97. gitflow_analytics/training/__init__.py +5 -0
  98. gitflow_analytics/training/model_loader.py +377 -0
  99. gitflow_analytics/training/pipeline.py +550 -0
  100. gitflow_analytics/tui/__init__.py +5 -0
  101. gitflow_analytics/tui/app.py +724 -0
  102. gitflow_analytics/tui/screens/__init__.py +8 -0
  103. gitflow_analytics/tui/screens/analysis_progress_screen.py +496 -0
  104. gitflow_analytics/tui/screens/configuration_screen.py +523 -0
  105. gitflow_analytics/tui/screens/loading_screen.py +348 -0
  106. gitflow_analytics/tui/screens/main_screen.py +321 -0
  107. gitflow_analytics/tui/screens/results_screen.py +722 -0
  108. gitflow_analytics/tui/widgets/__init__.py +7 -0
  109. gitflow_analytics/tui/widgets/data_table.py +255 -0
  110. gitflow_analytics/tui/widgets/export_modal.py +301 -0
  111. gitflow_analytics/tui/widgets/progress_widget.py +187 -0
  112. gitflow_analytics-1.3.6.dist-info/METADATA +1015 -0
  113. gitflow_analytics-1.3.6.dist-info/RECORD +122 -0
  114. gitflow_analytics-1.0.1.dist-info/METADATA +0 -463
  115. gitflow_analytics-1.0.1.dist-info/RECORD +0 -31
  116. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.3.6.dist-info}/WHEEL +0 -0
  117. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.3.6.dist-info}/entry_points.txt +0 -0
  118. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.3.6.dist-info}/licenses/LICENSE +0 -0
  119. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.3.6.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,420 @@
1
+ #!/usr/bin/env python3
2
+ """Example usage of the Enhanced Qualitative Analyzer.
3
+
4
+ This example demonstrates how to use the EnhancedQualitativeAnalyzer to generate
5
+ comprehensive qualitative insights across four key dimensions:
6
+ 1. Executive Summary Analysis
7
+ 2. Project Analysis
8
+ 3. Developer Analysis
9
+ 4. Workflow Analysis
10
+
11
+ The enhanced analyzer produces natural language insights, risk assessments,
12
+ and actionable recommendations for different stakeholder levels.
13
+ """
14
+
15
+ import json
16
+ from datetime import datetime, timedelta, timezone
17
+ from pathlib import Path
18
+ from typing import Any
19
+
20
+ from .enhanced_analyzer import EnhancedQualitativeAnalyzer
21
+
22
+
23
def create_sample_commits() -> list[dict[str, Any]]:
    """Create sample commit data for demonstration."""

    start = datetime.now(timezone.utc) - timedelta(weeks=8)

    def _commit(
        commit_hash: str,
        message: str,
        author: str,
        email: str,
        day: int,
        project: str,
        insertions: int,
        deletions: int,
        files_changed: int,
        tickets: list[str],
        story_points: int,
    ) -> dict[str, Any]:
        """Build one sample commit record; filtered counts mirror raw counts."""
        return {
            "hash": commit_hash,
            "message": message,
            "author_name": author,
            "author_email": email,
            "canonical_id": email,
            "timestamp": start + timedelta(days=day),
            "project_key": project,
            "insertions": insertions,
            "deletions": deletions,
            "filtered_insertions": insertions,
            "filtered_deletions": deletions,
            "files_changed": files_changed,
            "ticket_references": tickets,
            "story_points": story_points,
        }

    return [
        _commit("abc123", "feat: implement user authentication system",
                "Alice Developer", "alice@company.com", 1, "AUTH_SERVICE",
                250, 30, 8, ["AUTH-123"], 5),
        _commit("def456", "fix: resolve login timeout issue",
                "Bob Engineer", "bob@company.com", 3, "AUTH_SERVICE",
                45, 12, 2, ["AUTH-124"], 2),
        _commit("ghi789", "refactor: improve database connection pooling",
                "Alice Developer", "alice@company.com", 5, "DATA_SERVICE",
                180, 95, 5, ["DATA-67"], 8),
        _commit("jkl012", "docs: update API documentation",
                "Carol Writer", "carol@company.com", 7, "API_DOCS",
                120, 20, 3, ["DOC-45"], 3),
        # Extra commit so the analysis has a bit more realistic volume.
        _commit("mno345", "feat: add user preferences endpoint",
                "Bob Engineer", "bob@company.com", 10, "USER_SERVICE",
                320, 15, 12, ["USER-89"], 13),
    ]
113
+
114
+
115
def create_sample_developer_stats() -> list[dict[str, Any]]:
    """Create sample developer statistics."""

    now = datetime.now(timezone.utc)

    def _developer(
        email: str,
        name: str,
        commits: int,
        story_points: int,
        weeks_active: int,
        days_since_last: int,
        github: str,
    ) -> dict[str, Any]:
        """Build one developer record; every sample identity has a single alias."""
        return {
            "canonical_id": email,
            "primary_name": name,
            "primary_email": email,
            "total_commits": commits,
            "total_story_points": story_points,
            "alias_count": 1,
            "first_seen": now - timedelta(weeks=weeks_active),
            "last_seen": now - timedelta(days=days_since_last),
            "github_username": github,
        }

    return [
        _developer("alice@company.com", "Alice Developer", 45, 89, 12, 2, "alice-dev"),
        _developer("bob@company.com", "Bob Engineer", 38, 72, 10, 1, "bob-eng"),
        _developer("carol@company.com", "Carol Writer", 23, 34, 8, 7, "carol-docs"),
    ]
155
+
156
+
157
def create_sample_project_metrics() -> dict[str, Any]:
    """Create sample project metrics."""

    # Assemble each section separately, then combine into the metrics payload.
    ticket_analysis = {
        "commit_coverage_pct": 87.5,
        "total_tickets_referenced": 156,
        "unique_tickets": 145,
    }
    story_point_analysis = {"total_story_points": 195, "average_per_commit": 4.2}

    return {
        "ticket_analysis": ticket_analysis,
        "story_point_analysis": story_point_analysis,
    }
168
+
169
+
170
def create_sample_pm_data() -> dict[str, Any]:
    """Create sample PM platform integration data."""

    # Two sample commit/ticket correlations produced by different methods.
    correlations = [
        {
            "commit_hash": "abc123",
            "ticket_id": "AUTH-123",
            "confidence": 0.95,
            "correlation_method": "exact_match",
        },
        {
            "commit_hash": "def456",
            "ticket_id": "AUTH-124",
            "confidence": 0.89,
            "correlation_method": "semantic_similarity",
        },
    ]

    # Aggregate platform metrics; only a JIRA platform is represented here.
    jira_coverage = {"total_issues": 156, "linked_issues": 142, "coverage_percentage": 91.0}
    metrics = {
        "total_pm_issues": 178,
        "platform_coverage": {"jira": jira_coverage},
        "story_point_analysis": {"story_point_coverage_pct": 78.5},
    }

    return {"correlations": correlations, "metrics": metrics}
196
+
197
+
198
def demonstrate_enhanced_analysis() -> None:
    """Demonstrate the enhanced qualitative analyzer functionality.

    Builds synthetic commit/developer/project/PM fixture data, runs a full
    comprehensive analysis with EnhancedQualitativeAnalyzer, prints a
    human-readable summary of each analysis dimension, and exports the raw
    result to a JSON file in the current working directory.
    """

    print("🔍 Enhanced Qualitative Analysis Demonstration")
    print("=" * 60)

    # Create sample data using the fixture builders defined in this module.
    commits = create_sample_commits()
    developer_stats = create_sample_developer_stats()
    project_metrics = create_sample_project_metrics()
    pm_data = create_sample_pm_data()

    print("📊 Sample data created:")
    print(f" - {len(commits)} commits")
    print(f" - {len(developer_stats)} developers")
    print(f" - {len(project_metrics.get('ticket_analysis', {}))} ticket metrics")
    print(f" - PM integration with {len(pm_data.get('correlations', []))} correlations")
    print()

    # Initialize the enhanced analyzer
    analyzer = EnhancedQualitativeAnalyzer()
    print("🚀 Enhanced Qualitative Analyzer initialized")
    print()

    # Perform comprehensive analysis
    print("🔄 Performing comprehensive analysis...")
    analysis_result = analyzer.analyze_comprehensive(
        commits=commits,
        qualitative_data=None,  # No detailed qualitative data in this example
        developer_stats=developer_stats,
        project_metrics=project_metrics,
        pm_data=pm_data,
        weeks_analyzed=8,
    )

    print("✅ Analysis completed!")
    print()

    # Display results by dimension.
    # --- Dimension 1: executive summary ---------------------------------
    # All lookups below use .get with defaults, so partial analyzer output
    # still prints without raising.
    print("📈 EXECUTIVE SUMMARY ANALYSIS")
    print("-" * 40)

    exec_analysis = analysis_result.get("executive_analysis", {})
    print(f"Team Health: {exec_analysis.get('health_assessment', 'Unknown')}")
    print(f"Health Confidence: {exec_analysis.get('health_confidence', 0):.2f}")

    velocity_trends = exec_analysis.get("velocity_trends", {})
    print(f"Velocity Trend: {velocity_trends.get('overall_trend', 'Unknown')}")
    print(f"Weekly Average: {velocity_trends.get('weekly_average', 0)} commits")

    # Only the top two achievements/concerns are shown to keep output short.
    achievements = exec_analysis.get("key_achievements", [])
    print(f"Key Achievements: {len(achievements)} identified")
    for achievement in achievements[:2]:
        print(f" • {achievement.get('title', 'Unknown')}")

    concerns = exec_analysis.get("major_concerns", [])
    print(f"Major Concerns: {len(concerns)} identified")
    for concern in concerns[:2]:
        print(f" ⚠️ {concern.get('title', 'Unknown')}")

    print(f"\\nExecutive Summary: {exec_analysis.get('executive_summary', 'No summary available')}")
    print()

    # --- Dimension 2: per-project analysis ------------------------------
    print("🏗️ PROJECT ANALYSIS")
    print("-" * 40)

    project_analysis = analysis_result.get("project_analysis", {})
    for project_key, project_data in list(project_analysis.items())[:3]:  # Show first 3 projects
        momentum = project_data.get("momentum", {})
        health = project_data.get("health_indicators", {}).get("overall_health", {})

        print(f"Project: {project_key}")
        print(
            f" Momentum: {momentum.get('classification', 'Unknown')} ({momentum.get('confidence', 0):.2f} confidence)"
        )
        print(f" Health Score: {health.get('score', 0)}/100 ({health.get('status', 'Unknown')})")

        recommendations = project_data.get("recommendations", [])
        if recommendations:
            print(f" Top Recommendation: {recommendations[0].get('title', 'None')}")
        print()

    # --- Dimension 3: per-developer analysis ----------------------------
    print("👨‍💻 DEVELOPER ANALYSIS")
    print("-" * 40)

    developer_analysis = analysis_result.get("developer_analysis", {})
    for dev_id, dev_data in list(developer_analysis.items())[:3]:  # Show first 3 developers
        contribution = dev_data.get("contribution_pattern", {})
        collaboration = dev_data.get("collaboration_score", {})
        growth = dev_data.get("growth_trajectory", {})

        # Use canonical_id for lookup if name not available
        dev_name = None
        for dev_stat in developer_stats:
            if dev_stat.get("canonical_id") == dev_id:
                dev_name = dev_stat.get("primary_name", dev_id)
                break

        print(f"Developer: {dev_name or dev_id}")
        print(
            f" Pattern: {contribution.get('pattern', 'Unknown')} ({contribution.get('confidence', 0):.2f} confidence)"
        )
        print(
            f" Collaboration: {collaboration.get('level', 'Unknown')} ({collaboration.get('score', 0):.1f}/100)"
        )
        print(f" Growth: {growth.get('trajectory', 'Unknown')} trajectory")

        recommendations = dev_data.get("career_recommendations", [])
        if recommendations:
            print(f" Career Recommendation: {recommendations[0].get('title', 'None')}")
        print()

    # --- Dimension 4: workflow analysis ---------------------------------
    print("⚙️ WORKFLOW ANALYSIS")
    print("-" * 40)

    workflow_analysis = analysis_result.get("workflow_analysis", {})
    git_pm = workflow_analysis.get("git_pm_effectiveness", {})
    bottlenecks = workflow_analysis.get("process_bottlenecks", [])
    compliance = workflow_analysis.get("compliance_metrics", {})

    print(f"Git-PM Effectiveness: {git_pm.get('effectiveness', 'Unknown')}")
    print(f"Effectiveness Score: {git_pm.get('score', 0):.1f}%")
    print(f"Process Bottlenecks: {len(bottlenecks)} identified")
    print(f"Overall Compliance: {compliance.get('overall_score', 0):.1f}%")

    automation_opps = workflow_analysis.get("automation_opportunities", [])
    print(f"Automation Opportunities: {len(automation_opps)} identified")

    print(
        f"\\nWorkflow Summary: {workflow_analysis.get('workflow_narrative', 'No summary available')}"
    )
    print()

    # --- Insights that span multiple dimensions -------------------------
    print("🔗 CROSS-DIMENSIONAL INSIGHTS")
    print("-" * 40)

    cross_insights = analysis_result.get("cross_insights", [])
    print(f"Cross-insights identified: {len(cross_insights)}")
    for insight in cross_insights:
        print(
            f" • {insight.get('title', 'Unknown')} ({insight.get('priority', 'unknown')} priority)"
        )
    print()

    # Show how to export to JSON format
    print("💾 JSON EXPORT EXAMPLE")
    print("-" * 40)

    # Create a sample output path (relative to the current working directory).
    output_path = Path("enhanced_analysis_example.json")

    # Export the results. default=str stringifies non-JSON types such as the
    # datetime timestamps in the fixture data.
    try:
        with open(output_path, "w") as f:
            json.dump(analysis_result, f, indent=2, default=str)

        print(f"✅ Enhanced analysis exported to: {output_path}")
        print(f"File size: {output_path.stat().st_size / 1024:.1f} KB")

        # Show a sample of the JSON structure: top-level keys mapped to the
        # container length (for list/dict values) or the value's type name.
        print("\\nJSON Structure Preview:")
        print(
            json.dumps(
                {
                    key: f"<{len(value) if isinstance(value, (list, dict)) else type(value).__name__}>"
                    for key, value in analysis_result.items()
                },
                indent=2,
            )
        )

    except Exception as e:
        # Best-effort export for a demo script: report the failure and continue.
        print(f"❌ Export failed: {e}")

    print()
    print("🎯 INTEGRATION WITH EXISTING REPORTS")
    print("-" * 40)
    print("To integrate with existing GitFlow Analytics JSON exports:")
    print("1. Import EnhancedQualitativeAnalyzer in your analysis pipeline")
    print("2. Call analyzer.analyze_comprehensive() with your commit data")
    print("3. Pass the results to ComprehensiveJSONExporter.export_comprehensive_data()")
    print("4. The enhanced analysis will be included under 'enhanced_qualitative_analysis' key")
    print()
    print("Example integration code:")
    print(
        """
from gitflow_analytics.qualitative import EnhancedQualitativeAnalyzer
from gitflow_analytics.reports.json_exporter import ComprehensiveJSONExporter

# Initialize components
analyzer = EnhancedQualitativeAnalyzer()
exporter = ComprehensiveJSONExporter()

# Perform enhanced analysis
enhanced_analysis = analyzer.analyze_comprehensive(
    commits=commits,
    developer_stats=developer_stats,
    project_metrics=project_metrics,
    pm_data=pm_data,
    weeks_analyzed=weeks
)

# Export with enhanced analysis included
exporter.export_comprehensive_data(
    commits=commits,
    prs=prs,
    developer_stats=developer_stats,
    project_metrics=project_metrics,
    dora_metrics=dora_metrics,
    output_path=output_path,
    weeks=weeks,
    pm_data=pm_data,
    enhanced_qualitative_analysis=enhanced_analysis  # Include enhanced analysis
)
"""
    )
    print()
    print("🏁 Enhanced Qualitative Analysis demonstration completed!")
    print("=" * 60)
417
+
418
+
419
+ if __name__ == "__main__":
420
+ demonstrate_enhanced_analysis()
@@ -0,0 +1,25 @@
1
+ """Data models and schemas for qualitative analysis."""
2
+
3
+ from .schemas import (
4
+ CacheConfig,
5
+ ChangeTypeConfig,
6
+ DomainConfig,
7
+ IntentConfig,
8
+ LLMConfig,
9
+ NLPConfig,
10
+ QualitativeCommitData,
11
+ QualitativeConfig,
12
+ RiskConfig,
13
+ )
14
+
15
+ __all__ = [
16
+ "QualitativeCommitData",
17
+ "QualitativeConfig",
18
+ "NLPConfig",
19
+ "LLMConfig",
20
+ "CacheConfig",
21
+ "ChangeTypeConfig",
22
+ "IntentConfig",
23
+ "DomainConfig",
24
+ "RiskConfig",
25
+ ]