gitflow-analytics 1.0.3__py3-none-any.whl → 1.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. gitflow_analytics/_version.py +1 -1
  2. gitflow_analytics/classification/__init__.py +31 -0
  3. gitflow_analytics/classification/batch_classifier.py +752 -0
  4. gitflow_analytics/classification/classifier.py +464 -0
  5. gitflow_analytics/classification/feature_extractor.py +725 -0
  6. gitflow_analytics/classification/linguist_analyzer.py +574 -0
  7. gitflow_analytics/classification/model.py +455 -0
  8. gitflow_analytics/cli.py +4108 -350
  9. gitflow_analytics/cli_rich.py +198 -48
  10. gitflow_analytics/config/__init__.py +43 -0
  11. gitflow_analytics/config/errors.py +261 -0
  12. gitflow_analytics/config/loader.py +904 -0
  13. gitflow_analytics/config/profiles.py +264 -0
  14. gitflow_analytics/config/repository.py +124 -0
  15. gitflow_analytics/config/schema.py +441 -0
  16. gitflow_analytics/config/validator.py +154 -0
  17. gitflow_analytics/config.py +44 -508
  18. gitflow_analytics/core/analyzer.py +1209 -98
  19. gitflow_analytics/core/cache.py +1337 -29
  20. gitflow_analytics/core/data_fetcher.py +1193 -0
  21. gitflow_analytics/core/identity.py +363 -14
  22. gitflow_analytics/core/metrics_storage.py +526 -0
  23. gitflow_analytics/core/progress.py +372 -0
  24. gitflow_analytics/core/schema_version.py +269 -0
  25. gitflow_analytics/extractors/ml_tickets.py +1100 -0
  26. gitflow_analytics/extractors/story_points.py +8 -1
  27. gitflow_analytics/extractors/tickets.py +749 -11
  28. gitflow_analytics/identity_llm/__init__.py +6 -0
  29. gitflow_analytics/identity_llm/analysis_pass.py +231 -0
  30. gitflow_analytics/identity_llm/analyzer.py +464 -0
  31. gitflow_analytics/identity_llm/models.py +76 -0
  32. gitflow_analytics/integrations/github_integration.py +175 -11
  33. gitflow_analytics/integrations/jira_integration.py +461 -24
  34. gitflow_analytics/integrations/orchestrator.py +124 -1
  35. gitflow_analytics/metrics/activity_scoring.py +322 -0
  36. gitflow_analytics/metrics/branch_health.py +470 -0
  37. gitflow_analytics/metrics/dora.py +379 -20
  38. gitflow_analytics/models/database.py +843 -53
  39. gitflow_analytics/pm_framework/__init__.py +115 -0
  40. gitflow_analytics/pm_framework/adapters/__init__.py +50 -0
  41. gitflow_analytics/pm_framework/adapters/jira_adapter.py +1845 -0
  42. gitflow_analytics/pm_framework/base.py +406 -0
  43. gitflow_analytics/pm_framework/models.py +211 -0
  44. gitflow_analytics/pm_framework/orchestrator.py +652 -0
  45. gitflow_analytics/pm_framework/registry.py +333 -0
  46. gitflow_analytics/qualitative/__init__.py +9 -10
  47. gitflow_analytics/qualitative/chatgpt_analyzer.py +259 -0
  48. gitflow_analytics/qualitative/classifiers/__init__.py +3 -3
  49. gitflow_analytics/qualitative/classifiers/change_type.py +518 -244
  50. gitflow_analytics/qualitative/classifiers/domain_classifier.py +272 -165
  51. gitflow_analytics/qualitative/classifiers/intent_analyzer.py +321 -222
  52. gitflow_analytics/qualitative/classifiers/llm/__init__.py +35 -0
  53. gitflow_analytics/qualitative/classifiers/llm/base.py +193 -0
  54. gitflow_analytics/qualitative/classifiers/llm/batch_processor.py +383 -0
  55. gitflow_analytics/qualitative/classifiers/llm/cache.py +479 -0
  56. gitflow_analytics/qualitative/classifiers/llm/cost_tracker.py +435 -0
  57. gitflow_analytics/qualitative/classifiers/llm/openai_client.py +403 -0
  58. gitflow_analytics/qualitative/classifiers/llm/prompts.py +373 -0
  59. gitflow_analytics/qualitative/classifiers/llm/response_parser.py +287 -0
  60. gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +607 -0
  61. gitflow_analytics/qualitative/classifiers/risk_analyzer.py +215 -189
  62. gitflow_analytics/qualitative/core/__init__.py +4 -4
  63. gitflow_analytics/qualitative/core/llm_fallback.py +239 -235
  64. gitflow_analytics/qualitative/core/nlp_engine.py +157 -148
  65. gitflow_analytics/qualitative/core/pattern_cache.py +214 -192
  66. gitflow_analytics/qualitative/core/processor.py +381 -248
  67. gitflow_analytics/qualitative/enhanced_analyzer.py +2236 -0
  68. gitflow_analytics/qualitative/example_enhanced_usage.py +420 -0
  69. gitflow_analytics/qualitative/models/__init__.py +7 -7
  70. gitflow_analytics/qualitative/models/schemas.py +155 -121
  71. gitflow_analytics/qualitative/utils/__init__.py +4 -4
  72. gitflow_analytics/qualitative/utils/batch_processor.py +136 -123
  73. gitflow_analytics/qualitative/utils/cost_tracker.py +142 -140
  74. gitflow_analytics/qualitative/utils/metrics.py +172 -158
  75. gitflow_analytics/qualitative/utils/text_processing.py +146 -104
  76. gitflow_analytics/reports/__init__.py +100 -0
  77. gitflow_analytics/reports/analytics_writer.py +539 -14
  78. gitflow_analytics/reports/base.py +648 -0
  79. gitflow_analytics/reports/branch_health_writer.py +322 -0
  80. gitflow_analytics/reports/classification_writer.py +924 -0
  81. gitflow_analytics/reports/cli_integration.py +427 -0
  82. gitflow_analytics/reports/csv_writer.py +1676 -212
  83. gitflow_analytics/reports/data_models.py +504 -0
  84. gitflow_analytics/reports/database_report_generator.py +427 -0
  85. gitflow_analytics/reports/example_usage.py +344 -0
  86. gitflow_analytics/reports/factory.py +499 -0
  87. gitflow_analytics/reports/formatters.py +698 -0
  88. gitflow_analytics/reports/html_generator.py +1116 -0
  89. gitflow_analytics/reports/interfaces.py +489 -0
  90. gitflow_analytics/reports/json_exporter.py +2770 -0
  91. gitflow_analytics/reports/narrative_writer.py +2287 -158
  92. gitflow_analytics/reports/story_point_correlation.py +1144 -0
  93. gitflow_analytics/reports/weekly_trends_writer.py +389 -0
  94. gitflow_analytics/training/__init__.py +5 -0
  95. gitflow_analytics/training/model_loader.py +377 -0
  96. gitflow_analytics/training/pipeline.py +550 -0
  97. gitflow_analytics/tui/__init__.py +1 -1
  98. gitflow_analytics/tui/app.py +129 -126
  99. gitflow_analytics/tui/screens/__init__.py +3 -3
  100. gitflow_analytics/tui/screens/analysis_progress_screen.py +188 -179
  101. gitflow_analytics/tui/screens/configuration_screen.py +154 -178
  102. gitflow_analytics/tui/screens/loading_screen.py +100 -110
  103. gitflow_analytics/tui/screens/main_screen.py +89 -72
  104. gitflow_analytics/tui/screens/results_screen.py +305 -281
  105. gitflow_analytics/tui/widgets/__init__.py +2 -2
  106. gitflow_analytics/tui/widgets/data_table.py +67 -69
  107. gitflow_analytics/tui/widgets/export_modal.py +76 -76
  108. gitflow_analytics/tui/widgets/progress_widget.py +41 -46
  109. gitflow_analytics-1.3.6.dist-info/METADATA +1015 -0
  110. gitflow_analytics-1.3.6.dist-info/RECORD +122 -0
  111. gitflow_analytics-1.0.3.dist-info/METADATA +0 -490
  112. gitflow_analytics-1.0.3.dist-info/RECORD +0 -62
  113. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/WHEEL +0 -0
  114. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/entry_points.txt +0 -0
  115. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/licenses/LICENSE +0 -0
  116. {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/top_level.txt +0 -0
gitflow_analytics/qualitative/utils/text_processing.py

```diff
@@ -2,242 +2,284 @@
 
 import hashlib
 import re
-from typing import Dict, List, Set
 
 
 class TextProcessor:
     """Utility class for text preprocessing and feature extraction.
-
+
     This class provides common text processing operations needed across
     the qualitative analysis pipeline, including normalization, feature
     extraction, and similarity calculations.
     """
-
+
     def __init__(self) -> None:
         """Initialize text processor with common patterns."""
         # Common patterns for normalization
-        self.url_pattern = re.compile(r'https?://[^\s]+')
-        self.email_pattern = re.compile(r'\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b')
-        self.hash_pattern = re.compile(r'\b[a-f0-9]{7,40}\b')  # Git hashes
-        self.ticket_pattern = re.compile(r'\b(?:JIRA|TICKET|ISSUE|BUG|TASK)-?\d+\b', re.IGNORECASE)
-
+        self.url_pattern = re.compile(r"https?://[^\s]+")
+        self.email_pattern = re.compile(r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b")
+        self.hash_pattern = re.compile(r"\b[a-f0-9]{7,40}\b")  # Git hashes
+        self.ticket_pattern = re.compile(r"\b(?:JIRA|TICKET|ISSUE|BUG|TASK)-?\d+\b", re.IGNORECASE)
+
         # Stop words for feature extraction
-        self.stop_words: Set[str] = {
-            'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
-            'of', 'with', 'by', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
-            'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could',
-            'should', 'may', 'might', 'can', 'this', 'that', 'these', 'those'
+        self.stop_words: set[str] = {
+            "the",
+            "a",
+            "an",
+            "and",
+            "or",
+            "but",
+            "in",
+            "on",
+            "at",
+            "to",
+            "for",
+            "of",
+            "with",
+            "by",
+            "is",
+            "are",
+            "was",
+            "were",
+            "be",
+            "been",
+            "being",
+            "have",
+            "has",
+            "had",
+            "do",
+            "does",
+            "did",
+            "will",
+            "would",
+            "could",
+            "should",
+            "may",
+            "might",
+            "can",
+            "this",
+            "that",
+            "these",
+            "those",
         }
-
+
     def normalize_message(self, message: str) -> str:
         """Normalize commit message for consistent processing.
-
+
         This method standardizes commit messages by removing URLs, emails,
-        hashes, and other variable content that doesn't contribute to
+        hashes, and other variable content that doesn't contribute to
         semantic classification.
-
+
         Args:
             message: Raw commit message
-
+
         Returns:
             Normalized message suitable for classification
         """
         if not message:
             return ""
-
+
         # Convert to lowercase for consistency
         normalized = message.lower().strip()
-
+
         # Remove URLs, emails, and hashes
-        normalized = self.url_pattern.sub('[URL]', normalized)
-        normalized = self.email_pattern.sub('[EMAIL]', normalized)
-        normalized = self.hash_pattern.sub('[HASH]', normalized)
-
+        normalized = self.url_pattern.sub("[URL]", normalized)
+        normalized = self.email_pattern.sub("[EMAIL]", normalized)
+        normalized = self.hash_pattern.sub("[HASH]", normalized)
+
         # Normalize ticket references
-        normalized = self.ticket_pattern.sub('[TICKET]', normalized)
-
+        normalized = self.ticket_pattern.sub("[TICKET]", normalized)
+
         # Remove extra whitespace
-        normalized = re.sub(r'\s+', ' ', normalized)
-
+        normalized = re.sub(r"\s+", " ", normalized)
+
         return normalized.strip()
-
-    def extract_keywords(self, text: str, min_length: int = 3) -> List[str]:
+
+    def extract_keywords(self, text: str, min_length: int = 3) -> list[str]:
         """Extract meaningful keywords from text.
-
+
         Extracts keywords by removing stop words, punctuation, and short words
         that are unlikely to be semantically meaningful.
-
+
         Args:
             text: Input text to extract keywords from
             min_length: Minimum length for keywords
-
+
         Returns:
             List of extracted keywords
         """
         if not text:
             return []
-
+
         # Split into words and clean
-        words = re.findall(r'\b[a-zA-Z]+\b', text.lower())
-
+        words = re.findall(r"\b[a-zA-Z]+\b", text.lower())
+
         # Filter stop words and short words
         keywords = [
-            word for word in words
-            if word not in self.stop_words and len(word) >= min_length
+            word for word in words if word not in self.stop_words and len(word) >= min_length
         ]
-
+
         return keywords
-
-    def create_semantic_fingerprint(self, message: str, files: List[str]) -> str:
+
+    def create_semantic_fingerprint(self, message: str, files: list[str]) -> str:
         """Create a semantic fingerprint for similarity matching.
-
+
         Creates a hash-based fingerprint that captures the semantic essence
         of a commit for pattern matching and caching.
-
+
         Args:
             message: Commit message
             files: List of changed files
-
+
         Returns:
             Hex-encoded fingerprint string
         """
         # Normalize message for consistent fingerprinting
         normalized_msg = self.normalize_message(message)
         keywords = self.extract_keywords(normalized_msg)
-
+
         # Extract file patterns (extensions, directories)
         file_patterns = []
         for file_path in files[:10]:  # Limit to prevent huge fingerprints
             # Get file extension
-            if '.' in file_path:
-                ext = file_path.split('.')[-1].lower()
+            if "." in file_path:
+                ext = file_path.split(".")[-1].lower()
                 file_patterns.append(f"ext:{ext}")
-
-            # Get directory patterns
-            parts = file_path.split('/')
+
+            # Get directory patterns
+            parts = file_path.split("/")
             if len(parts) > 1:
                 # First directory
                 file_patterns.append(f"dir:{parts[0]}")
                 # Last directory before file
                 if len(parts) > 2:
                     file_patterns.append(f"dir:{parts[-2]}")
-
+
         # Combine keywords and file patterns
         semantic_elements = sorted(keywords[:10]) + sorted(set(file_patterns))
-
+
         # Create fingerprint
-        fingerprint_text = '|'.join(semantic_elements)
+        fingerprint_text = "|".join(semantic_elements)
         return hashlib.md5(fingerprint_text.encode()).hexdigest()
-
+
     def calculate_message_similarity(self, msg1: str, msg2: str) -> float:
         """Calculate semantic similarity between two commit messages.
-
+
         Uses keyword overlap to estimate semantic similarity between
         commit messages for grouping similar commits.
-
+
         Args:
             msg1: First commit message
             msg2: Second commit message
-
+
         Returns:
             Similarity score between 0.0 and 1.0
         """
         if not msg1 or not msg2:
             return 0.0
-
+
         # Extract keywords from both messages
         keywords1 = set(self.extract_keywords(self.normalize_message(msg1)))
         keywords2 = set(self.extract_keywords(self.normalize_message(msg2)))
-
+
         if not keywords1 or not keywords2:
             return 0.0
-
+
         # Calculate Jaccard similarity
         intersection = len(keywords1.intersection(keywords2))
         union = len(keywords1.union(keywords2))
-
+
         return intersection / union if union > 0 else 0.0
-
-    def extract_file_patterns(self, files: List[str]) -> Dict[str, int]:
+
+    def extract_file_patterns(self, files: list[str]) -> dict[str, int]:
         """Extract file patterns for domain classification.
-
+
         Analyzes file paths to extract patterns useful for determining
         the technical domain of changes.
-
+
         Args:
             files: List of file paths
-
+
         Returns:
             Dictionary mapping pattern types to counts
         """
         patterns = {
-            'extensions': {},
-            'directories': {},
-            'special_files': {},
+            "extensions": {},
+            "directories": {},
+            "special_files": {},
         }
-
+
         for file_path in files:
             # File extensions
-            if '.' in file_path:
-                ext = file_path.split('.')[-1].lower()
-                patterns['extensions'][ext] = patterns['extensions'].get(ext, 0) + 1
-
+            if "." in file_path:
+                ext = file_path.split(".")[-1].lower()
+                patterns["extensions"][ext] = patterns["extensions"].get(ext, 0) + 1
+
             # Directory patterns
-            parts = file_path.split('/')
+            parts = file_path.split("/")
             for part in parts[:-1]:  # Exclude filename
                 if part:  # Skip empty parts
-                    patterns['directories'][part] = patterns['directories'].get(part, 0) + 1
-
+                    patterns["directories"][part] = patterns["directories"].get(part, 0) + 1
+
             # Special files
             filename = parts[-1].lower()
             special_files = [
-                'dockerfile', 'makefile', 'readme', 'license', 'changelog',
-                'package.json', 'requirements.txt', 'setup.py', 'pom.xml'
+                "dockerfile",
+                "makefile",
+                "readme",
+                "license",
+                "changelog",
+                "package.json",
+                "requirements.txt",
+                "setup.py",
+                "pom.xml",
             ]
             for special in special_files:
                 if special in filename:
-                    patterns['special_files'][special] = patterns['special_files'].get(special, 0) + 1
-
+                    patterns["special_files"][special] = (
+                        patterns["special_files"].get(special, 0) + 1
+                    )
+
         return patterns
-
-    def calculate_commit_complexity(self, message: str, files: List[str],
-                                    insertions: int, deletions: int) -> Dict[str, float]:
+
+    def calculate_commit_complexity(
+        self, message: str, files: list[str], insertions: int, deletions: int
+    ) -> dict[str, float]:
         """Calculate various complexity metrics for a commit.
-
+
         Estimates the complexity of a commit based on message content,
         file changes, and line changes to help with risk assessment.
-
+
         Args:
             message: Commit message
             files: List of changed files
             insertions: Number of lines inserted
             deletions: Number of lines deleted
-
+
         Returns:
             Dictionary of complexity metrics
         """
         metrics = {}
-
+
         # Message complexity (length, keywords)
-        metrics['message_length'] = len(message)
+        metrics["message_length"] = len(message)
         keywords = self.extract_keywords(message)
-        metrics['keyword_count'] = len(keywords)
-        metrics['message_complexity'] = min(1.0, len(keywords) / 10.0)
-
+        metrics["keyword_count"] = len(keywords)
+        metrics["message_complexity"] = min(1.0, len(keywords) / 10.0)
+
         # File complexity
-        metrics['files_changed'] = len(files)
-        metrics['file_complexity'] = min(1.0, len(files) / 20.0)
-
+        metrics["files_changed"] = len(files)
+        metrics["file_complexity"] = min(1.0, len(files) / 20.0)
+
         # Line change complexity
         total_changes = insertions + deletions
-        metrics['total_changes'] = total_changes
-        metrics['change_complexity'] = min(1.0, total_changes / 500.0)
-
+        metrics["total_changes"] = total_changes
+        metrics["change_complexity"] = min(1.0, total_changes / 500.0)
+
         # Overall complexity score (0.0 to 1.0)
-        metrics['overall_complexity'] = (
-            metrics['message_complexity'] * 0.2 +
-            metrics['file_complexity'] * 0.3 +
-            metrics['change_complexity'] * 0.5
+        metrics["overall_complexity"] = (
+            metrics["message_complexity"] * 0.2
+            + metrics["file_complexity"] * 0.3
+            + metrics["change_complexity"] * 0.5
         )
-
-        return metrics
+
+        return metrics
```
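For orientation, here is a minimal usage sketch of the `TextProcessor` API shown in the hunk above. The 1.3.6 changes to this file are purely stylistic (double quotes, builtin `set`/`list`/`dict` generics, Black-style wrapping), so behavior should be identical across versions; the commit message, file paths, and counts below are illustrative, not taken from the package itself.

```python
# Sketch only: exercises the TextProcessor methods visible in this diff.
from gitflow_analytics.qualitative.utils.text_processing import TextProcessor

tp = TextProcessor()

# URLs, emails, git hashes, and ticket IDs collapse to placeholder tokens.
msg = "Fix login bug JIRA-123, see https://example.com/issue abc1234def"
print(tp.normalize_message(msg))
# -> "fix login bug [TICKET], see [URL] [HASH]"

# Jaccard similarity over extracted keywords: {fix, login, bug} vs.
# {fix, logout, bug} share 2 of 4 distinct keywords -> 0.5.
print(tp.calculate_message_similarity("fix login bug", "fix logout bug"))

# Complexity blends message (0.2), file count (0.3), and line churn (0.5),
# each component capped at 1.0 (10 keywords, 20 files, 500 changed lines).
metrics = tp.calculate_commit_complexity(
    msg, files=["src/auth/login.py", "README.md"], insertions=40, deletions=12
)
print(metrics["overall_complexity"])
```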
gitflow_analytics/reports/__init__.py (new file)

```diff
@@ -0,0 +1,100 @@
+# Reports package
+
+# Legacy imports for backward compatibility
+from .analytics_writer import AnalyticsReportGenerator
+
+# New abstraction layer components
+from .base import (
+    BaseReportGenerator,
+    ChainedReportGenerator,
+    CompositeReportGenerator,
+    ReportData,
+    ReportMetadata,
+    ReportOutput,
+)
+from .csv_writer import CSVReportGenerator
+from .data_models import (
+    CommitData,
+    CommitType,
+    DeveloperIdentity,
+    DeveloperMetrics,
+    DORAMetrics,
+    ProjectMetrics,
+    PullRequestData,
+    ReportSummary,
+    TicketMetrics,
+    WeeklyMetrics,
+    WorkStyle,
+)
+from .factory import (
+    ReportBuilder,
+    ReportFactory,
+    create_multiple_reports,
+    create_report,
+    get_default_factory,
+)
+from .formatters import (
+    CSVFormatter,
+    DateFormatter,
+    JSONFormatter,
+    MarkdownFormatter,
+    MetricFormatter,
+    NumberFormatter,
+    TextFormatter,
+)
+from .html_generator import HTMLReportGenerator
+from .interfaces import ReportField, ReportFormat, ReportSchema, ReportType
+from .json_exporter import ComprehensiveJSONExporter
+from .narrative_writer import NarrativeReportGenerator
+
+__all__ = [
+    # Legacy generators
+    'CSVReportGenerator',
+    'AnalyticsReportGenerator',
+    'NarrativeReportGenerator',
+    'ComprehensiveJSONExporter',
+    'HTMLReportGenerator',
+
+    # Base classes
+    'BaseReportGenerator',
+    'CompositeReportGenerator',
+    'ChainedReportGenerator',
+    'ReportData',
+    'ReportOutput',
+    'ReportMetadata',
+
+    # Interfaces
+    'ReportFormat',
+    'ReportType',
+    'ReportField',
+    'ReportSchema',
+
+    # Factory
+    'ReportFactory',
+    'ReportBuilder',
+    'create_report',
+    'create_multiple_reports',
+    'get_default_factory',
+
+    # Formatters
+    'DateFormatter',
+    'NumberFormatter',
+    'TextFormatter',
+    'MarkdownFormatter',
+    'CSVFormatter',
+    'JSONFormatter',
+    'MetricFormatter',
+
+    # Data models
+    'CommitData',
+    'PullRequestData',
+    'DeveloperMetrics',
+    'ProjectMetrics',
+    'WeeklyMetrics',
+    'TicketMetrics',
+    'DORAMetrics',
+    'ReportSummary',
+    'DeveloperIdentity',
+    'CommitType',
+    'WorkStyle'
+]
```
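Since `reports/__init__.py` is new in 1.3.6, the export list above is the contract downstream code can check against: the legacy generators stay importable from the package root while the factory and formatter layer lands beside them. A hedged import sketch follows; the diff only shows the exported names, so the commented call shape for `create_report` is an assumption, not a documented signature.

```python
# Both legacy and new-style names resolve from the package root in 1.3.6.
from gitflow_analytics.reports import (
    CSVReportGenerator,  # legacy generator, still exported
    ReportFactory,       # new factory layer from .factory
    ReportFormat,        # format interface from .interfaces
    create_report,       # convenience function from .factory
)

# Hypothetical call shape (signature not shown in this diff):
# report = create_report(report_type=..., output_format=ReportFormat.CSV, data=...)
```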