gitflow-analytics 1.0.3__py3-none-any.whl → 1.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gitflow_analytics/_version.py +1 -1
- gitflow_analytics/classification/__init__.py +31 -0
- gitflow_analytics/classification/batch_classifier.py +752 -0
- gitflow_analytics/classification/classifier.py +464 -0
- gitflow_analytics/classification/feature_extractor.py +725 -0
- gitflow_analytics/classification/linguist_analyzer.py +574 -0
- gitflow_analytics/classification/model.py +455 -0
- gitflow_analytics/cli.py +4108 -350
- gitflow_analytics/cli_rich.py +198 -48
- gitflow_analytics/config/__init__.py +43 -0
- gitflow_analytics/config/errors.py +261 -0
- gitflow_analytics/config/loader.py +904 -0
- gitflow_analytics/config/profiles.py +264 -0
- gitflow_analytics/config/repository.py +124 -0
- gitflow_analytics/config/schema.py +441 -0
- gitflow_analytics/config/validator.py +154 -0
- gitflow_analytics/config.py +44 -508
- gitflow_analytics/core/analyzer.py +1209 -98
- gitflow_analytics/core/cache.py +1337 -29
- gitflow_analytics/core/data_fetcher.py +1193 -0
- gitflow_analytics/core/identity.py +363 -14
- gitflow_analytics/core/metrics_storage.py +526 -0
- gitflow_analytics/core/progress.py +372 -0
- gitflow_analytics/core/schema_version.py +269 -0
- gitflow_analytics/extractors/ml_tickets.py +1100 -0
- gitflow_analytics/extractors/story_points.py +8 -1
- gitflow_analytics/extractors/tickets.py +749 -11
- gitflow_analytics/identity_llm/__init__.py +6 -0
- gitflow_analytics/identity_llm/analysis_pass.py +231 -0
- gitflow_analytics/identity_llm/analyzer.py +464 -0
- gitflow_analytics/identity_llm/models.py +76 -0
- gitflow_analytics/integrations/github_integration.py +175 -11
- gitflow_analytics/integrations/jira_integration.py +461 -24
- gitflow_analytics/integrations/orchestrator.py +124 -1
- gitflow_analytics/metrics/activity_scoring.py +322 -0
- gitflow_analytics/metrics/branch_health.py +470 -0
- gitflow_analytics/metrics/dora.py +379 -20
- gitflow_analytics/models/database.py +843 -53
- gitflow_analytics/pm_framework/__init__.py +115 -0
- gitflow_analytics/pm_framework/adapters/__init__.py +50 -0
- gitflow_analytics/pm_framework/adapters/jira_adapter.py +1845 -0
- gitflow_analytics/pm_framework/base.py +406 -0
- gitflow_analytics/pm_framework/models.py +211 -0
- gitflow_analytics/pm_framework/orchestrator.py +652 -0
- gitflow_analytics/pm_framework/registry.py +333 -0
- gitflow_analytics/qualitative/__init__.py +9 -10
- gitflow_analytics/qualitative/chatgpt_analyzer.py +259 -0
- gitflow_analytics/qualitative/classifiers/__init__.py +3 -3
- gitflow_analytics/qualitative/classifiers/change_type.py +518 -244
- gitflow_analytics/qualitative/classifiers/domain_classifier.py +272 -165
- gitflow_analytics/qualitative/classifiers/intent_analyzer.py +321 -222
- gitflow_analytics/qualitative/classifiers/llm/__init__.py +35 -0
- gitflow_analytics/qualitative/classifiers/llm/base.py +193 -0
- gitflow_analytics/qualitative/classifiers/llm/batch_processor.py +383 -0
- gitflow_analytics/qualitative/classifiers/llm/cache.py +479 -0
- gitflow_analytics/qualitative/classifiers/llm/cost_tracker.py +435 -0
- gitflow_analytics/qualitative/classifiers/llm/openai_client.py +403 -0
- gitflow_analytics/qualitative/classifiers/llm/prompts.py +373 -0
- gitflow_analytics/qualitative/classifiers/llm/response_parser.py +287 -0
- gitflow_analytics/qualitative/classifiers/llm_commit_classifier.py +607 -0
- gitflow_analytics/qualitative/classifiers/risk_analyzer.py +215 -189
- gitflow_analytics/qualitative/core/__init__.py +4 -4
- gitflow_analytics/qualitative/core/llm_fallback.py +239 -235
- gitflow_analytics/qualitative/core/nlp_engine.py +157 -148
- gitflow_analytics/qualitative/core/pattern_cache.py +214 -192
- gitflow_analytics/qualitative/core/processor.py +381 -248
- gitflow_analytics/qualitative/enhanced_analyzer.py +2236 -0
- gitflow_analytics/qualitative/example_enhanced_usage.py +420 -0
- gitflow_analytics/qualitative/models/__init__.py +7 -7
- gitflow_analytics/qualitative/models/schemas.py +155 -121
- gitflow_analytics/qualitative/utils/__init__.py +4 -4
- gitflow_analytics/qualitative/utils/batch_processor.py +136 -123
- gitflow_analytics/qualitative/utils/cost_tracker.py +142 -140
- gitflow_analytics/qualitative/utils/metrics.py +172 -158
- gitflow_analytics/qualitative/utils/text_processing.py +146 -104
- gitflow_analytics/reports/__init__.py +100 -0
- gitflow_analytics/reports/analytics_writer.py +539 -14
- gitflow_analytics/reports/base.py +648 -0
- gitflow_analytics/reports/branch_health_writer.py +322 -0
- gitflow_analytics/reports/classification_writer.py +924 -0
- gitflow_analytics/reports/cli_integration.py +427 -0
- gitflow_analytics/reports/csv_writer.py +1676 -212
- gitflow_analytics/reports/data_models.py +504 -0
- gitflow_analytics/reports/database_report_generator.py +427 -0
- gitflow_analytics/reports/example_usage.py +344 -0
- gitflow_analytics/reports/factory.py +499 -0
- gitflow_analytics/reports/formatters.py +698 -0
- gitflow_analytics/reports/html_generator.py +1116 -0
- gitflow_analytics/reports/interfaces.py +489 -0
- gitflow_analytics/reports/json_exporter.py +2770 -0
- gitflow_analytics/reports/narrative_writer.py +2287 -158
- gitflow_analytics/reports/story_point_correlation.py +1144 -0
- gitflow_analytics/reports/weekly_trends_writer.py +389 -0
- gitflow_analytics/training/__init__.py +5 -0
- gitflow_analytics/training/model_loader.py +377 -0
- gitflow_analytics/training/pipeline.py +550 -0
- gitflow_analytics/tui/__init__.py +1 -1
- gitflow_analytics/tui/app.py +129 -126
- gitflow_analytics/tui/screens/__init__.py +3 -3
- gitflow_analytics/tui/screens/analysis_progress_screen.py +188 -179
- gitflow_analytics/tui/screens/configuration_screen.py +154 -178
- gitflow_analytics/tui/screens/loading_screen.py +100 -110
- gitflow_analytics/tui/screens/main_screen.py +89 -72
- gitflow_analytics/tui/screens/results_screen.py +305 -281
- gitflow_analytics/tui/widgets/__init__.py +2 -2
- gitflow_analytics/tui/widgets/data_table.py +67 -69
- gitflow_analytics/tui/widgets/export_modal.py +76 -76
- gitflow_analytics/tui/widgets/progress_widget.py +41 -46
- gitflow_analytics-1.3.6.dist-info/METADATA +1015 -0
- gitflow_analytics-1.3.6.dist-info/RECORD +122 -0
- gitflow_analytics-1.0.3.dist-info/METADATA +0 -490
- gitflow_analytics-1.0.3.dist-info/RECORD +0 -62
- {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/WHEEL +0 -0
- {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/entry_points.txt +0 -0
- {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/licenses/LICENSE +0 -0
- {gitflow_analytics-1.0.3.dist-info → gitflow_analytics-1.3.6.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,406 @@
|
|
|
1
|
+
"""Base platform adapter interface and capabilities definition.
|
|
2
|
+
|
|
3
|
+
This module provides the abstract base class and capability definitions that all
|
|
4
|
+
PM platform adapters must implement. It includes common utility methods for
|
|
5
|
+
data normalization and error handling.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
from abc import ABC, abstractmethod
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from typing import Any, Optional
|
|
12
|
+
|
|
13
|
+
from .models import IssueType, Priority, UnifiedIssue, UnifiedProject, UnifiedSprint, UnifiedUser
|
|
14
|
+
|
|
15
|
+
# Configure logger for PM framework
|
|
16
|
+
logger = logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class PlatformCapabilities:
    """Feature-flag container describing what a platform adapter can do.

    WHY: PM platforms differ widely in feature coverage. Adapters declare
    their capabilities explicitly so the orchestrator can skip unsupported
    operations and report limitations to the user, instead of probing by
    calling APIs and catching failures.
    """

    def __init__(self) -> None:
        # Baseline features: effectively universal across PM platforms.
        self.supports_projects = self.supports_issues = True

        # Optional features default to off; each adapter opts in explicitly.
        self.supports_sprints = False
        self.supports_time_tracking = False
        self.supports_story_points = False
        self.supports_custom_fields = False
        self.supports_issue_linking = False
        self.supports_comments = False
        self.supports_attachments = False
        self.supports_workflows = False
        self.supports_bulk_operations = False

        # API budget hints used for rate-limit management.
        self.rate_limit_requests_per_hour = 1000
        self.rate_limit_burst_size = 100

        # Paging hints for efficient bulk retrieval.
        self.max_results_per_page = 100
        self.supports_cursor_pagination = False
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class BasePlatformAdapter(ABC):
|
|
57
|
+
"""Abstract base class for all platform adapters.
|
|
58
|
+
|
|
59
|
+
This class defines the standard interface that all PM platform adapters
|
|
60
|
+
must implement. It provides common utility methods for data normalization
|
|
61
|
+
and includes comprehensive error handling patterns.
|
|
62
|
+
|
|
63
|
+
WHY: Standardized interface ensures consistent behavior across all platforms
|
|
64
|
+
while allowing platform-specific implementations. Common utility methods
|
|
65
|
+
reduce code duplication and ensure consistent data transformation.
|
|
66
|
+
|
|
67
|
+
DESIGN DECISION: Use abstract methods for required operations and default
|
|
68
|
+
implementations for optional features. This approach ensures essential
|
|
69
|
+
functionality while providing flexibility for platform differences.
|
|
70
|
+
"""
|
|
71
|
+
|
|
72
|
+
def __init__(self, config: dict[str, Any]) -> None:
|
|
73
|
+
"""Initialize the platform adapter with configuration.
|
|
74
|
+
|
|
75
|
+
Args:
|
|
76
|
+
config: Platform-specific configuration including authentication
|
|
77
|
+
credentials, API endpoints, and feature settings.
|
|
78
|
+
"""
|
|
79
|
+
self.config = config
|
|
80
|
+
self.platform_name = self._get_platform_name()
|
|
81
|
+
self.capabilities = self._get_capabilities()
|
|
82
|
+
self._client = None
|
|
83
|
+
|
|
84
|
+
# Set up logging for this adapter
|
|
85
|
+
self.logger = logging.getLogger(f"{__name__}.{self.platform_name}")
|
|
86
|
+
self.logger.info(f"Initializing {self.platform_name} adapter")
|
|
87
|
+
|
|
88
|
+
@abstractmethod
|
|
89
|
+
def _get_platform_name(self) -> str:
|
|
90
|
+
"""Return the platform name (e.g., 'jira', 'azure_devops').
|
|
91
|
+
|
|
92
|
+
Returns:
|
|
93
|
+
String identifier for this platform, used in logging and data tagging.
|
|
94
|
+
"""
|
|
95
|
+
pass
|
|
96
|
+
|
|
97
|
+
@abstractmethod
|
|
98
|
+
def _get_capabilities(self) -> PlatformCapabilities:
|
|
99
|
+
"""Return the capabilities supported by this platform.
|
|
100
|
+
|
|
101
|
+
Returns:
|
|
102
|
+
PlatformCapabilities object describing what features this adapter supports.
|
|
103
|
+
"""
|
|
104
|
+
pass
|
|
105
|
+
|
|
106
|
+
@abstractmethod
|
|
107
|
+
def authenticate(self) -> bool:
|
|
108
|
+
"""Authenticate with the platform.
|
|
109
|
+
|
|
110
|
+
WHY: Authentication is platform-specific but required for all adapters.
|
|
111
|
+
This method establishes the connection and validates credentials.
|
|
112
|
+
|
|
113
|
+
Returns:
|
|
114
|
+
True if authentication successful, False otherwise.
|
|
115
|
+
|
|
116
|
+
Raises:
|
|
117
|
+
ConnectionError: If authentication fails due to network issues.
|
|
118
|
+
ValueError: If credentials are invalid or missing.
|
|
119
|
+
"""
|
|
120
|
+
pass
|
|
121
|
+
|
|
122
|
+
@abstractmethod
|
|
123
|
+
def test_connection(self) -> dict[str, Any]:
|
|
124
|
+
"""Test connection and return platform status information.
|
|
125
|
+
|
|
126
|
+
WHY: Provides diagnostic information for troubleshooting connection
|
|
127
|
+
issues and validating configuration before full data collection.
|
|
128
|
+
|
|
129
|
+
Returns:
|
|
130
|
+
Dictionary containing connection status, platform version info,
|
|
131
|
+
and any diagnostic details.
|
|
132
|
+
"""
|
|
133
|
+
pass
|
|
134
|
+
|
|
135
|
+
@abstractmethod
|
|
136
|
+
def get_projects(self) -> list[UnifiedProject]:
|
|
137
|
+
"""Retrieve all accessible projects from the platform.
|
|
138
|
+
|
|
139
|
+
WHY: Projects are the primary organizational unit for most PM platforms.
|
|
140
|
+
This method discovers available projects for subsequent issue retrieval.
|
|
141
|
+
|
|
142
|
+
Returns:
|
|
143
|
+
List of UnifiedProject objects representing all accessible projects.
|
|
144
|
+
|
|
145
|
+
Raises:
|
|
146
|
+
ConnectionError: If API request fails due to network issues.
|
|
147
|
+
PermissionError: If user lacks permission to access projects.
|
|
148
|
+
"""
|
|
149
|
+
pass
|
|
150
|
+
|
|
151
|
+
@abstractmethod
|
|
152
|
+
def get_issues(
|
|
153
|
+
self,
|
|
154
|
+
project_id: str,
|
|
155
|
+
since: Optional[datetime] = None,
|
|
156
|
+
issue_types: Optional[list[IssueType]] = None,
|
|
157
|
+
) -> list[UnifiedIssue]:
|
|
158
|
+
"""Retrieve issues for a specific project.
|
|
159
|
+
|
|
160
|
+
WHY: Issues are the core work items that need to be correlated with
|
|
161
|
+
Git commits. This method fetches issue data with optional filtering
|
|
162
|
+
to optimize performance and focus on relevant timeframes.
|
|
163
|
+
|
|
164
|
+
Args:
|
|
165
|
+
project_id: Project identifier to retrieve issues from.
|
|
166
|
+
since: Optional datetime to filter issues updated after this date.
|
|
167
|
+
issue_types: Optional list of issue types to filter by.
|
|
168
|
+
|
|
169
|
+
Returns:
|
|
170
|
+
List of UnifiedIssue objects for the specified project.
|
|
171
|
+
|
|
172
|
+
Raises:
|
|
173
|
+
ConnectionError: If API request fails due to network issues.
|
|
174
|
+
ValueError: If project_id is invalid or not accessible.
|
|
175
|
+
"""
|
|
176
|
+
pass
|
|
177
|
+
|
|
178
|
+
# Optional methods with default implementations
|
|
179
|
+
def get_sprints(self, project_id: str) -> list[UnifiedSprint]:
|
|
180
|
+
"""Retrieve sprints for a project.
|
|
181
|
+
|
|
182
|
+
Default implementation returns empty list for platforms that don't
|
|
183
|
+
support sprints. Override this method if the platform supports sprints.
|
|
184
|
+
|
|
185
|
+
Args:
|
|
186
|
+
project_id: Project identifier to retrieve sprints from.
|
|
187
|
+
|
|
188
|
+
Returns:
|
|
189
|
+
List of UnifiedSprint objects, empty if not supported.
|
|
190
|
+
"""
|
|
191
|
+
if not self.capabilities.supports_sprints:
|
|
192
|
+
self.logger.debug(f"Sprints not supported by {self.platform_name}")
|
|
193
|
+
return []
|
|
194
|
+
raise NotImplementedError(f"get_sprints not implemented for {self.platform_name}")
|
|
195
|
+
|
|
196
|
+
def get_users(self, project_id: str) -> list[UnifiedUser]:
|
|
197
|
+
"""Retrieve users for a project.
|
|
198
|
+
|
|
199
|
+
Default implementation returns empty list. Override if the platform
|
|
200
|
+
provides user enumeration capabilities.
|
|
201
|
+
|
|
202
|
+
Args:
|
|
203
|
+
project_id: Project identifier to retrieve users from.
|
|
204
|
+
|
|
205
|
+
Returns:
|
|
206
|
+
List of UnifiedUser objects, empty by default.
|
|
207
|
+
"""
|
|
208
|
+
self.logger.debug(f"User enumeration not implemented for {self.platform_name}")
|
|
209
|
+
return []
|
|
210
|
+
|
|
211
|
+
def get_issue_comments(self, issue_key: str) -> list[dict[str, Any]]:
|
|
212
|
+
"""Retrieve comments for an issue.
|
|
213
|
+
|
|
214
|
+
Default implementation returns empty list for platforms that don't
|
|
215
|
+
support comments. Override this method if comments are available.
|
|
216
|
+
|
|
217
|
+
Args:
|
|
218
|
+
issue_key: Issue identifier to retrieve comments from.
|
|
219
|
+
|
|
220
|
+
Returns:
|
|
221
|
+
List of comment dictionaries, empty if not supported.
|
|
222
|
+
"""
|
|
223
|
+
if not self.capabilities.supports_comments:
|
|
224
|
+
self.logger.debug(f"Comments not supported by {self.platform_name}")
|
|
225
|
+
return []
|
|
226
|
+
raise NotImplementedError(f"get_issue_comments not implemented for {self.platform_name}")
|
|
227
|
+
|
|
228
|
+
def get_custom_fields(self, project_id: str) -> dict[str, Any]:
|
|
229
|
+
"""Retrieve custom field definitions for a project.
|
|
230
|
+
|
|
231
|
+
Default implementation returns empty dict for platforms that don't
|
|
232
|
+
support custom fields. Override if custom fields are available.
|
|
233
|
+
|
|
234
|
+
Args:
|
|
235
|
+
project_id: Project identifier to retrieve custom fields from.
|
|
236
|
+
|
|
237
|
+
Returns:
|
|
238
|
+
Dictionary of custom field definitions, empty if not supported.
|
|
239
|
+
"""
|
|
240
|
+
if not self.capabilities.supports_custom_fields:
|
|
241
|
+
self.logger.debug(f"Custom fields not supported by {self.platform_name}")
|
|
242
|
+
return {}
|
|
243
|
+
raise NotImplementedError(f"get_custom_fields not implemented for {self.platform_name}")
|
|
244
|
+
|
|
245
|
+
# Utility methods for data normalization
|
|
246
|
+
def _normalize_date(self, date_str: Optional[str]) -> Optional[datetime]:
|
|
247
|
+
"""Normalize date string to datetime object.
|
|
248
|
+
|
|
249
|
+
WHY: Different platforms use different date formats. This utility
|
|
250
|
+
method handles common formats to ensure consistent datetime objects
|
|
251
|
+
throughout the system.
|
|
252
|
+
|
|
253
|
+
Args:
|
|
254
|
+
date_str: Date string in various formats, or None.
|
|
255
|
+
|
|
256
|
+
Returns:
|
|
257
|
+
Normalized datetime object, or None if parsing fails.
|
|
258
|
+
"""
|
|
259
|
+
if not date_str:
|
|
260
|
+
return None
|
|
261
|
+
|
|
262
|
+
# Handle common date formats from different platforms
|
|
263
|
+
formats = [
|
|
264
|
+
"%Y-%m-%dT%H:%M:%S.%fZ", # ISO with microseconds (GitHub, Linear)
|
|
265
|
+
"%Y-%m-%dT%H:%M:%SZ", # ISO without microseconds (JIRA)
|
|
266
|
+
"%Y-%m-%dT%H:%M:%S%z", # ISO with timezone (Azure DevOps)
|
|
267
|
+
"%Y-%m-%dT%H:%M:%S.%f%z", # ISO with microseconds and timezone
|
|
268
|
+
"%Y-%m-%d %H:%M:%S", # Common SQL format
|
|
269
|
+
"%Y-%m-%d", # Date only
|
|
270
|
+
]
|
|
271
|
+
|
|
272
|
+
for fmt in formats:
|
|
273
|
+
try:
|
|
274
|
+
return datetime.strptime(date_str, fmt)
|
|
275
|
+
except ValueError:
|
|
276
|
+
continue
|
|
277
|
+
|
|
278
|
+
self.logger.warning(f"Could not parse date: {date_str}")
|
|
279
|
+
return None
|
|
280
|
+
|
|
281
|
+
def _map_priority(self, platform_priority: str) -> Priority:
|
|
282
|
+
"""Map platform-specific priority to unified priority.
|
|
283
|
+
|
|
284
|
+
WHY: Different platforms use different priority schemes (numeric,
|
|
285
|
+
named, etc.). This method normalizes priorities to enable consistent
|
|
286
|
+
priority-based analysis across platforms.
|
|
287
|
+
|
|
288
|
+
Args:
|
|
289
|
+
platform_priority: Priority value from the platform.
|
|
290
|
+
|
|
291
|
+
Returns:
|
|
292
|
+
Unified Priority enum value.
|
|
293
|
+
"""
|
|
294
|
+
if not platform_priority:
|
|
295
|
+
return Priority.UNKNOWN
|
|
296
|
+
|
|
297
|
+
priority_lower = platform_priority.lower()
|
|
298
|
+
|
|
299
|
+
# Common priority mappings across platforms
|
|
300
|
+
priority_mapping = {
|
|
301
|
+
# Critical/Urgent priorities
|
|
302
|
+
"highest": Priority.CRITICAL,
|
|
303
|
+
"critical": Priority.CRITICAL,
|
|
304
|
+
"urgent": Priority.CRITICAL,
|
|
305
|
+
"1": Priority.CRITICAL,
|
|
306
|
+
"blocker": Priority.CRITICAL,
|
|
307
|
+
# High priorities
|
|
308
|
+
"high": Priority.HIGH,
|
|
309
|
+
"important": Priority.HIGH,
|
|
310
|
+
"2": Priority.HIGH,
|
|
311
|
+
"major": Priority.HIGH,
|
|
312
|
+
# Medium/Normal priorities
|
|
313
|
+
"medium": Priority.MEDIUM,
|
|
314
|
+
"normal": Priority.MEDIUM,
|
|
315
|
+
"3": Priority.MEDIUM,
|
|
316
|
+
"moderate": Priority.MEDIUM,
|
|
317
|
+
# Low priorities
|
|
318
|
+
"low": Priority.LOW,
|
|
319
|
+
"minor": Priority.LOW,
|
|
320
|
+
"4": Priority.LOW,
|
|
321
|
+
# Trivial priorities
|
|
322
|
+
"trivial": Priority.TRIVIAL,
|
|
323
|
+
"lowest": Priority.TRIVIAL,
|
|
324
|
+
"5": Priority.TRIVIAL,
|
|
325
|
+
}
|
|
326
|
+
|
|
327
|
+
mapped_priority = priority_mapping.get(priority_lower, Priority.UNKNOWN)
|
|
328
|
+
if mapped_priority == Priority.UNKNOWN:
|
|
329
|
+
self.logger.debug(f"Unknown priority '{platform_priority}' mapped to UNKNOWN")
|
|
330
|
+
|
|
331
|
+
return mapped_priority
|
|
332
|
+
|
|
333
|
+
def _extract_story_points(self, custom_fields: dict[str, Any]) -> Optional[int]:
|
|
334
|
+
"""Extract story points from custom fields.
|
|
335
|
+
|
|
336
|
+
WHY: Story points are critical for velocity tracking but stored
|
|
337
|
+
differently across platforms (custom fields, dedicated fields, etc.).
|
|
338
|
+
This method attempts to find story points using common field names.
|
|
339
|
+
|
|
340
|
+
Args:
|
|
341
|
+
custom_fields: Dictionary of custom field values from the platform.
|
|
342
|
+
|
|
343
|
+
Returns:
|
|
344
|
+
Story points as integer, or None if not found.
|
|
345
|
+
"""
|
|
346
|
+
if not custom_fields:
|
|
347
|
+
return None
|
|
348
|
+
|
|
349
|
+
# Common story point field names across platforms
|
|
350
|
+
story_point_fields = [
|
|
351
|
+
"story_points",
|
|
352
|
+
"storypoints",
|
|
353
|
+
"story_point_estimate",
|
|
354
|
+
"customfield_10016",
|
|
355
|
+
"customfield_10021", # Common JIRA fields
|
|
356
|
+
"Microsoft.VSTS.Scheduling.StoryPoints", # Azure DevOps
|
|
357
|
+
"effort",
|
|
358
|
+
"size",
|
|
359
|
+
"complexity",
|
|
360
|
+
"points",
|
|
361
|
+
]
|
|
362
|
+
|
|
363
|
+
for field_name in story_point_fields:
|
|
364
|
+
if field_name in custom_fields:
|
|
365
|
+
value = custom_fields[field_name]
|
|
366
|
+
if isinstance(value, (int, float)):
|
|
367
|
+
return int(value)
|
|
368
|
+
elif isinstance(value, str) and value.strip().isdigit():
|
|
369
|
+
return int(value.strip())
|
|
370
|
+
elif isinstance(value, str):
|
|
371
|
+
# Handle decimal story points
|
|
372
|
+
try:
|
|
373
|
+
return int(float(value.strip()))
|
|
374
|
+
except (ValueError, AttributeError):
|
|
375
|
+
continue
|
|
376
|
+
|
|
377
|
+
return None
|
|
378
|
+
|
|
379
|
+
def _handle_api_error(self, error: Exception, operation: str) -> None:
|
|
380
|
+
"""Handle API errors with consistent logging and error reporting.
|
|
381
|
+
|
|
382
|
+
WHY: API errors are common when integrating with external platforms.
|
|
383
|
+
This method provides consistent error handling and logging to help
|
|
384
|
+
diagnose issues and provide meaningful user feedback.
|
|
385
|
+
|
|
386
|
+
Args:
|
|
387
|
+
error: The exception that occurred.
|
|
388
|
+
operation: Description of the operation that failed.
|
|
389
|
+
|
|
390
|
+
Raises:
|
|
391
|
+
The original exception after logging appropriate details.
|
|
392
|
+
"""
|
|
393
|
+
error_msg = f"{operation} failed for {self.platform_name}: {str(error)}"
|
|
394
|
+
|
|
395
|
+
# Log different error types at appropriate levels
|
|
396
|
+
if "rate limit" in str(error).lower():
|
|
397
|
+
self.logger.warning(f"Rate limit exceeded: {error_msg}")
|
|
398
|
+
elif "unauthorized" in str(error).lower() or "403" in str(error):
|
|
399
|
+
self.logger.error(f"Authentication error: {error_msg}")
|
|
400
|
+
elif "not found" in str(error).lower() or "404" in str(error):
|
|
401
|
+
self.logger.warning(f"Resource not found: {error_msg}")
|
|
402
|
+
else:
|
|
403
|
+
self.logger.error(f"API error: {error_msg}")
|
|
404
|
+
|
|
405
|
+
# Re-raise the original exception for caller handling
|
|
406
|
+
raise error
|
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
"""Unified data models for platform-agnostic project management integration.
|
|
2
|
+
|
|
3
|
+
This module defines standardized data structures that normalize data from
|
|
4
|
+
different PM platforms (JIRA, Azure DevOps, Linear, etc.) into a common format
|
|
5
|
+
for consistent analytics across GitFlow Analytics.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from dataclasses import dataclass, field
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
from enum import Enum
|
|
11
|
+
from typing import Any, Optional
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class IssueType(Enum):
    """Standardized issue types across platforms.

    Maps platform-specific issue types to unified categories for consistent
    analytics and reporting across different PM tools. Member values are
    lowercase strings.
    """

    EPIC = "epic"
    STORY = "story"
    TASK = "task"
    BUG = "bug"
    DEFECT = "defect"
    FEATURE = "feature"
    IMPROVEMENT = "improvement"
    SUBTASK = "subtask"
    INCIDENT = "incident"
    # Fallback for platform types with no unified mapping; also the default
    # value of UnifiedIssue.issue_type.
    UNKNOWN = "unknown"
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class IssueStatus(Enum):
    """Standardized issue statuses across platforms.

    Maps platform-specific workflow states to unified status categories
    for consistent progress tracking and DORA metrics calculation. Member
    values are lowercase strings.
    """

    BACKLOG = "backlog"
    TODO = "todo"
    IN_PROGRESS = "in_progress"
    IN_REVIEW = "in_review"
    TESTING = "testing"
    DONE = "done"
    CANCELLED = "cancelled"
    BLOCKED = "blocked"
    # Fallback for workflow states with no unified mapping; also the default
    # value of UnifiedIssue.status.
    UNKNOWN = "unknown"
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class Priority(Enum):
    """Standardized priority levels across platforms.

    Maps platform-specific priority schemes to unified levels for
    consistent prioritization analysis and workload assessment.
    BasePlatformAdapter._map_priority normalizes raw platform labels
    (named or numeric) into these members.
    """

    CRITICAL = "critical"
    HIGH = "high"
    MEDIUM = "medium"
    LOW = "low"
    TRIVIAL = "trivial"
    # Fallback for unrecognized or missing priority labels.
    UNKNOWN = "unknown"
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
@dataclass
class UnifiedUser:
    """Platform-agnostic user representation.

    Normalizes user information from different PM platforms to enable
    consistent identity resolution and correlation with Git commit authors.

    WHY: Different platforms have varying user data structures, but we need
    consistent user identification for accurate attribution analytics.
    """

    id: str  # Platform-specific unique identifier (only required field)
    email: Optional[str] = None  # Email address, when the platform exposes one
    display_name: Optional[str] = None  # Human-readable name for reports
    username: Optional[str] = None  # Platform login/handle
    platform: str = ""  # Source platform identifier (e.g., "jira")
    is_active: bool = True  # Active flag as reported by the platform
    platform_data: dict[str, Any] = field(default_factory=dict)  # Raw platform payload
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
@dataclass
class UnifiedProject:
    """Platform-agnostic project representation.

    Standardizes project information across PM platforms for consistent
    project-level analytics and repository correlation.

    WHY: Projects are the primary organizational unit in most PM tools,
    and we need consistent project identification for cross-platform analytics.
    """

    id: str  # Platform-specific unique identifier
    key: str  # Short identifier (e.g., "PROJ", used in issue keys)
    name: str  # Full project name
    description: Optional[str] = None
    platform: str = ""  # Source platform identifier (e.g., "jira")
    is_active: bool = True  # Active flag as reported by the platform
    created_date: Optional[datetime] = None
    platform_data: dict[str, Any] = field(default_factory=dict)  # Raw platform payload
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
@dataclass
class UnifiedIssue:
    """Platform-agnostic issue representation.

    The core data structure for PM platform integration. Normalizes issues,
    tickets, work items, and tasks from different platforms into a unified
    format for consistent analytics and Git correlation.

    DESIGN DECISION: Extensive field set to support most PM platforms while
    maintaining compatibility. Platform-specific data preserved in platform_data
    for advanced use cases without breaking the unified interface.

    WHY: Issues are the primary work tracking unit across all PM platforms.
    Unified representation enables consistent story point tracking, velocity
    calculation, and Git commit correlation regardless of underlying platform.
    """

    # Core identification - required for all issues
    id: str  # Platform-specific unique identifier
    key: str  # Human-readable key (e.g., "PROJ-123", "GH-456")
    platform: str  # Source platform identifier
    project_id: str  # Parent project identifier

    # Basic properties - common across most platforms
    title: str
    created_date: datetime  # Required; placed before optional fields per dataclass rules
    updated_date: datetime  # Required; placed before optional fields per dataclass rules

    # Optional basic properties
    description: Optional[str] = None
    issue_type: IssueType = IssueType.UNKNOWN
    status: IssueStatus = IssueStatus.UNKNOWN
    priority: Priority = Priority.UNKNOWN

    # People - for identity resolution and ownership tracking
    assignee: Optional[UnifiedUser] = None
    reporter: Optional[UnifiedUser] = None

    # Optional dates - critical for timeline analysis and DORA metrics
    resolved_date: Optional[datetime] = None
    due_date: Optional[datetime] = None

    # Estimation and tracking - for velocity and capacity planning.
    # NOTE: story_points is int; BasePlatformAdapter._extract_story_points
    # truncates decimal platform values when populating it.
    story_points: Optional[int] = None
    original_estimate_hours: Optional[float] = None
    remaining_estimate_hours: Optional[float] = None
    time_spent_hours: Optional[float] = None

    # Relationships - for dependency analysis and epic breakdown
    parent_issue_key: Optional[str] = None
    subtasks: list[str] = field(default_factory=list)  # Child issue keys
    linked_issues: list[dict[str, str]] = field(
        default_factory=list
    )  # [{"key": "PROJ-456", "type": "blocks"}]

    # Sprint/iteration info - for agile metrics and sprint analysis
    sprint_id: Optional[str] = None
    sprint_name: Optional[str] = None

    # Labels and components - for categorization and filtering
    labels: list[str] = field(default_factory=list)
    components: list[str] = field(default_factory=list)

    # Platform-specific data - preserves original platform information
    platform_data: dict[str, Any] = field(default_factory=dict)

    # GitFlow Analytics integration - correlates with Git data
    linked_commits: list[str] = field(default_factory=list)  # Commit hashes
    linked_prs: list[str] = field(default_factory=list)  # Pull request identifiers
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
@dataclass
class UnifiedSprint:
    """Platform-agnostic sprint/iteration representation.

    Normalizes sprint, iteration, and milestone data from different platforms
    for consistent agile metrics and velocity tracking.

    WHY: Sprint data is essential for calculating velocity, planning accuracy,
    and team capacity metrics. Different platforms use different terminology
    (sprints, iterations, milestones) but represent similar concepts.
    """

    id: str  # Platform-specific unique identifier
    name: str  # Sprint/iteration display name
    project_id: str  # Parent project identifier
    platform: str  # Source platform identifier

    # Dates - for sprint timeline analysis
    start_date: Optional[datetime] = None
    end_date: Optional[datetime] = None

    # State - for active sprint identification
    is_active: bool = False
    is_completed: bool = False

    # Metrics - for velocity and planning analysis
    planned_story_points: Optional[int] = None
    completed_story_points: Optional[int] = None

    # Issues - for sprint content analysis (keys of issues in this sprint)
    issue_keys: list[str] = field(default_factory=list)

    # Platform-specific data - preserves original platform information
    platform_data: dict[str, Any] = field(default_factory=dict)
|