gitflow-analytics 1.0.1 → 1.0.3 (py3-none-any.whl)

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Files changed (57)
  1. gitflow_analytics/__init__.py +11 -11
  2. gitflow_analytics/_version.py +2 -2
  3. gitflow_analytics/cli.py +612 -258
  4. gitflow_analytics/cli_rich.py +353 -0
  5. gitflow_analytics/config.py +251 -141
  6. gitflow_analytics/core/analyzer.py +140 -103
  7. gitflow_analytics/core/branch_mapper.py +132 -132
  8. gitflow_analytics/core/cache.py +240 -169
  9. gitflow_analytics/core/identity.py +210 -173
  10. gitflow_analytics/extractors/base.py +13 -11
  11. gitflow_analytics/extractors/story_points.py +70 -59
  12. gitflow_analytics/extractors/tickets.py +101 -87
  13. gitflow_analytics/integrations/github_integration.py +84 -77
  14. gitflow_analytics/integrations/jira_integration.py +116 -104
  15. gitflow_analytics/integrations/orchestrator.py +86 -85
  16. gitflow_analytics/metrics/dora.py +181 -177
  17. gitflow_analytics/models/database.py +190 -53
  18. gitflow_analytics/qualitative/__init__.py +30 -0
  19. gitflow_analytics/qualitative/classifiers/__init__.py +13 -0
  20. gitflow_analytics/qualitative/classifiers/change_type.py +468 -0
  21. gitflow_analytics/qualitative/classifiers/domain_classifier.py +399 -0
  22. gitflow_analytics/qualitative/classifiers/intent_analyzer.py +436 -0
  23. gitflow_analytics/qualitative/classifiers/risk_analyzer.py +412 -0
  24. gitflow_analytics/qualitative/core/__init__.py +13 -0
  25. gitflow_analytics/qualitative/core/llm_fallback.py +653 -0
  26. gitflow_analytics/qualitative/core/nlp_engine.py +373 -0
  27. gitflow_analytics/qualitative/core/pattern_cache.py +457 -0
  28. gitflow_analytics/qualitative/core/processor.py +540 -0
  29. gitflow_analytics/qualitative/models/__init__.py +25 -0
  30. gitflow_analytics/qualitative/models/schemas.py +272 -0
  31. gitflow_analytics/qualitative/utils/__init__.py +13 -0
  32. gitflow_analytics/qualitative/utils/batch_processor.py +326 -0
  33. gitflow_analytics/qualitative/utils/cost_tracker.py +343 -0
  34. gitflow_analytics/qualitative/utils/metrics.py +347 -0
  35. gitflow_analytics/qualitative/utils/text_processing.py +243 -0
  36. gitflow_analytics/reports/analytics_writer.py +11 -4
  37. gitflow_analytics/reports/csv_writer.py +51 -31
  38. gitflow_analytics/reports/narrative_writer.py +16 -14
  39. gitflow_analytics/tui/__init__.py +5 -0
  40. gitflow_analytics/tui/app.py +721 -0
  41. gitflow_analytics/tui/screens/__init__.py +8 -0
  42. gitflow_analytics/tui/screens/analysis_progress_screen.py +487 -0
  43. gitflow_analytics/tui/screens/configuration_screen.py +547 -0
  44. gitflow_analytics/tui/screens/loading_screen.py +358 -0
  45. gitflow_analytics/tui/screens/main_screen.py +304 -0
  46. gitflow_analytics/tui/screens/results_screen.py +698 -0
  47. gitflow_analytics/tui/widgets/__init__.py +7 -0
  48. gitflow_analytics/tui/widgets/data_table.py +257 -0
  49. gitflow_analytics/tui/widgets/export_modal.py +301 -0
  50. gitflow_analytics/tui/widgets/progress_widget.py +192 -0
  51. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/METADATA +31 -4
  52. gitflow_analytics-1.0.3.dist-info/RECORD +62 -0
  53. gitflow_analytics-1.0.1.dist-info/RECORD +0 -31
  54. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/WHEEL +0 -0
  55. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/entry_points.txt +0 -0
  56. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/licenses/LICENSE +0 -0
  57. {gitflow_analytics-1.0.1.dist-info → gitflow_analytics-1.0.3.dist-info}/top_level.txt +0 -0
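The two integration diffs reproduced below are dominated by two mechanical changes: a reformat consistent with an automated formatter such as Black (double quotes, trailing commas, exploded signatures) and a typing cleanup that swaps `typing.Dict`/`List`/`Set` for the PEP 585 built-in generics (`dict[...]`, `list[...]`, `set[...]`), which require Python 3.9 or newer. A minimal sketch of the annotation style the new code uses; `pr_coverage` is an illustrative helper, not part of the package:

from typing import Any, Optional

def pr_coverage(prs: list[dict[str, Any]], default: Optional[float] = None) -> float:
    # Built-in containers are subscriptable as generics on Python 3.9+ (PEP 585);
    # no typing.List/typing.Dict import is needed.
    if not prs:
        return default or 0.0
    return sum(1 for pr in prs if pr.get("story_points")) / len(prs) * 100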
gitflow_analytics/integrations/github_integration.py
@@ -1,7 +1,8 @@
 """GitHub API integration for PR and issue enrichment."""
+
 import time
 from datetime import datetime, timezone
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 from github import Github
 from github.GithubException import RateLimitExceededException, UnknownObjectException
@@ -11,157 +12,163 @@ from ..core.cache import GitAnalysisCache
 
 class GitHubIntegration:
     """Integrate with GitHub API for PR and issue data."""
-
-    def __init__(self, token: str, cache: GitAnalysisCache,
-                 rate_limit_retries: int = 3, backoff_factor: int = 2,
-                 allowed_ticket_platforms: Optional[List[str]] = None):
+
+    def __init__(
+        self,
+        token: str,
+        cache: GitAnalysisCache,
+        rate_limit_retries: int = 3,
+        backoff_factor: int = 2,
+        allowed_ticket_platforms: Optional[list[str]] = None,
+    ):
         """Initialize GitHub integration."""
         self.github = Github(token)
         self.cache = cache
         self.rate_limit_retries = rate_limit_retries
         self.backoff_factor = backoff_factor
         self.allowed_ticket_platforms = allowed_ticket_platforms
-
-    def enrich_repository_with_prs(self, repo_name: str, commits: List[Dict[str, Any]],
-                                   since: datetime) -> List[Dict[str, Any]]:
+
+    def enrich_repository_with_prs(
+        self, repo_name: str, commits: list[dict[str, Any]], since: datetime
+    ) -> list[dict[str, Any]]:
         """Enrich repository commits with PR data."""
         try:
             repo = self.github.get_repo(repo_name)
         except UnknownObjectException:
             print(f" ⚠️ GitHub repo not found: {repo_name}")
             return []
-
+
         # Get PRs for the time period
         prs = self._get_pull_requests(repo, since)
-
+
        # Build commit to PR mapping
         commit_to_pr = {}
         for pr in prs:
             pr_data = self._extract_pr_data(pr)
-
+
             # Cache PR data
             self.cache.cache_pr(repo_name, pr_data)
-
+
             # Map commits to this PR
             for commit in pr.get_commits():
                 commit_to_pr[commit.sha] = pr_data
-
+
         # Enrich commits with PR data
         enriched_prs = []
         for commit in commits:
-            if commit['hash'] in commit_to_pr:
-                pr_data = commit_to_pr[commit['hash']]
-
+            if commit["hash"] in commit_to_pr:
+                pr_data = commit_to_pr[commit["hash"]]
+
                 # Use PR story points if commit doesn't have them
-                if not commit.get('story_points') and pr_data.get('story_points'):
-                    commit['story_points'] = pr_data['story_points']
-
+                if not commit.get("story_points") and pr_data.get("story_points"):
+                    commit["story_points"] = pr_data["story_points"]
+
                 # Add PR reference
-                commit['pr_number'] = pr_data['number']
-                commit['pr_title'] = pr_data['title']
-
+                commit["pr_number"] = pr_data["number"]
+                commit["pr_title"] = pr_data["title"]
+
                 # Add to PR list if not already there
                 if pr_data not in enriched_prs:
                     enriched_prs.append(pr_data)
-
+
         return enriched_prs
-
-    def _get_pull_requests(self, repo, since: datetime) -> List[Any]:
+
+    def _get_pull_requests(self, repo, since: datetime) -> list[Any]:
         """Get pull requests with rate limit handling."""
         prs = []
-
+
         # Ensure since is timezone-aware for comparison with GitHub's timezone-aware datetimes
         if since.tzinfo is None:
             since = since.replace(tzinfo=timezone.utc)
-
+
         for attempt in range(self.rate_limit_retries):
             try:
                 # Get all PRs updated since the date
-                for pr in repo.get_pulls(state='all', sort='updated', direction='desc'):
+                for pr in repo.get_pulls(state="all", sort="updated", direction="desc"):
                     if pr.updated_at < since:
                         break
-
+
                     # Only include PRs that were merged in our time period
                     if pr.merged and pr.merged_at >= since:
                         prs.append(pr)
-
+
                 return prs
-
+
             except RateLimitExceededException:
                 if attempt < self.rate_limit_retries - 1:
-                    wait_time = self.backoff_factor ** attempt
+                    wait_time = self.backoff_factor**attempt
                    print(f" ⏳ GitHub rate limit hit, waiting {wait_time}s...")
                     time.sleep(wait_time)
                 else:
                     print(" ❌ GitHub rate limit exceeded, skipping PR enrichment")
                     return []
-
+
         return prs
-
-    def _extract_pr_data(self, pr) -> Dict[str, Any]:
+
+    def _extract_pr_data(self, pr) -> dict[str, Any]:
         """Extract relevant data from a GitHub PR object."""
         from ..extractors.story_points import StoryPointExtractor
         from ..extractors.tickets import TicketExtractor
-
+
         sp_extractor = StoryPointExtractor()
         ticket_extractor = TicketExtractor(allowed_platforms=self.allowed_ticket_platforms)
-
+
         # Extract story points from PR title and body
         pr_text = f"{pr.title} {pr.body or ''}"
         story_points = sp_extractor.extract_from_text(pr_text)
-
+
         # Extract ticket references
         tickets = ticket_extractor.extract_from_text(pr_text)
-
+
         # Get commit SHAs
         commit_hashes = [c.sha for c in pr.get_commits()]
-
+
         return {
-            'number': pr.number,
-            'title': pr.title,
-            'description': pr.body,
-            'author': pr.user.login,
-            'created_at': pr.created_at,
-            'merged_at': pr.merged_at,
-            'story_points': story_points,
-            'labels': [label.name for label in pr.labels],
-            'commit_hashes': commit_hashes,
-            'ticket_references': tickets,
-            'review_comments': pr.review_comments,
-            'changed_files': pr.changed_files,
-            'additions': pr.additions,
-            'deletions': pr.deletions
+            "number": pr.number,
+            "title": pr.title,
+            "description": pr.body,
+            "author": pr.user.login,
+            "created_at": pr.created_at,
+            "merged_at": pr.merged_at,
+            "story_points": story_points,
+            "labels": [label.name for label in pr.labels],
+            "commit_hashes": commit_hashes,
+            "ticket_references": tickets,
+            "review_comments": pr.review_comments,
+            "changed_files": pr.changed_files,
+            "additions": pr.additions,
+            "deletions": pr.deletions,
         }
-
-    def calculate_pr_metrics(self, prs: List[Dict[str, Any]]) -> Dict[str, Any]:
+
+    def calculate_pr_metrics(self, prs: list[dict[str, Any]]) -> dict[str, Any]:
         """Calculate PR-level metrics."""
         if not prs:
             return {
-                'avg_pr_size': 0,
-                'avg_pr_lifetime_hours': 0,
-                'avg_files_per_pr': 0,
-                'total_review_comments': 0
+                "avg_pr_size": 0,
+                "avg_pr_lifetime_hours": 0,
+                "avg_files_per_pr": 0,
+                "total_review_comments": 0,
             }
-
-        total_size = sum(pr['additions'] + pr['deletions'] for pr in prs)
-        total_files = sum(pr.get('changed_files', 0) for pr in prs)
-        total_comments = sum(pr.get('review_comments', 0) for pr in prs)
-
+
+        total_size = sum(pr["additions"] + pr["deletions"] for pr in prs)
+        total_files = sum(pr.get("changed_files", 0) for pr in prs)
+        total_comments = sum(pr.get("review_comments", 0) for pr in prs)
+
         # Calculate average PR lifetime
         lifetimes = []
         for pr in prs:
-            if pr.get('merged_at') and pr.get('created_at'):
-                lifetime = (pr['merged_at'] - pr['created_at']).total_seconds() / 3600
+            if pr.get("merged_at") and pr.get("created_at"):
+                lifetime = (pr["merged_at"] - pr["created_at"]).total_seconds() / 3600
                 lifetimes.append(lifetime)
-
+
         avg_lifetime = sum(lifetimes) / len(lifetimes) if lifetimes else 0
-
+
         return {
-            'total_prs': len(prs),
-            'avg_pr_size': total_size / len(prs),
-            'avg_pr_lifetime_hours': avg_lifetime,
-            'avg_files_per_pr': total_files / len(prs),
-            'total_review_comments': total_comments,
-            'prs_with_story_points': sum(1 for pr in prs if pr.get('story_points')),
-            'story_point_coverage': sum(1 for pr in prs if pr.get('story_points')) / len(prs) * 100
-        }
+            "total_prs": len(prs),
+            "avg_pr_size": total_size / len(prs),
+            "avg_pr_lifetime_hours": avg_lifetime,
+            "avg_files_per_pr": total_files / len(prs),
+            "total_review_comments": total_comments,
+            "prs_with_story_points": sum(1 for pr in prs if pr.get("story_points")),
+            "story_point_coverage": sum(1 for pr in prs if pr.get("story_points")) / len(prs) * 100,
+        }
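The parameter order of `GitHubIntegration.__init__` is unchanged, so existing callers keep working. A hedged usage sketch based only on the signatures visible above; the token, cache construction, and commit dict are placeholders, and the `GitAnalysisCache` constructor arguments are an assumption since they are not shown in this diff:

from datetime import datetime

from gitflow_analytics.core.cache import GitAnalysisCache
from gitflow_analytics.integrations.github_integration import GitHubIntegration

cache = GitAnalysisCache(".gitflow-cache")  # hypothetical constructor args; not shown in this diff
gh = GitHubIntegration(
    token="ghp_xxxxxxxx",  # placeholder token
    cache=cache,
    rate_limit_retries=3,
    backoff_factor=2,
    allowed_ticket_platforms=["jira"],  # now typed Optional[list[str]]
)

commits = [{"hash": "0123abcd", "story_points": None}]  # dict shape implied by the diff
since = datetime(2025, 1, 1)  # naive datetimes are coerced to UTC by _get_pull_requests

prs = gh.enrich_repository_with_prs("owner/repo", commits, since)
print(gh.calculate_pr_metrics(prs))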
gitflow_analytics/integrations/jira_integration.py
@@ -1,6 +1,7 @@
 """JIRA API integration for story point and ticket enrichment."""
+
 import base64
-from typing import Any, Dict, List, Optional, Set
+from typing import Any, Optional
 
 import requests
 from requests.exceptions import RequestException
@@ -10,12 +11,17 @@ from ..core.cache import GitAnalysisCache
 
 class JIRAIntegration:
     """Integrate with JIRA API for ticket and story point data."""
-
-    def __init__(self, base_url: str, username: str, api_token: str,
-                 cache: GitAnalysisCache,
-                 story_point_fields: Optional[List[str]] = None):
+
+    def __init__(
+        self,
+        base_url: str,
+        username: str,
+        api_token: str,
+        cache: GitAnalysisCache,
+        story_point_fields: Optional[list[str]] = None,
+    ):
         """Initialize JIRA integration.
-
+
         Args:
             base_url: JIRA instance base URL (e.g., https://company.atlassian.net)
             username: JIRA username/email
@@ -23,74 +29,74 @@ class JIRAIntegration:
             cache: Git analysis cache for storing JIRA data
             story_point_fields: List of custom field IDs for story points
         """
-        self.base_url = base_url.rstrip('/')
+        self.base_url = base_url.rstrip("/")
         self.cache = cache
-
+
         # Set up authentication
         credentials = base64.b64encode(f"{username}:{api_token}".encode()).decode()
         self.headers = {
             "Authorization": f"Basic {credentials}",
             "Accept": "application/json",
-            "Content-Type": "application/json"
+            "Content-Type": "application/json",
         }
-
+
         # Default story point field names/IDs
         self.story_point_fields = story_point_fields or [
             "customfield_10016",  # Common story points field
             "customfield_10021",  # Alternative field
-            "Story Points",      # Field name
-            "storypoints",       # Alternative name
+            "Story Points",  # Field name
+            "storypoints",  # Alternative name
             "customfield_10002",  # Another common ID
         ]
-
+
         # Cache for field mapping
         self._field_mapping = None
-
-    def enrich_commits_with_jira_data(self, commits: List[Dict[str, Any]]) -> None:
+
+    def enrich_commits_with_jira_data(self, commits: list[dict[str, Any]]) -> None:
         """Enrich commits with JIRA story points by looking up ticket references.
-
+
         Args:
             commits: List of commit dictionaries to enrich
         """
         # Collect all unique JIRA tickets from commits
         jira_tickets = set()
         for commit in commits:
-            ticket_refs = commit.get('ticket_references', [])
+            ticket_refs = commit.get("ticket_references", [])
             for ref in ticket_refs:
-                if isinstance(ref, dict) and ref.get('platform') == 'jira':
-                    jira_tickets.add(ref['id'])
+                if isinstance(ref, dict) and ref.get("platform") == "jira":
+                    jira_tickets.add(ref["id"])
                 elif isinstance(ref, str) and self._is_jira_ticket(ref):
                     jira_tickets.add(ref)
-
+
         if not jira_tickets:
             return
-
+
         # Fetch ticket data from JIRA
         ticket_data = self._fetch_tickets_batch(list(jira_tickets))
-
+
         # Enrich commits with story points
         for commit in commits:
             commit_story_points = 0
-            ticket_refs = commit.get('ticket_references', [])
-
+            ticket_refs = commit.get("ticket_references", [])
+
             for ref in ticket_refs:
                 ticket_id = None
-                if isinstance(ref, dict) and ref.get('platform') == 'jira':
-                    ticket_id = ref['id']
+                if isinstance(ref, dict) and ref.get("platform") == "jira":
+                    ticket_id = ref["id"]
                 elif isinstance(ref, str) and self._is_jira_ticket(ref):
                     ticket_id = ref
-
+
                 if ticket_id and ticket_id in ticket_data:
-                    points = ticket_data[ticket_id].get('story_points', 0)
+                    points = ticket_data[ticket_id].get("story_points", 0)
                     if points:
                         commit_story_points = max(commit_story_points, points)
-
+
             if commit_story_points > 0:
-                commit['story_points'] = commit_story_points
-
-    def enrich_prs_with_jira_data(self, prs: List[Dict[str, Any]]) -> None:
+                commit["story_points"] = commit_story_points
+
+    def enrich_prs_with_jira_data(self, prs: list[dict[str, Any]]) -> None:
         """Enrich PRs with JIRA story points.
-
+
         Args:
             prs: List of PR dictionaries to enrich
         """
@@ -98,51 +104,51 @@ class JIRAIntegration:
         for pr in prs:
             pr_text = f"{pr.get('title', '')} {pr.get('description', '')}"
             jira_tickets = self._extract_jira_tickets(pr_text)
-
+
             if jira_tickets:
                 ticket_data = self._fetch_tickets_batch(list(jira_tickets))
-
+
                 # Use the highest story point value found
                 max_points = 0
                 for ticket_id in jira_tickets:
                     if ticket_id in ticket_data:
-                        points = ticket_data[ticket_id].get('story_points', 0)
+                        points = ticket_data[ticket_id].get("story_points", 0)
                         max_points = max(max_points, points)
-
+
                 if max_points > 0:
-                    pr['story_points'] = max_points
-
-    def _fetch_tickets_batch(self, ticket_ids: List[str]) -> Dict[str, Dict[str, Any]]:
+                    pr["story_points"] = max_points
+
+    def _fetch_tickets_batch(self, ticket_ids: list[str]) -> dict[str, dict[str, Any]]:
         """Fetch multiple tickets from JIRA API.
-
+
         Args:
             ticket_ids: List of JIRA ticket IDs
-
+
         Returns:
             Dictionary mapping ticket ID to ticket data
         """
         if not ticket_ids:
             return {}
-
+
         # Check cache first
         cached_tickets = {}
         tickets_to_fetch = []
-
+
         for ticket_id in ticket_ids:
             cached = self._get_cached_ticket(ticket_id)
             if cached:
                 cached_tickets[ticket_id] = cached
             else:
                 tickets_to_fetch.append(ticket_id)
-
+
         # Fetch missing tickets from JIRA
         if tickets_to_fetch:
             # JIRA JQL has a limit, so batch the requests
             batch_size = 50
             for i in range(0, len(tickets_to_fetch), batch_size):
-                batch = tickets_to_fetch[i:i + batch_size]
+                batch = tickets_to_fetch[i : i + batch_size]
                 jql = f"key in ({','.join(batch)})"
-
+
                 try:
                     response = requests.get(
                         f"{self.base_url}/rest/api/3/search",
@@ -150,33 +156,33 @@ class JIRAIntegration:
                         params={
                             "jql": jql,
                             "fields": "*all",  # Get all fields to find story points
-                            "maxResults": batch_size
-                        }
+                            "maxResults": batch_size,
+                        },
                     )
                     response.raise_for_status()
-
+
                     data = response.json()
-                    for issue in data.get('issues', []):
+                    for issue in data.get("issues", []):
                         ticket_data = self._extract_ticket_data(issue)
-                        cached_tickets[ticket_data['id']] = ticket_data
-                        self._cache_ticket(ticket_data['id'], ticket_data)
-
+                        cached_tickets[ticket_data["id"]] = ticket_data
+                        self._cache_ticket(ticket_data["id"], ticket_data)
+
                 except RequestException as e:
                     print(f" ⚠️ Failed to fetch JIRA tickets: {e}")
-
+
         return cached_tickets
-
-    def _extract_ticket_data(self, issue: Dict[str, Any]) -> Dict[str, Any]:
+
+    def _extract_ticket_data(self, issue: dict[str, Any]) -> dict[str, Any]:
         """Extract relevant data from JIRA issue.
-
+
         Args:
             issue: JIRA issue data from API
-
+
         Returns:
             Dictionary with extracted ticket data
         """
-        fields = issue.get('fields', {})
-
+        fields = issue.get("fields", {})
+
         # Extract story points from various possible fields
         story_points = 0
         for field_id in self.story_point_fields:
@@ -186,87 +192,93 @@ class JIRAIntegration:
                     break
             except (ValueError, TypeError):
                 continue
-
+
         return {
-            'id': issue['key'],
-            'summary': fields.get('summary', ''),
-            'status': fields.get('status', {}).get('name', ''),
-            'story_points': int(story_points) if story_points else 0,
-            'assignee': fields.get('assignee', {}).get('displayName', '') if fields.get('assignee') else '',
-            'created': fields.get('created', ''),
-            'updated': fields.get('updated', '')
+            "id": issue["key"],
+            "summary": fields.get("summary", ""),
+            "status": fields.get("status", {}).get("name", ""),
+            "story_points": int(story_points) if story_points else 0,
+            "assignee": (
+                fields.get("assignee", {}).get("displayName", "") if fields.get("assignee") else ""
+            ),
+            "created": fields.get("created", ""),
+            "updated": fields.get("updated", ""),
         }
-
+
     def _is_jira_ticket(self, text: str) -> bool:
         """Check if text matches JIRA ticket pattern."""
         import re
-        return bool(re.match(r'^[A-Z]{2,10}-\d+$', text))
-
-    def _extract_jira_tickets(self, text: str) -> Set[str]:
+
+        return bool(re.match(r"^[A-Z]{2,10}-\d+$", text))
+
+    def _extract_jira_tickets(self, text: str) -> set[str]:
         """Extract JIRA ticket IDs from text."""
         import re
-        pattern = r'([A-Z]{2,10}-\d+)'
+
+        pattern = r"([A-Z]{2,10}-\d+)"
         matches = re.findall(pattern, text)
         return set(matches)
-
-    def _get_cached_ticket(self, ticket_id: str) -> Optional[Dict[str, Any]]:
+
+    def _get_cached_ticket(self, ticket_id: str) -> Optional[dict[str, Any]]:
         """Get ticket data from cache."""
         # TODO: Implement cache lookup using self.cache
         # For now, return None to always fetch from API
         return None
-
-    def _cache_ticket(self, ticket_id: str, ticket_data: Dict[str, Any]) -> None:
+
+    def _cache_ticket(self, ticket_id: str, ticket_data: dict[str, Any]) -> None:
         """Cache ticket data."""
         # TODO: Implement cache storage using self.cache
         pass
-
+
     def validate_connection(self) -> bool:
         """Validate JIRA connection and credentials.
-
+
         Returns:
             True if connection is valid
         """
         try:
-            response = requests.get(
-                f"{self.base_url}/rest/api/3/myself",
-                headers=self.headers
-            )
+            response = requests.get(f"{self.base_url}/rest/api/3/myself", headers=self.headers)
             response.raise_for_status()
             return True
         except RequestException as e:
            print(f" ❌ JIRA connection failed: {e}")
            return False
-
-    def discover_fields(self) -> Dict[str, Dict[str, str]]:
+
+    def discover_fields(self) -> dict[str, dict[str, str]]:
         """Discover all available fields in JIRA instance.
-
+
         Returns:
             Dictionary mapping field IDs to their names and types
         """
         try:
-            response = requests.get(
-                f"{self.base_url}/rest/api/3/field",
-                headers=self.headers
-            )
+            response = requests.get(f"{self.base_url}/rest/api/3/field", headers=self.headers)
             response.raise_for_status()
-
+
             fields = {}
             for field in response.json():
-                field_id = field.get('id', '')
-                field_name = field.get('name', '')
-                field_type = field.get('schema', {}).get('type', 'unknown') if field.get('schema') else 'unknown'
-
+                field_id = field.get("id", "")
+                field_name = field.get("name", "")
+                field_type = (
+                    field.get("schema", {}).get("type", "unknown")
+                    if field.get("schema")
+                    else "unknown"
+                )
+
                 # Look for potential story point fields
-                if any(term in field_name.lower() for term in ['story', 'point', 'estimate', 'size']):
+                if any(
+                    term in field_name.lower() for term in ["story", "point", "estimate", "size"]
+                ):
                     fields[field_id] = {
-                        'name': field_name,
-                        'type': field_type,
-                        'is_custom': field.get('custom', False)
+                        "name": field_name,
+                        "type": field_type,
+                        "is_custom": field.get("custom", False),
                     }
-                    print(f" 📊 Potential story point field: {field_id} = '{field_name}' (type: {field_type})")
-
+                    print(
+                        f" 📊 Potential story point field: {field_id} = '{field_name}' (type: {field_type})"
+                    )
+
             return fields
-
+
         except RequestException as e:
             print(f" ⚠️ Failed to discover JIRA fields: {e}")
-            return {}
+            return {}
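The JIRA changes are the same formatter-plus-typing pass; the `_get_cached_ticket`/`_cache_ticket` TODO stubs survive unchanged, so every lookup still hits the API. A hedged usage sketch under the same assumptions as above (placeholder credentials, hypothetical `GitAnalysisCache` construction):

from gitflow_analytics.core.cache import GitAnalysisCache
from gitflow_analytics.integrations.jira_integration import JIRAIntegration

cache = GitAnalysisCache(".gitflow-cache")  # hypothetical constructor args; not shown in this diff
jira = JIRAIntegration(
    base_url="https://company.atlassian.net",  # placeholder instance URL
    username="user@example.com",
    api_token="atlassian-api-token",  # placeholder token
    cache=cache,
    story_point_fields=["customfield_10016"],  # optional override of the built-in defaults
)

if jira.validate_connection():
    jira.discover_fields()  # prints candidate story point fields on the instance

    commits = [{"ticket_references": [{"platform": "jira", "id": "PROJ-123"}]}]
    jira.enrich_commits_with_jira_data(commits)
    # commits[0].get("story_points") is set when PROJ-123 carries an estimate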