gitflow-analytics 1.0.0__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
Files changed (58)
  1. gitflow_analytics/__init__.py +11 -9
  2. gitflow_analytics/_version.py +2 -2
  3. gitflow_analytics/cli.py +691 -243
  4. gitflow_analytics/cli_rich.py +353 -0
  5. gitflow_analytics/config.py +389 -96
  6. gitflow_analytics/core/analyzer.py +175 -78
  7. gitflow_analytics/core/branch_mapper.py +132 -132
  8. gitflow_analytics/core/cache.py +242 -173
  9. gitflow_analytics/core/identity.py +214 -178
  10. gitflow_analytics/extractors/base.py +13 -11
  11. gitflow_analytics/extractors/story_points.py +70 -59
  12. gitflow_analytics/extractors/tickets.py +111 -88
  13. gitflow_analytics/integrations/github_integration.py +91 -77
  14. gitflow_analytics/integrations/jira_integration.py +284 -0
  15. gitflow_analytics/integrations/orchestrator.py +99 -72
  16. gitflow_analytics/metrics/dora.py +183 -179
  17. gitflow_analytics/models/database.py +191 -54
  18. gitflow_analytics/qualitative/__init__.py +30 -0
  19. gitflow_analytics/qualitative/classifiers/__init__.py +13 -0
  20. gitflow_analytics/qualitative/classifiers/change_type.py +468 -0
  21. gitflow_analytics/qualitative/classifiers/domain_classifier.py +399 -0
  22. gitflow_analytics/qualitative/classifiers/intent_analyzer.py +436 -0
  23. gitflow_analytics/qualitative/classifiers/risk_analyzer.py +412 -0
  24. gitflow_analytics/qualitative/core/__init__.py +13 -0
  25. gitflow_analytics/qualitative/core/llm_fallback.py +653 -0
  26. gitflow_analytics/qualitative/core/nlp_engine.py +373 -0
  27. gitflow_analytics/qualitative/core/pattern_cache.py +457 -0
  28. gitflow_analytics/qualitative/core/processor.py +540 -0
  29. gitflow_analytics/qualitative/models/__init__.py +25 -0
  30. gitflow_analytics/qualitative/models/schemas.py +272 -0
  31. gitflow_analytics/qualitative/utils/__init__.py +13 -0
  32. gitflow_analytics/qualitative/utils/batch_processor.py +326 -0
  33. gitflow_analytics/qualitative/utils/cost_tracker.py +343 -0
  34. gitflow_analytics/qualitative/utils/metrics.py +347 -0
  35. gitflow_analytics/qualitative/utils/text_processing.py +243 -0
  36. gitflow_analytics/reports/analytics_writer.py +25 -8
  37. gitflow_analytics/reports/csv_writer.py +60 -32
  38. gitflow_analytics/reports/narrative_writer.py +21 -15
  39. gitflow_analytics/tui/__init__.py +5 -0
  40. gitflow_analytics/tui/app.py +721 -0
  41. gitflow_analytics/tui/screens/__init__.py +8 -0
  42. gitflow_analytics/tui/screens/analysis_progress_screen.py +487 -0
  43. gitflow_analytics/tui/screens/configuration_screen.py +547 -0
  44. gitflow_analytics/tui/screens/loading_screen.py +358 -0
  45. gitflow_analytics/tui/screens/main_screen.py +304 -0
  46. gitflow_analytics/tui/screens/results_screen.py +698 -0
  47. gitflow_analytics/tui/widgets/__init__.py +7 -0
  48. gitflow_analytics/tui/widgets/data_table.py +257 -0
  49. gitflow_analytics/tui/widgets/export_modal.py +301 -0
  50. gitflow_analytics/tui/widgets/progress_widget.py +192 -0
  51. gitflow_analytics-1.0.3.dist-info/METADATA +490 -0
  52. gitflow_analytics-1.0.3.dist-info/RECORD +62 -0
  53. gitflow_analytics-1.0.0.dist-info/METADATA +0 -201
  54. gitflow_analytics-1.0.0.dist-info/RECORD +0 -30
  55. {gitflow_analytics-1.0.0.dist-info → gitflow_analytics-1.0.3.dist-info}/WHEEL +0 -0
  56. {gitflow_analytics-1.0.0.dist-info → gitflow_analytics-1.0.3.dist-info}/entry_points.txt +0 -0
  57. {gitflow_analytics-1.0.0.dist-info → gitflow_analytics-1.0.3.dist-info}/licenses/LICENSE +0 -0
  58. {gitflow_analytics-1.0.0.dist-info → gitflow_analytics-1.0.3.dist-info}/top_level.txt +0 -0
gitflow_analytics/integrations/github_integration.py
@@ -1,7 +1,9 @@
 """GitHub API integration for PR and issue enrichment."""
-from datetime import datetime, timedelta
-from typing import List, Dict, Any, Optional
+
 import time
+from datetime import datetime, timezone
+from typing import Any, Optional
+
 from github import Github
 from github.GithubException import RateLimitExceededException, UnknownObjectException
 
@@ -10,151 +12,163 @@ from ..core.cache import GitAnalysisCache
 
 class GitHubIntegration:
     """Integrate with GitHub API for PR and issue data."""
-
-    def __init__(self, token: str, cache: GitAnalysisCache,
-                 rate_limit_retries: int = 3, backoff_factor: int = 2):
+
+    def __init__(
+        self,
+        token: str,
+        cache: GitAnalysisCache,
+        rate_limit_retries: int = 3,
+        backoff_factor: int = 2,
+        allowed_ticket_platforms: Optional[list[str]] = None,
+    ):
         """Initialize GitHub integration."""
         self.github = Github(token)
         self.cache = cache
         self.rate_limit_retries = rate_limit_retries
         self.backoff_factor = backoff_factor
-
-    def enrich_repository_with_prs(self, repo_name: str, commits: List[Dict[str, Any]],
-                                   since: datetime) -> List[Dict[str, Any]]:
+        self.allowed_ticket_platforms = allowed_ticket_platforms
+
+    def enrich_repository_with_prs(
+        self, repo_name: str, commits: list[dict[str, Any]], since: datetime
+    ) -> list[dict[str, Any]]:
         """Enrich repository commits with PR data."""
         try:
             repo = self.github.get_repo(repo_name)
         except UnknownObjectException:
             print(f" ⚠️ GitHub repo not found: {repo_name}")
             return []
-
+
         # Get PRs for the time period
         prs = self._get_pull_requests(repo, since)
-
+
         # Build commit to PR mapping
         commit_to_pr = {}
         for pr in prs:
             pr_data = self._extract_pr_data(pr)
-
+
             # Cache PR data
             self.cache.cache_pr(repo_name, pr_data)
-
+
             # Map commits to this PR
             for commit in pr.get_commits():
                 commit_to_pr[commit.sha] = pr_data
-
+
         # Enrich commits with PR data
         enriched_prs = []
         for commit in commits:
-            if commit['hash'] in commit_to_pr:
-                pr_data = commit_to_pr[commit['hash']]
-
+            if commit["hash"] in commit_to_pr:
+                pr_data = commit_to_pr[commit["hash"]]
+
                 # Use PR story points if commit doesn't have them
-                if not commit.get('story_points') and pr_data.get('story_points'):
-                    commit['story_points'] = pr_data['story_points']
-
+                if not commit.get("story_points") and pr_data.get("story_points"):
+                    commit["story_points"] = pr_data["story_points"]
+
                 # Add PR reference
-                commit['pr_number'] = pr_data['number']
-                commit['pr_title'] = pr_data['title']
-
+                commit["pr_number"] = pr_data["number"]
+                commit["pr_title"] = pr_data["title"]
+
                 # Add to PR list if not already there
                 if pr_data not in enriched_prs:
                     enriched_prs.append(pr_data)
-
+
         return enriched_prs
-
-    def _get_pull_requests(self, repo, since: datetime) -> List[Any]:
+
+    def _get_pull_requests(self, repo, since: datetime) -> list[Any]:
         """Get pull requests with rate limit handling."""
         prs = []
-
+
+        # Ensure since is timezone-aware for comparison with GitHub's timezone-aware datetimes
+        if since.tzinfo is None:
+            since = since.replace(tzinfo=timezone.utc)
+
         for attempt in range(self.rate_limit_retries):
             try:
                 # Get all PRs updated since the date
-                for pr in repo.get_pulls(state='all', sort='updated', direction='desc'):
+                for pr in repo.get_pulls(state="all", sort="updated", direction="desc"):
                     if pr.updated_at < since:
                         break
-
+
                     # Only include PRs that were merged in our time period
                     if pr.merged and pr.merged_at >= since:
                         prs.append(pr)
-
+
                 return prs
-
+
             except RateLimitExceededException:
                 if attempt < self.rate_limit_retries - 1:
-                    wait_time = self.backoff_factor ** attempt
+                    wait_time = self.backoff_factor**attempt
                     print(f" ⏳ GitHub rate limit hit, waiting {wait_time}s...")
                     time.sleep(wait_time)
                 else:
                     print(" ❌ GitHub rate limit exceeded, skipping PR enrichment")
                     return []
-
+
         return prs
-
-    def _extract_pr_data(self, pr) -> Dict[str, Any]:
+
+    def _extract_pr_data(self, pr) -> dict[str, Any]:
         """Extract relevant data from a GitHub PR object."""
         from ..extractors.story_points import StoryPointExtractor
         from ..extractors.tickets import TicketExtractor
-
+
         sp_extractor = StoryPointExtractor()
-        ticket_extractor = TicketExtractor()
-
+        ticket_extractor = TicketExtractor(allowed_platforms=self.allowed_ticket_platforms)
+
         # Extract story points from PR title and body
        pr_text = f"{pr.title} {pr.body or ''}"
         story_points = sp_extractor.extract_from_text(pr_text)
-
+
         # Extract ticket references
         tickets = ticket_extractor.extract_from_text(pr_text)
-
+
         # Get commit SHAs
         commit_hashes = [c.sha for c in pr.get_commits()]
-
+
         return {
-            'number': pr.number,
-            'title': pr.title,
-            'description': pr.body,
-            'author': pr.user.login,
-            'created_at': pr.created_at,
-            'merged_at': pr.merged_at,
-            'story_points': story_points,
-            'labels': [label.name for label in pr.labels],
-            'commit_hashes': commit_hashes,
-            'ticket_references': tickets,
-            'review_comments': pr.review_comments,
-            'changed_files': pr.changed_files,
-            'additions': pr.additions,
-            'deletions': pr.deletions
+            "number": pr.number,
+            "title": pr.title,
+            "description": pr.body,
+            "author": pr.user.login,
+            "created_at": pr.created_at,
+            "merged_at": pr.merged_at,
+            "story_points": story_points,
+            "labels": [label.name for label in pr.labels],
+            "commit_hashes": commit_hashes,
+            "ticket_references": tickets,
+            "review_comments": pr.review_comments,
+            "changed_files": pr.changed_files,
+            "additions": pr.additions,
+            "deletions": pr.deletions,
         }
-
-    def calculate_pr_metrics(self, prs: List[Dict[str, Any]]) -> Dict[str, Any]:
+
+    def calculate_pr_metrics(self, prs: list[dict[str, Any]]) -> dict[str, Any]:
         """Calculate PR-level metrics."""
         if not prs:
             return {
-                'avg_pr_size': 0,
-                'avg_pr_lifetime_hours': 0,
-                'avg_files_per_pr': 0,
-                'total_review_comments': 0
+                "avg_pr_size": 0,
+                "avg_pr_lifetime_hours": 0,
+                "avg_files_per_pr": 0,
+                "total_review_comments": 0,
             }
-
-        total_size = sum(pr['additions'] + pr['deletions'] for pr in prs)
-        total_files = sum(pr.get('changed_files', 0) for pr in prs)
-        total_comments = sum(pr.get('review_comments', 0) for pr in prs)
-
+
+        total_size = sum(pr["additions"] + pr["deletions"] for pr in prs)
+        total_files = sum(pr.get("changed_files", 0) for pr in prs)
+        total_comments = sum(pr.get("review_comments", 0) for pr in prs)
+
         # Calculate average PR lifetime
         lifetimes = []
         for pr in prs:
-            if pr.get('merged_at') and pr.get('created_at'):
-                lifetime = (pr['merged_at'] - pr['created_at']).total_seconds() / 3600
+            if pr.get("merged_at") and pr.get("created_at"):
+                lifetime = (pr["merged_at"] - pr["created_at"]).total_seconds() / 3600
                 lifetimes.append(lifetime)
-
+
         avg_lifetime = sum(lifetimes) / len(lifetimes) if lifetimes else 0
-
+
         return {
-            'total_prs': len(prs),
-            'avg_pr_size': total_size / len(prs),
-            'avg_pr_lifetime_hours': avg_lifetime,
-            'avg_files_per_pr': total_files / len(prs),
-            'total_review_comments': total_comments,
-            'prs_with_story_points': sum(1 for pr in prs if pr.get('story_points')),
-            'story_point_coverage': sum(1 for pr in prs if pr.get('story_points')) / len(prs) * 100
-        }
+            "total_prs": len(prs),
+            "avg_pr_size": total_size / len(prs),
+            "avg_pr_lifetime_hours": avg_lifetime,
+            "avg_files_per_pr": total_files / len(prs),
+            "total_review_comments": total_comments,
+            "prs_with_story_points": sum(1 for pr in prs if pr.get("story_points")),
+            "story_point_coverage": sum(1 for pr in prs if pr.get("story_points")) / len(prs) * 100,
+        }
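
The GitHubIntegration changes above are mostly mechanical reformatting (double-quoted strings, built-in generics like list[dict[str, Any]]), plus two behavioral additions: a new allowed_ticket_platforms option that is passed through to TicketExtractor, and a guard in _get_pull_requests that coerces naive since datetimes to UTC before comparing them with PyGithub's timezone-aware timestamps. A minimal usage sketch of the updated API follows; the GitAnalysisCache constructor arguments are not shown in this diff, so the cache directory argument below is an assumption, and the token and repository name are placeholders.

from datetime import datetime, timedelta, timezone

from gitflow_analytics.core.cache import GitAnalysisCache
from gitflow_analytics.integrations.github_integration import GitHubIntegration

cache = GitAnalysisCache(".gitflow-cache")  # assumed constructor; see core/cache.py
integration = GitHubIntegration(
    token="<github-token>",
    cache=cache,
    allowed_ticket_platforms=["jira"],  # new in 1.0.3; None keeps all platforms
)

# Naive datetimes are now coerced to UTC inside _get_pull_requests, but
# passing an aware datetime keeps the comparison explicit.
since = datetime.now(timezone.utc) - timedelta(weeks=12)
prs = integration.enrich_repository_with_prs("owner/repo", commits=[], since=since)
print(integration.calculate_pr_metrics(prs))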
gitflow_analytics/integrations/jira_integration.py
@@ -0,0 +1,284 @@
+"""JIRA API integration for story point and ticket enrichment."""
+
+import base64
+from typing import Any, Optional
+
+import requests
+from requests.exceptions import RequestException
+
+from ..core.cache import GitAnalysisCache
+
+
+class JIRAIntegration:
+    """Integrate with JIRA API for ticket and story point data."""
+
+    def __init__(
+        self,
+        base_url: str,
+        username: str,
+        api_token: str,
+        cache: GitAnalysisCache,
+        story_point_fields: Optional[list[str]] = None,
+    ):
+        """Initialize JIRA integration.
+
+        Args:
+            base_url: JIRA instance base URL (e.g., https://company.atlassian.net)
+            username: JIRA username/email
+            api_token: JIRA API token
+            cache: Git analysis cache for storing JIRA data
+            story_point_fields: List of custom field IDs for story points
+        """
+        self.base_url = base_url.rstrip("/")
+        self.cache = cache
+
+        # Set up authentication
+        credentials = base64.b64encode(f"{username}:{api_token}".encode()).decode()
+        self.headers = {
+            "Authorization": f"Basic {credentials}",
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+        }
+
+        # Default story point field names/IDs
+        self.story_point_fields = story_point_fields or [
+            "customfield_10016",  # Common story points field
+            "customfield_10021",  # Alternative field
+            "Story Points",  # Field name
+            "storypoints",  # Alternative name
+            "customfield_10002",  # Another common ID
+        ]
+
+        # Cache for field mapping
+        self._field_mapping = None
+
+    def enrich_commits_with_jira_data(self, commits: list[dict[str, Any]]) -> None:
+        """Enrich commits with JIRA story points by looking up ticket references.
+
+        Args:
+            commits: List of commit dictionaries to enrich
+        """
+        # Collect all unique JIRA tickets from commits
+        jira_tickets = set()
+        for commit in commits:
+            ticket_refs = commit.get("ticket_references", [])
+            for ref in ticket_refs:
+                if isinstance(ref, dict) and ref.get("platform") == "jira":
+                    jira_tickets.add(ref["id"])
+                elif isinstance(ref, str) and self._is_jira_ticket(ref):
+                    jira_tickets.add(ref)
+
+        if not jira_tickets:
+            return
+
+        # Fetch ticket data from JIRA
+        ticket_data = self._fetch_tickets_batch(list(jira_tickets))
+
+        # Enrich commits with story points
+        for commit in commits:
+            commit_story_points = 0
+            ticket_refs = commit.get("ticket_references", [])
+
+            for ref in ticket_refs:
+                ticket_id = None
+                if isinstance(ref, dict) and ref.get("platform") == "jira":
+                    ticket_id = ref["id"]
+                elif isinstance(ref, str) and self._is_jira_ticket(ref):
+                    ticket_id = ref
+
+                if ticket_id and ticket_id in ticket_data:
+                    points = ticket_data[ticket_id].get("story_points", 0)
+                    if points:
+                        commit_story_points = max(commit_story_points, points)
+
+            if commit_story_points > 0:
+                commit["story_points"] = commit_story_points
+
+    def enrich_prs_with_jira_data(self, prs: list[dict[str, Any]]) -> None:
+        """Enrich PRs with JIRA story points.
+
+        Args:
+            prs: List of PR dictionaries to enrich
+        """
+        # Similar to commits, extract JIRA tickets from PR titles/descriptions
+        for pr in prs:
+            pr_text = f"{pr.get('title', '')} {pr.get('description', '')}"
+            jira_tickets = self._extract_jira_tickets(pr_text)
+
+            if jira_tickets:
+                ticket_data = self._fetch_tickets_batch(list(jira_tickets))
+
+                # Use the highest story point value found
+                max_points = 0
+                for ticket_id in jira_tickets:
+                    if ticket_id in ticket_data:
+                        points = ticket_data[ticket_id].get("story_points", 0)
+                        max_points = max(max_points, points)
+
+                if max_points > 0:
+                    pr["story_points"] = max_points
+
+    def _fetch_tickets_batch(self, ticket_ids: list[str]) -> dict[str, dict[str, Any]]:
+        """Fetch multiple tickets from JIRA API.
+
+        Args:
+            ticket_ids: List of JIRA ticket IDs
+
+        Returns:
+            Dictionary mapping ticket ID to ticket data
+        """
+        if not ticket_ids:
+            return {}
+
+        # Check cache first
+        cached_tickets = {}
+        tickets_to_fetch = []
+
+        for ticket_id in ticket_ids:
+            cached = self._get_cached_ticket(ticket_id)
+            if cached:
+                cached_tickets[ticket_id] = cached
+            else:
+                tickets_to_fetch.append(ticket_id)
+
+        # Fetch missing tickets from JIRA
+        if tickets_to_fetch:
+            # JIRA JQL has a limit, so batch the requests
+            batch_size = 50
+            for i in range(0, len(tickets_to_fetch), batch_size):
+                batch = tickets_to_fetch[i : i + batch_size]
+                jql = f"key in ({','.join(batch)})"
+
+                try:
+                    response = requests.get(
+                        f"{self.base_url}/rest/api/3/search",
+                        headers=self.headers,
+                        params={
+                            "jql": jql,
+                            "fields": "*all",  # Get all fields to find story points
+                            "maxResults": batch_size,
+                        },
+                    )
+                    response.raise_for_status()
+
+                    data = response.json()
+                    for issue in data.get("issues", []):
+                        ticket_data = self._extract_ticket_data(issue)
+                        cached_tickets[ticket_data["id"]] = ticket_data
+                        self._cache_ticket(ticket_data["id"], ticket_data)
+
+                except RequestException as e:
+                    print(f" ⚠️ Failed to fetch JIRA tickets: {e}")
+
+        return cached_tickets
+
+    def _extract_ticket_data(self, issue: dict[str, Any]) -> dict[str, Any]:
+        """Extract relevant data from JIRA issue.
+
+        Args:
+            issue: JIRA issue data from API
+
+        Returns:
+            Dictionary with extracted ticket data
+        """
+        fields = issue.get("fields", {})
+
+        # Extract story points from various possible fields
+        story_points = 0
+        for field_id in self.story_point_fields:
+            if field_id in fields and fields[field_id] is not None:
+                try:
+                    story_points = float(fields[field_id])
+                    break
+                except (ValueError, TypeError):
+                    continue
+
+        return {
+            "id": issue["key"],
+            "summary": fields.get("summary", ""),
+            "status": fields.get("status", {}).get("name", ""),
+            "story_points": int(story_points) if story_points else 0,
+            "assignee": (
+                fields.get("assignee", {}).get("displayName", "") if fields.get("assignee") else ""
+            ),
+            "created": fields.get("created", ""),
+            "updated": fields.get("updated", ""),
+        }
+
+    def _is_jira_ticket(self, text: str) -> bool:
+        """Check if text matches JIRA ticket pattern."""
+        import re
+
+        return bool(re.match(r"^[A-Z]{2,10}-\d+$", text))
+
+    def _extract_jira_tickets(self, text: str) -> set[str]:
+        """Extract JIRA ticket IDs from text."""
+        import re
+
+        pattern = r"([A-Z]{2,10}-\d+)"
+        matches = re.findall(pattern, text)
+        return set(matches)
+
+    def _get_cached_ticket(self, ticket_id: str) -> Optional[dict[str, Any]]:
+        """Get ticket data from cache."""
+        # TODO: Implement cache lookup using self.cache
+        # For now, return None to always fetch from API
+        return None
+
+    def _cache_ticket(self, ticket_id: str, ticket_data: dict[str, Any]) -> None:
+        """Cache ticket data."""
+        # TODO: Implement cache storage using self.cache
+        pass
+
+    def validate_connection(self) -> bool:
+        """Validate JIRA connection and credentials.
+
+        Returns:
+            True if connection is valid
+        """
+        try:
+            response = requests.get(f"{self.base_url}/rest/api/3/myself", headers=self.headers)
+            response.raise_for_status()
+            return True
+        except RequestException as e:
+            print(f" ❌ JIRA connection failed: {e}")
+            return False
+
+    def discover_fields(self) -> dict[str, dict[str, str]]:
+        """Discover all available fields in JIRA instance.
+
+        Returns:
+            Dictionary mapping field IDs to their names and types
+        """
+        try:
+            response = requests.get(f"{self.base_url}/rest/api/3/field", headers=self.headers)
+            response.raise_for_status()
+
+            fields = {}
+            for field in response.json():
+                field_id = field.get("id", "")
+                field_name = field.get("name", "")
+                field_type = (
+                    field.get("schema", {}).get("type", "unknown")
+                    if field.get("schema")
+                    else "unknown"
+                )
+
+                # Look for potential story point fields
+                if any(
+                    term in field_name.lower() for term in ["story", "point", "estimate", "size"]
+                ):
+                    fields[field_id] = {
+                        "name": field_name,
+                        "type": field_type,
+                        "is_custom": field.get("custom", False),
+                    }
+                    print(
+                        f" 📊 Potential story point field: {field_id} = '{field_name}' (type: {field_type})"
+                    )
+
+            return fields
+
+        except RequestException as e:
+            print(f" ⚠️ Failed to discover JIRA fields: {e}")
+            return {}
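
jira_integration.py is entirely new in 1.0.3. It resolves JIRA ticket references found in commits and PRs to story points via the JIRA REST v3 search API; note that the per-ticket cache hooks (_get_cached_ticket / _cache_ticket) are still TODO stubs, so every run currently hits the API. A minimal sketch of how the class would be wired up, under the same assumption about the GitAnalysisCache constructor as above; the credentials, field ID, and PROJ-123 ticket are illustrative placeholders.

from gitflow_analytics.core.cache import GitAnalysisCache
from gitflow_analytics.integrations.jira_integration import JIRAIntegration

jira = JIRAIntegration(
    base_url="https://company.atlassian.net",
    username="user@company.com",
    api_token="<jira-api-token>",
    cache=GitAnalysisCache(".gitflow-cache"),  # assumed constructor signature
    story_point_fields=["customfield_10016"],  # optional; overrides the defaults
)

if jira.validate_connection():
    # Print candidate story point fields discovered on this JIRA instance.
    jira.discover_fields()

    # PROJ-123 is a hypothetical ticket; if it carries an estimate, the
    # commit dict gains a "story_points" key in place.
    commits = [{"ticket_references": ["PROJ-123"]}]
    jira.enrich_commits_with_jira_data(commits)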