mcp-ticketer 0.1.38__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
@@ -1,6 +1,6 @@
 """Version information for mcp-ticketer package."""
 
-__version__ = "0.1.38"
+__version__ = "0.2.0"
 __version_info__ = tuple(int(part) for part in __version__.split("."))
 
 # Package metadata
@@ -4,13 +4,14 @@ import builtins
 import os
 import re
 from datetime import datetime
-from typing import Any, Optional
+from typing import Any, Dict, List, Optional
 
 import httpx
 
 from ..core.adapter import BaseAdapter
 from ..core.models import Comment, Epic, Priority, SearchQuery, Task, TicketState
 from ..core.registry import AdapterRegistry
+from ..core.env_loader import load_adapter_config, validate_adapter_config
 
 
 class GitHubStateMapping:
@@ -151,21 +152,22 @@ class GitHubAdapter(BaseAdapter[Task]):
         """
         super().__init__(config)
 
+        # Load configuration with environment variable resolution
+        full_config = load_adapter_config("github", config)
+
+        # Validate required configuration
+        missing_keys = validate_adapter_config("github", full_config)
+        if missing_keys:
+            raise ValueError(f"GitHub adapter missing required configuration: {', '.join(missing_keys)}")
+
         # Get authentication token - support both 'api_key' and 'token' for compatibility
         self.token = (
-            config.get("api_key") or config.get("token") or os.getenv("GITHUB_TOKEN")
+            full_config.get("api_key") or full_config.get("token") or full_config.get("token")
         )
-        if not self.token:
-            raise ValueError(
-                "GitHub token required (config.api_key, config.token or GITHUB_TOKEN env var)"
-            )
 
         # Get repository information
-        self.owner = config.get("owner") or os.getenv("GITHUB_OWNER")
-        self.repo = config.get("repo") or os.getenv("GITHUB_REPO")
-
-        if not self.owner or not self.repo:
-            raise ValueError("GitHub owner and repo are required")
+        self.owner = full_config.get("owner")
+        self.repo = full_config.get("repo")
 
         # API URLs
         self.api_url = config.get("api_url", "https://api.github.com")
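
Illustrative aside (not part of the published diff): a minimal sketch of constructing the 0.2.0 GitHub adapter under the new env_loader-based flow. The import path, and the assumption that load_adapter_config resolves environment variables such as GITHUB_TOKEN, GITHUB_OWNER, and GITHUB_REPO (which the 0.1.38 code read directly), are not confirmed by this diff.

    # Illustrative sketch only; module path and env-var resolution are assumptions.
    from mcp_ticketer.adapters.github import GitHubAdapter

    config = {
        "api_key": "ghp_example_token",  # "token" is also accepted, per the hunk above
        "owner": "example-org",
        "repo": "example-repo",
    }

    try:
        adapter = GitHubAdapter(config)
    except ValueError as exc:
        # In 0.2.0, missing settings surface as one aggregated error
        # ("GitHub adapter missing required configuration: ..."),
        # replacing the separate token/owner/repo checks removed above.
        print(exc)
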
@@ -1329,6 +1331,20 @@ Fixes #{issue_number}
             "message": f"Successfully linked PR #{pr_number} to issue #{issue_number}",
         }
 
+    async def get_collaborators(self) -> List[Dict[str, Any]]:
+        """Get repository collaborators."""
+        response = await self.client.get(
+            f"/repos/{self.owner}/{self.repo}/collaborators"
+        )
+        response.raise_for_status()
+        return response.json()
+
+    async def get_current_user(self) -> Optional[Dict[str, Any]]:
+        """Get current authenticated user information."""
+        response = await self.client.get("/user")
+        response.raise_for_status()
+        return response.json()
+
     async def close(self) -> None:
         """Close the HTTP client connection."""
         await self.client.aclose()
@@ -4,9 +4,10 @@ import asyncio
 import builtins
 import logging
 import os
+import re
 from datetime import datetime
 from enum import Enum
-from typing import Any, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 import httpx
 from httpx import AsyncClient, HTTPStatusError, TimeoutException
@@ -14,10 +15,79 @@ from httpx import AsyncClient, HTTPStatusError, TimeoutException
 from ..core.adapter import BaseAdapter
 from ..core.models import Comment, Epic, Priority, SearchQuery, Task, TicketState
 from ..core.registry import AdapterRegistry
+from ..core.env_loader import load_adapter_config, validate_adapter_config
 
 logger = logging.getLogger(__name__)
 
 
+def parse_jira_datetime(date_str: str) -> Optional[datetime]:
+    """
+    Parse JIRA datetime strings which can be in various formats.
+
+    JIRA can return dates in formats like:
+    - 2025-10-24T14:12:18.771-0400
+    - 2025-10-24T14:12:18.771Z
+    - 2025-10-24T14:12:18.771+00:00
+    """
+    if not date_str:
+        return None
+
+    try:
+        # Handle Z timezone
+        if date_str.endswith('Z'):
+            return datetime.fromisoformat(date_str.replace('Z', '+00:00'))
+
+        # Handle timezone formats like -0400, +0500 (need to add colon)
+        if re.match(r'.*[+-]\d{4}$', date_str):
+            # Insert colon in timezone: -0400 -> -04:00
+            date_str = re.sub(r'([+-]\d{2})(\d{2})$', r'\1:\2', date_str)
+
+        return datetime.fromisoformat(date_str)
+
+    except (ValueError, TypeError) as e:
+        logger.warning(f"Failed to parse JIRA datetime '{date_str}': {e}")
+        return None
+
+
+def extract_text_from_adf(adf_content: Union[str, Dict[str, Any]]) -> str:
+    """
+    Extract plain text from Atlassian Document Format (ADF).
+
+    Args:
+        adf_content: Either a string (already plain text) or ADF document dict
+
+    Returns:
+        Plain text string extracted from the ADF content
+    """
+    if isinstance(adf_content, str):
+        return adf_content
+
+    if not isinstance(adf_content, dict):
+        return str(adf_content) if adf_content else ""
+
+    def extract_text_recursive(node: Dict[str, Any]) -> str:
+        """Recursively extract text from ADF nodes."""
+        if not isinstance(node, dict):
+            return ""
+
+        # If this is a text node, return its text
+        if node.get("type") == "text":
+            return node.get("text", "")
+
+        # If this node has content, process it recursively
+        content = node.get("content", [])
+        if isinstance(content, list):
+            return "".join(extract_text_recursive(child) for child in content)
+
+        return ""
+
+    try:
+        return extract_text_recursive(adf_content)
+    except Exception as e:
+        logger.warning(f"Failed to extract text from ADF: {e}")
+        return str(adf_content) if adf_content else ""
+
+
 class JiraIssueType(str, Enum):
     """Common JIRA issue types."""
 
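
Illustrative aside (not part of the published diff): a few worked calls against the parse_jira_datetime helper added above, assuming it can be imported from the JIRA adapter module (import path assumed).

    # Illustrative sketch only; the import path is an assumption.
    from datetime import timezone

    from mcp_ticketer.adapters.jira import parse_jira_datetime

    # Offsets without a colon (-0400) are rewritten to -04:00 before parsing.
    dt = parse_jira_datetime("2025-10-24T14:12:18.771-0400")
    assert dt is not None and dt.utcoffset() is not None

    # A trailing "Z" is normalized to an explicit UTC offset.
    assert parse_jira_datetime("2025-10-24T14:12:18.771Z").tzinfo == timezone.utc

    # Empty or unparseable input returns None (the latter logs a warning).
    assert parse_jira_datetime("") is None
    assert parse_jira_datetime("not-a-date") is None
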
@@ -60,21 +130,23 @@ class JiraAdapter(BaseAdapter[Union[Epic, Task]]):
         """
         super().__init__(config)
 
-        # Configuration
-        self.server = config.get("server") or os.getenv("JIRA_SERVER", "")
-        self.email = config.get("email") or os.getenv("JIRA_EMAIL", "")
-        self.api_token = config.get("api_token") or os.getenv("JIRA_API_TOKEN", "")
-        self.project_key = config.get("project_key") or os.getenv(
-            "JIRA_PROJECT_KEY", ""
-        )
-        self.is_cloud = config.get("cloud", True)
-        self.verify_ssl = config.get("verify_ssl", True)
-        self.timeout = config.get("timeout", 30)
-        self.max_retries = config.get("max_retries", 3)
+        # Load configuration with environment variable resolution
+        full_config = load_adapter_config("jira", config)
+
+        # Validate required configuration
+        missing_keys = validate_adapter_config("jira", full_config)
+        if missing_keys:
+            raise ValueError(f"JIRA adapter missing required configuration: {', '.join(missing_keys)}")
 
-        # Validate required fields
-        if not all([self.server, self.email, self.api_token]):
-            raise ValueError("JIRA adapter requires server, email, and api_token")
+        # Configuration
+        self.server = full_config.get("server", "")
+        self.email = full_config.get("email", "")
+        self.api_token = full_config.get("api_token", "")
+        self.project_key = full_config.get("project_key", "")
+        self.is_cloud = full_config.get("cloud", True)
+        self.verify_ssl = full_config.get("verify_ssl", True)
+        self.timeout = full_config.get("timeout", 30)
+        self.max_retries = full_config.get("max_retries", 3)
 
         # Clean up server URL
         self.server = self.server.rstrip("/")
@@ -382,16 +454,8 @@ class JiraAdapter(BaseAdapter[Union[Epic, Task]]):
                 label.get("name", "") if isinstance(label, dict) else str(label)
                 for label in fields.get("labels", [])
             ],
-            "created_at": (
-                datetime.fromisoformat(fields.get("created", "").replace("Z", "+00:00"))
-                if fields.get("created")
-                else None
-            ),
-            "updated_at": (
-                datetime.fromisoformat(fields.get("updated", "").replace("Z", "+00:00"))
-                if fields.get("updated")
-                else None
-            ),
+            "created_at": parse_jira_datetime(fields.get("created")),
+            "updated_at": parse_jira_datetime(fields.get("updated")),
             "metadata": {
                 "jira": {
                     "id": issue.get("id"),
@@ -457,9 +521,12 @@ class JiraAdapter(BaseAdapter[Union[Epic, Task]]):
             "summary": ticket.title,
             "description": description,
             "labels": ticket.tags,
-            "priority": {"name": self._map_priority_to_jira(ticket.priority)},
         }
 
+        # Only add priority for Tasks, not Epics (some JIRA configurations don't allow priority on Epics)
+        if isinstance(ticket, Task):
+            fields["priority"] = {"name": self._map_priority_to_jira(ticket.priority)}
+
         # Add project if creating new issue
         if not ticket.id and self.project_key:
             fields["project"] = {"key": self.project_key}
@@ -608,16 +675,16 @@ class JiraAdapter(BaseAdapter[Union[Epic, Task]]):
 
         jql = " AND ".join(jql_parts) if jql_parts else "ORDER BY created DESC"
 
-        # Search issues using the new API endpoint
+        # Search issues using the JIRA API endpoint
         data = await self._make_request(
-            "POST",
-            "search/jql",  # Updated to use new API endpoint
-            data={
+            "GET",
+            "search/jql",  # JIRA search endpoint (new API v3)
+            params={
                 "jql": jql,
                 "startAt": offset,
                 "maxResults": limit,
-                "fields": ["*all"],
-                "expand": ["renderedFields"],
+                "fields": "*all",
+                "expand": "renderedFields",
             },
         )
 
@@ -658,16 +725,16 @@ class JiraAdapter(BaseAdapter[Union[Epic, Task]]):
 
         jql = " AND ".join(jql_parts) if jql_parts else "ORDER BY created DESC"
 
-        # Execute search using the new API endpoint
+        # Execute search using the JIRA API endpoint
        data = await self._make_request(
-            "POST",
-            "search/jql",  # Updated to use new API endpoint
-            data={
+            "GET",
+            "search/jql",  # JIRA search endpoint (new API v3)
+            params={
                 "jql": jql,
                 "startAt": query.offset,
                 "maxResults": query.limit,
-                "fields": ["*all"],
-                "expand": ["renderedFields"],
+                "fields": "*all",
+                "expand": "renderedFields",
             },
         )
 
@@ -728,8 +795,24 @@ class JiraAdapter(BaseAdapter[Union[Epic, Task]]):
 
     async def add_comment(self, comment: Comment) -> Comment:
         """Add a comment to a JIRA issue."""
-        # Prepare comment data
-        data = {"body": comment.content}
+        # Prepare comment data in Atlassian Document Format
+        data = {
+            "body": {
+                "type": "doc",
+                "version": 1,
+                "content": [
+                    {
+                        "type": "paragraph",
+                        "content": [
+                            {
+                                "type": "text",
+                                "text": comment.content
+                            }
+                        ]
+                    }
+                ]
+            }
+        }
 
         # Add comment
         result = await self._make_request(
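
Illustrative aside (not part of the published diff): the comment body built above is a minimal Atlassian Document Format (ADF) document, and the extract_text_from_adf helper added earlier in this diff (used by the comment-reading path below) flattens such a body back to plain text. A small sketch, with the import path assumed:

    # Illustrative sketch only; the import path is an assumption.
    from mcp_ticketer.adapters.jira import extract_text_from_adf

    comment_text = "Deployed the fix to staging."

    # Same shape as the payload add_comment() now sends.
    adf_body = {
        "type": "doc",
        "version": 1,
        "content": [
            {"type": "paragraph", "content": [{"type": "text", "text": comment_text}]}
        ],
    }

    assert extract_text_from_adf(adf_body) == comment_text
    assert extract_text_from_adf("already plain text") == "already plain text"
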
@@ -738,11 +821,7 @@ class JiraAdapter(BaseAdapter[Union[Epic, Task]]):
 
         # Update comment with JIRA data
         comment.id = result.get("id")
-        comment.created_at = (
-            datetime.fromisoformat(result.get("created", "").replace("Z", "+00:00"))
-            if result.get("created")
-            else datetime.now()
-        )
+        comment.created_at = parse_jira_datetime(result.get("created")) or datetime.now()
         comment.author = result.get("author", {}).get("displayName", comment.author)
         comment.metadata["jira"] = result
 
@@ -766,18 +845,16 @@ class JiraAdapter(BaseAdapter[Union[Epic, Task]]):
         # Convert to Comment objects
         comments = []
         for comment_data in paginated:
+            # Extract text content from ADF format
+            body_content = comment_data.get("body", "")
+            text_content = extract_text_from_adf(body_content)
+
             comment = Comment(
                 id=comment_data.get("id"),
                 ticket_id=ticket_id,
                 author=comment_data.get("author", {}).get("displayName", "Unknown"),
-                content=comment_data.get("body", ""),
-                created_at=(
-                    datetime.fromisoformat(
-                        comment_data.get("created", "").replace("Z", "+00:00")
-                    )
-                    if comment_data.get("created")
-                    else None
-                ),
+                content=text_content,
+                created_at=parse_jira_datetime(comment_data.get("created")),
                 metadata={"jira": comment_data},
             )
             comments.append(comment)
@@ -866,6 +943,61 @@ class JiraAdapter(BaseAdapter[Union[Epic, Task]]):
 
         return sprints_data.get("values", [])
 
+    async def get_project_users(self) -> List[Dict[str, Any]]:
+        """Get users who have access to the project."""
+        if not self.project_key:
+            return []
+
+        try:
+            # Get project role users
+            project_data = await self._make_request("GET", f"project/{self.project_key}")
+
+            # Get users from project roles
+            users = []
+            if "roles" in project_data:
+                for role_name, role_url in project_data["roles"].items():
+                    # Extract role ID from URL
+                    role_id = role_url.split("/")[-1]
+                    try:
+                        role_data = await self._make_request("GET", f"project/{self.project_key}/role/{role_id}")
+                        if "actors" in role_data:
+                            for actor in role_data["actors"]:
+                                if actor.get("type") == "atlassian-user-role-actor":
+                                    users.append(actor.get("actorUser", {}))
+                    except Exception:
+                        # Skip if role access fails
+                        continue
+
+            # Remove duplicates based on accountId
+            seen_ids = set()
+            unique_users = []
+            for user in users:
+                account_id = user.get("accountId")
+                if account_id and account_id not in seen_ids:
+                    seen_ids.add(account_id)
+                    unique_users.append(user)
+
+            return unique_users
+
+        except Exception:
+            # Fallback: try to get assignable users for the project
+            try:
+                users_data = await self._make_request(
+                    "GET",
+                    "user/assignable/search",
+                    params={"project": self.project_key, "maxResults": 50}
+                )
+                return users_data if isinstance(users_data, list) else []
+            except Exception:
+                return []
+
+    async def get_current_user(self) -> Optional[Dict[str, Any]]:
+        """Get current authenticated user information."""
+        try:
+            return await self._make_request("GET", "myself")
+        except Exception:
+            return None
+
     async def close(self) -> None:
         """Close the adapter and cleanup resources."""
         # Clear caches
@@ -0,0 +1,24 @@
+"""Linear adapter for MCP Ticketer.
+
+This module provides integration with Linear's GraphQL API for universal ticket management.
+The adapter is split into multiple modules for better organization:
+
+- adapter.py: Main LinearAdapter class with core functionality
+- queries.py: GraphQL queries and fragments
+- types.py: Linear-specific types and mappings
+- client.py: GraphQL client management
+- mappers.py: Data transformation between Linear and universal models
+
+Usage:
+    from mcp_ticketer.adapters.linear import LinearAdapter
+
+    config = {
+        "api_key": "your_linear_api_key",
+        "team_id": "your_team_id"
+    }
+    adapter = LinearAdapter(config)
+"""
+
+from .adapter import LinearAdapter
+
+__all__ = ["LinearAdapter"]