xenfra-sdk 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,38 @@
1
+ # src/xenfra/dependencies.py
2
+
3
+ from fastapi import Depends, HTTPException, status
4
+ from fastapi.security import OAuth2PasswordBearer
5
+ from sqlmodel import Session, select
6
+ from xenfra.db.models import User
7
+ from xenfra.db.session import get_session
8
+ from xenfra.security import decode_token
9
+
10
# OAuth2 bearer-token scheme; clients obtain tokens from the /auth/token endpoint.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/auth/token")
11
+
12
+
13
def get_current_user(
    token: str = Depends(oauth2_scheme), session: Session = Depends(get_session)
) -> User:
    """Resolve the authenticated user from a bearer token.

    Decodes the JWT, reads the e-mail address from the ``sub`` claim, and
    loads the matching ``User`` row from the database.

    Args:
        token: Bearer token supplied by the OAuth2 scheme.
        session: Database session (injected).

    Returns:
        The ``User`` matching the token's subject.

    Raises:
        HTTPException: 401 when the token cannot be decoded, has no subject
            claim, or no matching user exists.
    """
    credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )
    payload = decode_token(token)
    if payload is None:
        raise credentials_exception

    # No `str` annotation here: the claim may legitimately be absent, and the
    # original `email: str = payload.get("sub")` promised a value it could not
    # guarantee.
    email = payload.get("sub")
    if email is None:
        raise credentials_exception

    user = session.exec(select(User).where(User.email == email)).first()
    if user is None:
        raise credentials_exception
    return user
33
+
34
+
35
def get_current_active_user(current_user: User = Depends(get_current_user)) -> User:
    """Return the authenticated user, rejecting deactivated accounts."""
    if current_user.is_active:
        return current_user
    raise HTTPException(status_code=400, detail="Inactive user")
@@ -0,0 +1,87 @@
1
+ import os
2
+ from pathlib import Path
3
+
4
+ from jinja2 import Environment, FileSystemLoader
5
+
6
+
7
def detect_framework(path="."):
    """
    Scans common Python project structures to guess the framework and entrypoint.

    Args:
        path: Project directory to inspect (defaults to the current directory).

    Returns:
        (framework_name, default_port, start_command), or (None, None, None)
        when no supported framework is recognised.
    """
    project_root = Path(path).resolve()

    # Django first: identified by manage.py in the project root.
    if (project_root / "manage.py").is_file():
        project_name = project_root.name
        return "django", 8000, f"gunicorn {project_name}.wsgi:application --bind 0.0.0.0:8000"

    # Collect candidate entrypoints: root-level files and src/<pkg>/ layout.
    candidate_files = []
    for name in ("main.py", "app.py"):
        if (project_root / name).is_file():
            candidate_files.append(project_root / name)
    for src_dir in project_root.glob("src/*"):
        if src_dir.is_dir():
            for name in ("main.py", "app.py"):
                if (src_dir / name).is_file():
                    candidate_files.append(src_dir / name)

    for file_path in candidate_files:
        # Explicit encoding: the original used the locale default, which can
        # raise UnicodeDecodeError on Windows for UTF-8 sources.
        content = file_path.read_text(encoding="utf-8")

        # Derive the dotted module path: strip the ".py" suffix and any
        # leading "src." package prefix.
        module_name = str(file_path.relative_to(project_root)).replace(os.sep, ".")[:-3]
        if module_name.startswith("src."):
            module_name = module_name[4:]

        if "FastAPI" in content:
            return "fastapi", 8000, f"uvicorn {module_name}:app --host 0.0.0.0 --port 8000"
        if "Flask" in content:
            return "flask", 5000, f"gunicorn {module_name}:app -b 0.0.0.0:5000"

    return None, None, None
48
+
49
+
50
def generate_templated_assets(context: dict):
    """
    Generates deployment assets (Dockerfile, docker-compose.yml) using Jinja2 templates.

    Args:
        context: A dictionary containing information for rendering templates,
            e.g., {'database': 'postgres', 'python_version': 'python:3.11-slim'}

    Returns:
        List of generated file names (empty when no framework was detected).
        Files are written into the current working directory.
    """
    # Templates ship alongside this module.
    template_dir = Path(__file__).parent / "templates"
    env = Environment(loader=FileSystemLoader(template_dir))

    # Detect framework specifics (port and start command).
    framework, port, command = detect_framework()
    if not framework:
        print("Warning: No recognizable web framework detected.")
        return []

    # Caller-supplied context wins over detected defaults.
    render_context = {"port": port, "command": command, **context}

    generated_files = []
    # One loop replaces the duplicated render/write code the original had for
    # each asset; each output file maps to "<name>.j2" in the template dir.
    for output_name in ("Dockerfile", "docker-compose.yml"):
        template = env.get_template(f"{output_name}.j2")
        with open(output_name, "w", encoding="utf-8") as f:
            f.write(template.render(render_context))
        generated_files.append(output_name)

    return generated_files
xenfra_sdk/engine.py ADDED
@@ -0,0 +1,388 @@
1
+ # src/xenfra/engine.py
2
+
3
+ import os
4
+ import time
5
+ from datetime import datetime
6
+ from pathlib import Path
7
+ from typing import Optional
8
+
9
+ import digitalocean
10
+ import fabric
11
+ from dotenv import load_dotenv
12
+ from sqlmodel import Session, select
13
+
14
+ # Xenfra modules
15
+ from . import dockerizer, recipes
16
+ from .db.models import Project
17
+ from .db.session import get_session
18
+
19
+
20
class DeploymentError(Exception):
    """Custom exception for deployment failures.

    Attributes:
        message: Human-readable description of what went wrong.
        stage: Name of the deployment phase that failed.
    """

    def __init__(self, message, stage="Unknown"):
        # Keep the raw pieces available for programmatic handling.
        self.message = message
        self.stage = stage
        summary = f"Deployment failed at stage '{stage}': {message}"
        super().__init__(summary)
27
+
28
+
29
class InfraEngine:
    """
    The InfraEngine is the core of Xenfra. It handles all interactions
    with the cloud provider and orchestrates the deployment lifecycle.
    """

    def __init__(self, token: Optional[str] = None, db_session: Optional[Session] = None):
        """
        Initializes the engine and validates the API token.

        Args:
            token: DigitalOcean API token; falls back to the
                DIGITAL_OCEAN_TOKEN environment variable.
            db_session: SQLModel session to use; a fresh one is pulled from
                get_session() when omitted.

        Raises:
            ValueError: If no API token could be found.
            ConnectionError: If the token is rejected or the API is unreachable.
        """
        load_dotenv()
        self.token = token or os.getenv("DIGITAL_OCEAN_TOKEN")
        self.db_session = db_session or next(get_session())

        if not self.token:
            raise ValueError(
                "DigitalOcean API token not found. Please set the DIGITAL_OCEAN_TOKEN environment variable."
            )
        try:
            self.manager = digitalocean.Manager(token=self.token)
            # Cheap authenticated call so a bad token fails fast, at construction.
            self.get_user_info()
        except Exception as e:
            # Chain the original error so the root cause stays visible
            # (the original re-wrap dropped the cause).
            raise ConnectionError(f"Failed to connect to DigitalOcean: {e}") from e
52
+
53
+ def _get_connection(self, ip_address: str):
54
+ """Establishes a Fabric connection to the server."""
55
+ private_key_path = str(Path.home() / ".ssh" / "id_rsa")
56
+ if not Path(private_key_path).exists():
57
+ raise DeploymentError("No private SSH key found at ~/.ssh/id_rsa.", stage="Setup")
58
+
59
+ return fabric.Connection(
60
+ host=ip_address,
61
+ user="root",
62
+ connect_kwargs={"key_filename": [private_key_path]},
63
+ )
64
+
65
    def get_user_info(self):
        """Retrieves user account information.

        Also doubles as a lightweight credential check: __init__ calls this
        to validate the API token at construction time.

        Returns:
            The DigitalOcean account object for the authenticated user.
        """
        return self.manager.get_account()
68
+
69
    def list_servers(self):
        """Retrieves a list of all Droplets.

        Returns:
            All droplets on the account — not just xenfra-tagged ones
            (contrast with sync_with_provider, which filters by tag).
        """
        return self.manager.get_all_droplets()
72
+
73
+ def destroy_server(self, droplet_id: int, db_session: Session = None):
74
+ """Destroys a Droplet by its ID and removes it from the local DB."""
75
+ session = db_session or self.db_session
76
+
77
+ # Find the project in the local DB
78
+ statement = select(Project).where(Project.droplet_id == droplet_id)
79
+ project_to_delete = session.exec(statement).first()
80
+
81
+ # Destroy the droplet on DigitalOcean
82
+ droplet = digitalocean.Droplet(token=self.token, id=droplet_id)
83
+ droplet.destroy()
84
+
85
+ # If it was in our DB, delete it
86
+ if project_to_delete:
87
+ session.delete(project_to_delete)
88
+ session.commit()
89
+
90
+ def list_projects_from_db(self, db_session: Session = None):
91
+ """Lists all projects from the local database."""
92
+ session = db_session or self.db_session
93
+ statement = select(Project)
94
+ return session.exec(statement).all()
95
+
96
+ def sync_with_provider(self, db_session: Session = None):
97
+ """Reconciles the local database with the live state from DigitalOcean."""
98
+ session = db_session or self.db_session
99
+
100
+ # 1. Get live and local states
101
+ live_droplets = self.manager.get_all_droplets(tag_name="xenfra")
102
+ local_projects = self.list_projects_from_db(session)
103
+
104
+ live_map = {d.id: d for d in live_droplets}
105
+ local_map = {p.droplet_id: p for p in local_projects}
106
+
107
+ # 2. Reconcile
108
+ # Add new servers found on DO to our DB
109
+ for droplet_id, droplet in live_map.items():
110
+ if droplet_id not in local_map:
111
+ new_project = Project(
112
+ droplet_id=droplet.id,
113
+ name=droplet.name,
114
+ ip_address=droplet.ip_address,
115
+ status=droplet.status,
116
+ region=droplet.region["slug"],
117
+ size=droplet.size_slug,
118
+ )
119
+ session.add(new_project)
120
+
121
+ # Remove servers from our DB that no longer exist on DO
122
+ for project_id, project in local_map.items():
123
+ if project_id not in live_map:
124
+ session.delete(project)
125
+
126
+ session.commit()
127
+ return self.list_projects_from_db(session)
128
+
129
+ def stream_logs(self, droplet_id: int, db_session: Session = None):
130
+ """
131
+ Verifies a server exists and streams its logs in real-time.
132
+ """
133
+ session = db_session or self.db_session
134
+
135
+ # 1. Find project in local DB
136
+ statement = select(Project).where(Project.droplet_id == droplet_id)
137
+ project = session.exec(statement).first()
138
+ if not project:
139
+ raise DeploymentError(
140
+ f"Project with Droplet ID {droplet_id} not found in local database.",
141
+ stage="Log Streaming",
142
+ )
143
+
144
+ # 2. Just-in-Time Verification
145
+ try:
146
+ droplet = self.manager.get_droplet(droplet_id)
147
+ except digitalocean.baseapi.DataReadError as e:
148
+ if e.response.status_code == 404:
149
+ # The droplet doesn't exist, so remove it from our DB
150
+ session.delete(project)
151
+ session.commit()
152
+ raise DeploymentError(
153
+ f"Server '{project.name}' (ID: {droplet_id}) no longer exists on DigitalOcean. It has been removed from your local list.",
154
+ stage="Log Streaming",
155
+ )
156
+ else:
157
+ raise e
158
+
159
+ # 3. Stream logs
160
+ ip_address = droplet.ip_address
161
+ with self._get_connection(ip_address) as conn:
162
+ conn.run("cd /root/app && docker-compose logs -f app", pty=True)
163
+
164
+ def get_account_balance(self) -> dict:
165
+ """
166
+ Retrieves the current account balance from DigitalOcean.
167
+ Placeholder: Actual implementation needed.
168
+ """
169
+ # In a real scenario, this would call the DigitalOcean API for billing info
170
+ # For now, return mock data
171
+ return {
172
+ "month_to_date_balance": "0.00",
173
+ "account_balance": "0.00",
174
+ "month_to_date_usage": "0.00",
175
+ "generated_at": datetime.now().isoformat(),
176
+ }
177
+
178
+ def get_droplet_cost_estimates(self) -> list:
179
+ """
180
+ Retrieves a list of Xenfra-managed DigitalOcean droplets with their estimated monthly costs.
181
+ Placeholder: Actual implementation needed.
182
+ """
183
+ # In a real scenario, this would list droplets and calculate costs
184
+ # For now, return mock data
185
+ return []
186
+
187
+ def _ensure_ssh_key(self, logger):
188
+ """Ensures a local public SSH key is on DigitalOcean."""
189
+ pub_key_path = Path.home() / ".ssh" / "id_rsa.pub"
190
+ if not pub_key_path.exists():
191
+ raise DeploymentError(
192
+ "No SSH key found at ~/.ssh/id_rsa.pub. Please generate one.", stage="Setup"
193
+ )
194
+
195
+ with open(pub_key_path) as f:
196
+ pub_key_content = f.read()
197
+
198
+ existing_keys = self.manager.get_all_sshkeys()
199
+ for key in existing_keys:
200
+ if key.public_key.strip() == pub_key_content.strip():
201
+ logger(" - Found existing SSH key on DigitalOcean.")
202
+ return key
203
+
204
+ logger(" - No matching SSH key found. Creating a new one on DigitalOcean...")
205
+ key = digitalocean.SSHKey(
206
+ token=self.token, name="xenfra-cli-key", public_key=pub_key_content
207
+ )
208
+ key.create()
209
+ return key
210
+
211
+ def deploy_server(
212
+ self,
213
+ name: str,
214
+ region: str,
215
+ size: str,
216
+ image: str,
217
+ logger: callable,
218
+ user_id: int,
219
+ email: str,
220
+ domain: Optional[str] = None,
221
+ repo_url: Optional[str] = None,
222
+ db_session: Session = None,
223
+ **kwargs,
224
+ ):
225
+ """A stateful, blocking orchestrator for deploying a new server."""
226
+ droplet = None
227
+ session = db_session or self.db_session
228
+ try:
229
+ # === 1. SETUP STAGE ===
230
+ logger("\n[bold blue]PHASE 1: SETUP[/bold blue]")
231
+ ssh_key = self._ensure_ssh_key(logger)
232
+
233
+ # === 2. ASSET GENERATION STAGE ===
234
+ logger("\n[bold blue]PHASE 2: GENERATING DEPLOYMENT ASSETS[/bold blue]")
235
+ context = {
236
+ "email": email,
237
+ "domain": domain,
238
+ "repo_url": repo_url,
239
+ **kwargs, # Pass db config, etc.
240
+ }
241
+ files = dockerizer.generate_templated_assets(context)
242
+ for file in files:
243
+ logger(f" - Generated {file}")
244
+
245
+ # === 3. CLOUD-INIT STAGE ===
246
+ logger("\n[bold blue]PHASE 3: CREATING SERVER SETUP SCRIPT[/bold blue]")
247
+ cloud_init_script = recipes.generate_stack(context)
248
+ logger(" - Generated cloud-init script.")
249
+ logger(
250
+ f"--- Cloud-init script content ---\n{cloud_init_script}\n---------------------------------"
251
+ )
252
+
253
+ # === 4. DROPLET CREATION STAGE ===
254
+ logger("\n[bold blue]PHASE 4: PROVISIONING SERVER[/bold blue]")
255
+ droplet = digitalocean.Droplet(
256
+ token=self.token,
257
+ name=name,
258
+ region=region,
259
+ image=image,
260
+ size_slug=size,
261
+ ssh_keys=[ssh_key],
262
+ userdata=cloud_init_script,
263
+ tags=["xenfra"],
264
+ )
265
+ droplet.create()
266
+ logger(
267
+ f" - Droplet '{name}' creation initiated (ID: {droplet.id}). Waiting for it to become active..."
268
+ )
269
+
270
+ # === 5. POLLING STAGE ===
271
+ logger("\n[bold blue]PHASE 5: WAITING FOR SERVER SETUP[/bold blue]")
272
+ while True:
273
+ droplet.load()
274
+ if droplet.status == "active":
275
+ logger(" - Droplet is active. Waiting for SSH to be available...")
276
+ break
277
+ time.sleep(10)
278
+
279
+ ip_address = droplet.ip_address
280
+
281
+ # Retry SSH connection
282
+ conn = None
283
+ max_retries = 12 # 2-minute timeout for SSH
284
+ for i in range(max_retries):
285
+ try:
286
+ logger(f" - Attempting SSH connection ({i + 1}/{max_retries})...")
287
+ conn = self._get_connection(ip_address)
288
+ conn.open() # Explicitly open the connection
289
+ logger(" - SSH connection established.")
290
+ break
291
+ except Exception as e:
292
+ if i < max_retries - 1:
293
+ logger(" - SSH connection failed. Retrying in 10s...")
294
+ time.sleep(10)
295
+ else:
296
+ raise DeploymentError(
297
+ f"Failed to establish SSH connection: {e}", stage="Polling"
298
+ )
299
+
300
+ if not conn or not conn.is_connected:
301
+ raise DeploymentError("Could not establish SSH connection.", stage="Polling")
302
+
303
+ with conn:
304
+ for i in range(30): # 5-minute timeout for cloud-init
305
+ if conn.run("test -f /root/setup_complete", warn=True).ok:
306
+ logger(" - Cloud-init setup complete.")
307
+ break
308
+ time.sleep(10)
309
+ else:
310
+ raise DeploymentError(
311
+ "Server setup script failed to complete in time.", stage="Polling"
312
+ )
313
+
314
+ # === 6. CODE UPLOAD STAGE ===
315
+ logger("\n[bold blue]PHASE 6: UPLOADING APPLICATION CODE[/bold blue]")
316
+ with self._get_connection(ip_address) as conn:
317
+ # If repo_url is provided, clone it instead of uploading local code
318
+ if repo_url:
319
+ logger(f" - Cloning repository from {repo_url}...")
320
+ conn.run(f"git clone {repo_url} /root/app")
321
+ else:
322
+ fabric.transfer.Transfer(conn).upload(
323
+ ".", "/root/app", exclude=[".git", ".venv", "__pycache__"]
324
+ )
325
+ logger(" - Code upload complete.")
326
+
327
+ # === 7. FINAL DEPLOY STAGE ===
328
+ logger("\n[bold blue]PHASE 7: BUILDING AND DEPLOYING CONTAINERS[/bold blue]")
329
+ with self._get_connection(ip_address) as conn:
330
+ result = conn.run("cd /root/app && docker-compose up -d --build", hide=True)
331
+ if result.failed:
332
+ raise DeploymentError(f"docker-compose failed: {result.stderr}", stage="Deploy")
333
+ logger(" - Docker containers are building in the background...")
334
+
335
+ # === 8. VERIFICATION STAGE ===
336
+ logger("\n[bold blue]PHASE 8: VERIFYING DEPLOYMENT[/bold blue]")
337
+ app_port = context.get("port", 8000)
338
+ for i in range(24): # 2-minute timeout for health checks
339
+ logger(f" - Health check attempt {i + 1}/24...")
340
+ with self._get_connection(ip_address) as conn:
341
+ # Check if container is running
342
+ ps_result = conn.run("cd /root/app && docker-compose ps", hide=True)
343
+ if "running" not in ps_result.stdout:
344
+ time.sleep(5)
345
+ continue
346
+
347
+ # Check if application is responsive
348
+ curl_result = conn.run(
349
+ f"curl -s --fail http://localhost:{app_port}/", warn=True
350
+ )
351
+ if curl_result.ok:
352
+ logger(
353
+ "[bold green] - Health check passed! Application is live.[/bold green]"
354
+ )
355
+
356
+ # === 9. PERSISTENCE STAGE ===
357
+ logger("\n[bold blue]PHASE 9: SAVING DEPLOYMENT TO DATABASE[/bold blue]")
358
+ project = Project(
359
+ droplet_id=droplet.id,
360
+ name=droplet.name,
361
+ ip_address=ip_address,
362
+ status=droplet.status,
363
+ region=droplet.region["slug"],
364
+ size=droplet.size_slug,
365
+ user_id=user_id, # Save the user_id
366
+ )
367
+ session.add(project)
368
+ session.commit()
369
+ logger(" - Deployment saved.")
370
+
371
+ return droplet # Return the full droplet object
372
+ time.sleep(5)
373
+ else:
374
+ # On failure, get logs and destroy droplet
375
+ with self._get_connection(ip_address) as conn:
376
+ logs = conn.run("cd /root/app && docker-compose logs", hide=True).stdout
377
+ raise DeploymentError(
378
+ f"Application failed to become healthy in time. Logs:\n{logs}",
379
+ stage="Verification",
380
+ )
381
+
382
+ except Exception as e:
383
+ if droplet:
384
+ logger(
385
+ f"[bold red]Deployment failed. The server '{droplet.name}' will NOT be cleaned up for debugging purposes.[/bold red]"
386
+ )
387
+ # droplet.destroy() # Commented out for debugging
388
+ raise e
@@ -0,0 +1,19 @@
1
class XenfraError(Exception):
    """Base exception for all SDK errors."""


class AuthenticationError(XenfraError):
    """Raised for issues related to authentication."""


class XenfraAPIError(XenfraError):
    """Raised when the API returns a non-2xx status code."""

    def __init__(self, status_code: int, detail: str):
        # Keep the raw pieces available for programmatic handling.
        self.status_code = status_code
        self.detail = detail
        super().__init__(f"API Error {status_code}: {detail}")
@@ -0,0 +1,154 @@
1
+ # src/xenfra/mcp_client.py
2
+
3
+ import base64
4
+ import json
5
+ import os
6
+ import subprocess
7
+ import tempfile
8
+ from pathlib import Path
9
+
10
+
11
class MCPClient:
    """
    A client for communicating with a local github-mcp-server process.

    This client starts the MCP server as a subprocess and interacts with it
    over stdin and stdout to download a full repository to a temporary directory.
    """

    def __init__(self, mcp_server_path="github-mcp-server"):
        """
        Initializes the MCPClient.

        Args:
            mcp_server_path (str): The path to the github-mcp-server executable.
                                   Assumes it's in the system's PATH by default.
        """
        self.mcp_server_path = mcp_server_path
        self.process = None  # Popen handle; created lazily by _start_server

    def _start_server(self):
        """Starts the github-mcp-server subprocess (no-op if already running).

        Raises:
            RuntimeError: If the executable is missing or fails to launch.
        """
        if self.process and self.process.poll() is None:
            return
        try:
            self.process = subprocess.Popen(
                [self.mcp_server_path],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
                env=os.environ,
            )
        except FileNotFoundError:
            raise RuntimeError(
                f"'{self.mcp_server_path}' not found. Ensure github-mcp-server is installed and in your PATH."
            )
        except Exception as e:
            raise RuntimeError(f"Failed to start MCP server: {e}")

    def _stop_server(self):
        """Stops the MCP server process."""
        if self.process:
            self.process.terminate()
            self.process.wait()
            self.process = None

    def _send_request(self, method: str, params: dict) -> dict:
        """Sends a JSON-RPC request over stdin and returns the parsed result.

        Restarts the server if it is not running.

        Raises:
            RuntimeError: On transport failure or a JSON-RPC error response.
        """
        if not self.process or self.process.poll() is not None:
            self._start_server()

        request = {"jsonrpc": "2.0", "id": os.urandom(4).hex(), "method": method, "params": params}

        try:
            self.process.stdin.write(json.dumps(request) + "\n")
            self.process.stdin.flush()
            response_line = self.process.stdout.readline()
            if not response_line:
                error_output = self.process.stderr.read()
                raise RuntimeError(f"MCP server closed stream unexpectedly. Error: {error_output}")

            response = json.loads(response_line)
            if "error" in response:
                raise RuntimeError(f"MCP server returned an error: {response['error']}")
            return response.get("result", {})
        except (BrokenPipeError, json.JSONDecodeError) as e:
            raise RuntimeError(f"Failed to communicate with MCP server: {e}")

    def download_repo_to_tempdir(self, repo_url: str, commit_sha: str = "HEAD") -> str:
        """
        Downloads an entire repository at a specific commit to a local temporary directory.

        Args:
            repo_url (str): The full URL of the GitHub repository.
            commit_sha (str): The commit SHA to download. Defaults to "HEAD".

        Returns:
            The path to the temporary directory containing the downloaded code.

        Raises:
            ValueError: If the repository URL cannot be parsed.
            RuntimeError: If the file tree cannot be retrieved.
        """
        try:
            parts = repo_url.strip("/").split("/")
            owner = parts[-2]
            repo_name = parts[-1].replace(".git", "")
        except IndexError:
            raise ValueError(
                "Invalid repository URL format. Expected format: https://github.com/owner/repo"
            )

        print(f" [MCP] Fetching file tree for {owner}/{repo_name} at {commit_sha}...")
        tree_result = self._send_request(
            method="git.get_repository_tree",
            params={"owner": owner, "repo": repo_name, "tree_sha": commit_sha, "recursive": True},
        )

        tree = tree_result.get("tree", [])
        if not tree:
            raise RuntimeError("Could not retrieve repository file tree.")

        temp_dir = tempfile.mkdtemp(prefix=f"xenfra_{repo_name}_")
        dest_root = Path(temp_dir).resolve()
        print(f" [MCP] Downloading to temporary directory: {temp_dir}")

        for item in tree:
            item_path = item.get("path")
            item_type = item.get("type")

            if not item_path or item_type != "blob":  # Only handle files
                continue

            # Tree entries come from an external server; refuse any path that
            # would escape the destination directory (e.g. "../../etc/passwd").
            # The original wrote server-supplied paths to disk unchecked.
            local_file_path = (dest_root / item_path).resolve()
            try:
                local_file_path.relative_to(dest_root)
            except ValueError:
                print(f" [MCP] [Warning] Skipping unsafe path {item_path}")
                continue

            # Use the commit_sha as 'ref' so content matches the tree version.
            content_result = self._send_request(
                method="repos.get_file_contents",
                params={"owner": owner, "repo": repo_name, "path": item_path, "ref": commit_sha},
            )

            content_b64 = content_result.get("content")
            if content_b64 is None:
                print(f" [MCP] [Warning] Could not get content for {item_path}")
                continue

            try:
                # Content is base64 encoded, with newlines.
                decoded_content = base64.b64decode(content_b64.replace("\n", ""))
            except (ValueError, TypeError):
                # binascii.Error is a ValueError subclass; the original reached
                # it via the non-public `base64.binascii` attribute.
                print(f" [MCP] [Warning] Could not decode content for {item_path}")
                continue

            # Create file and parent directories in the temp location.
            local_file_path.parent.mkdir(parents=True, exist_ok=True)
            with open(local_file_path, "wb") as f:
                f.write(decoded_content)

        print(" [MCP] ✅ Repository download complete.")
        return temp_dir

    def __enter__(self):
        self._start_server()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._stop_server()