xenfra-sdk 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xenfra_sdk/__init__.py +21 -0
- xenfra_sdk/cli/__init__.py +0 -0
- xenfra_sdk/cli/main.py +226 -0
- xenfra_sdk/client.py +69 -0
- xenfra_sdk/client_with_hooks.py +275 -0
- xenfra_sdk/config.py +26 -0
- xenfra_sdk/db/__init__.py +0 -0
- xenfra_sdk/db/models.py +27 -0
- xenfra_sdk/db/session.py +30 -0
- xenfra_sdk/dependencies.py +38 -0
- xenfra_sdk/dockerizer.py +87 -0
- xenfra_sdk/engine.py +388 -0
- xenfra_sdk/exceptions.py +19 -0
- xenfra_sdk/mcp_client.py +154 -0
- xenfra_sdk/models.py +170 -0
- xenfra_sdk/patterns.json +14 -0
- xenfra_sdk/privacy.py +118 -0
- xenfra_sdk/recipes.py +25 -0
- xenfra_sdk/resources/__init__.py +0 -0
- xenfra_sdk/resources/base.py +3 -0
- xenfra_sdk/resources/deployments.py +83 -0
- xenfra_sdk/resources/intelligence.py +102 -0
- xenfra_sdk/resources/projects.py +95 -0
- xenfra_sdk/security.py +41 -0
- xenfra_sdk/templates/Dockerfile.j2 +25 -0
- xenfra_sdk/templates/cloud-init.sh.j2 +68 -0
- xenfra_sdk/templates/docker-compose.yml.j2 +33 -0
- xenfra_sdk/utils.py +70 -0
- xenfra_sdk-0.1.0.dist-info/METADATA +118 -0
- xenfra_sdk-0.1.0.dist-info/RECORD +31 -0
- xenfra_sdk-0.1.0.dist-info/WHEEL +4 -0
xenfra_sdk/models.py
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Pydantic models for the Xenfra SDK, representing API request and response data structures.
|
|
3
|
+
These models are used for data validation, serialization, and providing clear schemas
|
|
4
|
+
for external tools like OpenAI function calling.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from datetime import datetime
|
|
8
|
+
from enum import Enum
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class DeploymentStatus(str, Enum):
    """Lifecycle states a deployment can report.

    Subclasses ``str`` so members compare equal to, and serialize as,
    their plain string values (convenient for Pydantic/JSON payloads).
    """

    PENDING = "pending"
    IN_PROGRESS = "in_progress"
    SUCCESS = "success"
    FAILED = "failed"
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class SourceType(str, Enum):
    """Where a deployment's source code comes from.

    Subclasses ``str`` so members compare equal to, and serialize as,
    their plain string values.
    """

    LOCAL = "local"
    GIT = "git"
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class Deployment(BaseModel):
    """
    Represents a single deployment instance.

    NOTE(review): ``projectId`` is camelCase while every other field is
    snake_case -- presumably this mirrors the API's wire format, so renaming
    it would break (de)serialization. Confirm against the server schema.
    """

    id: str = Field(..., description="Unique identifier for the deployment")
    projectId: str = Field(..., description="Identifier of the project being deployed")
    status: DeploymentStatus = Field(..., description="Current status of the deployment")
    source: str = Field(..., description="Source of the deployment (e.g., 'cli', 'api')")
    created_at: datetime = Field(..., description="Timestamp when the deployment was created")
    finished_at: datetime | None = Field(None, description="Timestamp when the deployment finished")
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class DeploymentRecord(BaseModel):
    """
    Represents a record of a completed deployment.

    Per the ``timestamp`` field, this records deployments that succeeded;
    ``source_type``/``source_identifier`` together pin down exactly what
    was deployed (a git commit or a local archive).
    """

    deployment_id: str = Field(..., description="Unique identifier for this deployment instance.")
    timestamp: datetime = Field(..., description="Timestamp of when the deployment succeeded.")
    source_type: SourceType = Field(..., description="The type of the source code (local or git).")
    source_identifier: str = Field(
        ...,
        description="The identifier for the source (commit SHA for git, archive path for local).",
    )
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class BalanceRead(BaseModel):
    """
    Represents a snapshot of the user's account balance and usage.

    NOTE(review): monetary amounts and ``generated_at`` are typed ``str``,
    not ``float``/``datetime`` -- presumably preserving the upstream API's
    string representation verbatim. Confirm before tightening the types.
    """

    month_to_date_balance: str = Field(
        ..., description="The account balance in USD at the beginning of the month."
    )
    account_balance: str = Field(..., description="The current total account balance in USD.")
    month_to_date_usage: str = Field(
        ..., description="The total usage in USD for the current month."
    )
    generated_at: str = Field(
        ..., description="The timestamp when this balance report was generated."
    )
    error: str | None = Field(
        None, description="Any error message associated with fetching the balance."
    )
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class DropletCostRead(BaseModel):
    """
    Represents the cost information for a single DigitalOcean Droplet.
    """

    id: int = Field(..., description="The unique identifier for the Droplet.")
    name: str = Field(..., description="The user-given name of the Droplet.")
    ip_address: str = Field(..., description="The public IP address of the Droplet.")
    status: str = Field(
        ..., description="The current status of the Droplet (e.g., 'active', 'off')."
    )
    size_slug: str = Field(
        ..., description="The size slug representing the Droplet's resources (e.g., 's-1vcpu-1gb')."
    )
    monthly_price: float = Field(..., description="The monthly cost of this Droplet in USD.")
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
class ProjectRead(BaseModel):
    """
    Represents a project, including its deployment status and estimated costs.

    ``ip_address`` and ``estimated_monthly_cost`` are optional: both are
    absent until the project's infrastructure exists (e.g., before a first
    successful deployment) -- TODO confirm against the API.
    """

    id: int = Field(..., description="The unique identifier for the project.")
    name: str = Field(..., description="The user-given name of the project.")
    ip_address: str | None = Field(
        None, description="The public IP address of the server running the project."
    )
    status: str = Field(
        ..., description="The current status of the project (e.g., 'LIVE', 'FAILED')."
    )
    region: str = Field(
        ..., description="The geographical region where the project is deployed (e.g., 'nyc3')."
    )
    size_slug: str = Field(..., description="The size slug of the server running the project.")
    estimated_monthly_cost: float | None = Field(
        None, description="The estimated monthly cost of the project's infrastructure in USD."
    )
    created_at: datetime = Field(..., description="The timestamp when the project was created.")
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
# Intelligence Service Models
|
|
121
|
+
|
|
122
|
+
class PatchObject(BaseModel):
    """
    Represents a structured patch for a configuration file.

    All fields are optional because the AI may return a partial patch;
    consumers must validate completeness before applying one.
    """

    file: str | None = Field(None, description="The name of the file to be patched (e.g., 'requirements.txt')")
    operation: str | None = Field(None, description="The patch operation (e.g., 'add', 'replace')")
    path: str | None = Field(None, description="A JSON-like path to the field to be changed")
    value: str | None = Field(None, description="The new value to apply")
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
class DiagnosisResponse(BaseModel):
    """
    Response from the AI diagnosis endpoint.

    ``patch`` is only present when the AI can propose a machine-applicable
    fix; ``diagnosis`` and ``suggestion`` are always populated.
    """

    diagnosis: str = Field(..., description="Human-readable explanation of the problem")
    suggestion: str = Field(..., description="Recommended course of action")
    patch: PatchObject | None = Field(None, description="Optional machine-applicable patch")
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
class PackageManagerOption(BaseModel):
    """
    Represents a detected package manager option.

    Used inside ``CodebaseAnalysisResponse.detected_package_managers`` when
    more than one manager is found in a codebase.
    """

    manager: str = Field(..., description="Package manager name (uv, pip, poetry, npm, etc.)")
    file: str = Field(..., description="Associated dependency file")
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
class CodebaseAnalysisResponse(BaseModel):
    """
    Response from the codebase analysis endpoint.

    When ``has_conflict`` is True, ``detected_package_managers`` lists every
    candidate and ``package_manager``/``dependency_file`` hold the AI's pick.
    """

    framework: str = Field(..., description="Detected framework (fastapi, flask, django)")
    port: int = Field(..., description="Detected application port")
    database: str = Field(..., description="Detected database (postgresql, mysql, sqlite, none)")
    cache: str | None = Field(None, description="Detected cache (redis, memcached, none)")
    workers: list[str] | None = Field(None, description="Detected background workers (celery, rq)")
    env_vars: list[str] | None = Field(None, description="Required environment variables")
    package_manager: str = Field(..., description="Detected package manager (uv, pip, poetry, npm, pnpm, yarn, go, bundler)")
    dependency_file: str = Field(..., description="Dependency manifest file (pyproject.toml, requirements.txt, package.json, go.mod, Gemfile)")
    has_conflict: bool = Field(False, description="True if multiple package managers detected")
    detected_package_managers: list[PackageManagerOption] | None = Field(None, description="All detected package managers (if conflict)")
    instance_size: str = Field(..., description="Recommended instance size (basic, standard, premium)")
    estimated_cost_monthly: float = Field(..., description="Estimated monthly cost in USD")
    confidence: float = Field(..., description="Confidence score (0.0-1.0)")
    notes: str | None = Field(None, description="Additional observations")
|
xenfra_sdk/patterns.json
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
{
|
|
2
|
+
"redaction_patterns": [
|
|
3
|
+
"dop_v1_[a-f0-9]{64}",
|
|
4
|
+
"[sp]k_live_[a-zA-Z0-9]{24,}",
|
|
5
|
+
"[sp]k_test_[a-zA-Z0-9]{24,}",
|
|
6
|
+
"(https?://)[^\\s:]+:[^\\s@]+",
|
|
7
|
+
"Bearer\\s[a-zA-Z0-9\\._\\-]{20,}",
|
|
8
|
+
"(password|pwd|pass)=([^\\s&]+)",
|
|
9
|
+
"AKIA[0-9A-Z]{16}",
|
|
10
|
+
"[0-9a-zA-Z/+]{40}",
|
|
11
|
+
"\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b",
|
|
12
|
+
"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}"
|
|
13
|
+
]
|
|
14
|
+
}
|
xenfra_sdk/privacy.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
"""
|
|
2
|
+
This module contains the Privacy Scrubber for the Xenfra SDK.
|
|
3
|
+
Its purpose is to redact sensitive information from logs or other text
|
|
4
|
+
before it is sent to diagnostic endpoints, upholding privacy-first principles.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import re
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import List, Optional
|
|
11
|
+
|
|
12
|
+
import httpx # For fetching patterns from URL
|
|
13
|
+
|
|
14
|
+
# Path to the patterns file within the SDK (shipped alongside this module).
_PATTERNS_FILE_PATH = Path(__file__).parent / "patterns.json"
# Every regex match in scrubbed text is replaced by this marker.
_REDACTION_PLACEHOLDER = "[REDACTED]"
# Compiled patterns used by scrub_logs(); populated at import time below and
# refreshed by initialize_scrubber().
_CACHED_PATTERNS: List[re.Pattern] = []
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _load_patterns_from_file(file_path: Path) -> List[str]:
|
|
21
|
+
"""Loads raw regex patterns from a JSON file."""
|
|
22
|
+
if not file_path.exists():
|
|
23
|
+
print(
|
|
24
|
+
f"Warning: Patterns file not found at {file_path}. No patterns will be used for scrubbing."
|
|
25
|
+
)
|
|
26
|
+
return []
|
|
27
|
+
try:
|
|
28
|
+
with open(file_path, "r") as f:
|
|
29
|
+
config = json.load(f)
|
|
30
|
+
return config.get("redaction_patterns", [])
|
|
31
|
+
except json.JSONDecodeError as e:
|
|
32
|
+
print(f"Error decoding patterns.json: {e}. Falling back to empty patterns.")
|
|
33
|
+
return []
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
async def _refresh_patterns_from_url(url: str) -> Optional[List[str]]:
    """Fetch an updated redaction-pattern list from *url*.

    Returns the pattern list on success, or ``None`` (after printing the
    error) when the request fails or the payload is not valid JSON.
    """
    try:
        async with httpx.AsyncClient() as client:
            resp = await client.get(url, timeout=5.0)
            resp.raise_for_status()
            return resp.json().get("redaction_patterns", [])
    except httpx.RequestError as e:
        print(f"Error fetching patterns from {url}: {e}")
    except json.JSONDecodeError as e:
        print(f"Error decoding JSON from patterns URL {url}: {e}")
    return None
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
async def initialize_scrubber(refresh_from_url: Optional[str] = None):
    """(Re)build the module-level cache of compiled redaction patterns.

    When *refresh_from_url* is given, patterns are fetched from that URL
    first; if no URL is given or the fetch yields nothing, the bundled
    patterns.json is used. Intended to be called during app startup.
    """
    global _CACHED_PATTERNS

    raw_patterns: List[str] = []
    if refresh_from_url:
        raw_patterns = await _refresh_patterns_from_url(refresh_from_url) or []

    # Fallback to the packaged file when no refresh URL or the refresh failed.
    if not raw_patterns:
        raw_patterns = _load_patterns_from_file(_PATTERNS_FILE_PATH)

    _CACHED_PATTERNS = [re.compile(p, re.IGNORECASE) for p in raw_patterns]
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
# Initialize patterns on module load (synchronously for initial load), so
# scrub_logs() works even if initialize_scrubber() is never awaited.
# For dynamic refresh, initialize_scrubber should be called during app startup.
_raw_initial_patterns = _load_patterns_from_file(_PATTERNS_FILE_PATH)
_CACHED_PATTERNS = [re.compile(p, re.IGNORECASE) for p in _raw_initial_patterns]
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def scrub_logs(logs: str) -> str:
    """Return *logs* with every cached-pattern match replaced by the
    redaction placeholder.

    Empty/falsy input is returned unchanged. Patterns are applied in
    order, each over the output of the previous substitution.
    """
    if not logs:
        return logs

    result = logs
    for compiled in _CACHED_PATTERNS:
        result = compiled.sub(_REDACTION_PLACEHOLDER, result)
    return result
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
if __name__ == "__main__":
    # Example Usage: demonstrates scrubbing against a sample log containing
    # a DO token, DB credentials in a URL, an IP, an email, a bearer token,
    # and an AWS-style secret -- one sample per shipped pattern family.
    test_logs = """
    Deployment failed. Error: Authentication failed with token dop_v1_abcdefghijklmnopqrstuvwxyz1234567890abcdef.
    Connecting to database at postgres://user:mypassword@127.0.0.1:5432/mydb.
    Received request from 192.168.1.100. User: test@example.com.
    Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.
    eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.
    SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c.
    AWS Secret: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY.
    """

    # Test with file-based patterns (the ones compiled at module load).
    print("--- Original Logs ---")
    print(test_logs)
    print("\n--- Scrubbed Logs (from file) ---")
    scrubbed_logs_from_file = scrub_logs(test_logs)
    print(scrubbed_logs_from_file)

    # Example of refreshing (conceptual)
    # import asyncio
    # async def demo_refresh():
    #     await initialize_scrubber(refresh_from_url="http://example.com/new-patterns.json")
    #     print("\n--- Scrubbed Logs (after conceptual refresh) ---")
    #     print(scrub_logs(test_logs))
    # asyncio.run(demo_refresh())
|
xenfra_sdk/recipes.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
|
|
3
|
+
from jinja2 import Environment, FileSystemLoader
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def generate_stack(context: dict):
    """Render the cloud-init startup script from its Jinja2 template.

    Args:
        context: Variables for rendering the template,
            e.g., {'domain': 'example.com', 'email': 'user@example.com'}

    Returns:
        The rendered cloud-init shell script as a string.
    """
    # Templates ship inside the SDK package, next to this module.
    templates_dir = Path(__file__).parent / "templates"
    env = Environment(loader=FileSystemLoader(templates_dir))

    # Only the Docker-based deployment path is supported: the non-dockerized
    # logic was removed in favor of a single template driven entirely by
    # *context*, for simplicity and scalability.
    template = env.get_template("cloud-init.sh.j2")
    return template.render(context)
|
|
File without changes
|
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
|
|
3
|
+
# Import Deployment model when it's defined in models.py
|
|
4
|
+
# from ..models import Deployment
|
|
5
|
+
from ..exceptions import XenfraAPIError, XenfraError # Add XenfraError
|
|
6
|
+
from .base import BaseManager
|
|
7
|
+
|
|
8
|
+
logger = logging.getLogger(__name__)
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class DeploymentsManager(BaseManager):
    """Manager for deployment operations: create, poll status, fetch logs."""

    def create(self, project_name: str, git_repo: str, branch: str, framework: str) -> dict:
        """Creates a new deployment.

        Args:
            project_name: Name of the project being deployed.
            git_repo: Git repository URL for the source code.
            branch: Branch of the repository to deploy.
            framework: Application framework identifier.

        Returns:
            dict: The deployment object returned by the API.

        Raises:
            XenfraAPIError: If the API returns an error.
            XenfraError: If there's a network or parsing error.
        """
        try:
            payload = {
                "project_name": project_name,
                "git_repo": git_repo,
                "branch": branch,
                "framework": framework,
            }
            response = self._client._request("POST", "/deployments", json=payload)
            # Trace the response status, consistent with get_status/get_logs.
            logger.debug(f"DeploymentsManager.create({project_name}) response: {response.status_code}")
            response.raise_for_status()
            # Assuming the API returns a dict, which will be parsed into a Deployment model
            return response.json()
        except XenfraAPIError:
            raise  # Re-raise API errors
        except Exception as e:
            raise XenfraError(f"Failed to create deployment: {e}")

    def get_status(self, deployment_id: str) -> dict:
        """Get status for a specific deployment.

        Args:
            deployment_id: The unique identifier for the deployment.

        Returns:
            dict: Deployment status information including state, progress, etc.

        Raises:
            XenfraAPIError: If the API returns an error (e.g., 404 not found).
            XenfraError: If there's a network or parsing error.
        """
        try:
            response = self._client._request("GET", f"/deployments/{deployment_id}/status")
            logger.debug(f"DeploymentsManager.get_status({deployment_id}) response: {response.status_code}")
            response.raise_for_status()
            return response.json()
        except XenfraAPIError:
            raise  # Re-raise API errors
        except Exception as e:
            raise XenfraError(f"Failed to get status for deployment {deployment_id}: {e}")

    def get_logs(self, deployment_id: str) -> str:
        """Get logs for a specific deployment.

        Args:
            deployment_id: The unique identifier for the deployment.

        Returns:
            str: The deployment logs as plain text.

        Raises:
            XenfraAPIError: If the API returns an error (e.g., 404 not found).
            XenfraError: If there's a network or parsing error.
        """
        try:
            response = self._client._request("GET", f"/deployments/{deployment_id}/logs")
            logger.debug(f"DeploymentsManager.get_logs({deployment_id}) response: {response.status_code}")
            response.raise_for_status()

            # Parse response - API should return {"logs": "log content"}
            data = response.json()
            logs = data.get("logs", "")

            if not logs:
                logger.warning(f"No logs found for deployment {deployment_id}")

            return logs

        except XenfraAPIError:
            raise  # Re-raise API errors
        except Exception as e:
            raise XenfraError(f"Failed to get logs for deployment {deployment_id}: {e}")
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Intelligence resource manager for Xenfra SDK.
|
|
3
|
+
Provides AI-powered deployment diagnosis and codebase analysis.
|
|
4
|
+
"""
|
|
5
|
+
import logging
|
|
6
|
+
|
|
7
|
+
from ..exceptions import XenfraAPIError, XenfraError
|
|
8
|
+
from ..models import CodebaseAnalysisResponse, DiagnosisResponse
|
|
9
|
+
from .base import BaseManager
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class IntelligenceManager(BaseManager):
    """
    Manager for AI-powered intelligence operations.

    Provides:
    - Deployment failure diagnosis (Zen Nod)
    - Codebase analysis for zero-config init (Zen Init)
    """

    def diagnose(
        self,
        logs: str,
        package_manager: str | None = None,
        dependency_file: str | None = None
    ) -> DiagnosisResponse:
        """Diagnose a deployment failure from its logs using AI.

        Args:
            logs: The deployment logs to analyze.
            package_manager: Optional package manager context (uv, pip,
                poetry, npm, ...); when set, the AI targets that manager's
                dependency file.
            dependency_file: Optional dependency file context
                (pyproject.toml, requirements.txt, ...); when set, the AI
                suggests patches against this file.

        Returns:
            DiagnosisResponse with diagnosis, suggestion, and optional patch.

        Raises:
            XenfraAPIError: If the API request fails.
            XenfraError: If parsing the response fails.
        """
        try:
            payload = {"logs": logs}
            # Only forward the optional context keys that were provided.
            optional = {"package_manager": package_manager, "dependency_file": dependency_file}
            payload.update({k: v for k, v in optional.items() if v})

            resp = self._client._request("POST", "/intelligence/diagnose", json=payload)
            logger.debug(
                f"IntelligenceManager.diagnose response: status={resp.status_code}"
            )
            resp.raise_for_status()
            return DiagnosisResponse(**resp.json())
        except XenfraAPIError:
            raise
        except Exception as e:
            raise XenfraError(f"Failed to diagnose logs: {e}")

    def analyze_codebase(self, code_snippets: dict[str, str]) -> CodebaseAnalysisResponse:
        """Analyze a codebase to detect framework, dependencies, and deploy config.

        Args:
            code_snippets: Mapping of filename -> file content,
                e.g., {"main.py": "...", "requirements.txt": "..."}.

        Returns:
            CodebaseAnalysisResponse with the detected configuration.

        Raises:
            XenfraAPIError: If the API request fails.
            XenfraError: If parsing the response fails.
        """
        try:
            resp = self._client._request(
                "POST",
                "/intelligence/analyze-codebase",
                json={"code_snippets": code_snippets}
            )
            logger.debug(
                f"IntelligenceManager.analyze_codebase response: status={resp.status_code}"
            )
            resp.raise_for_status()
            return CodebaseAnalysisResponse(**resp.json())
        except XenfraAPIError:
            raise
        except Exception as e:
            raise XenfraError(f"Failed to analyze codebase: {e}")
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
|
|
3
|
+
from ..exceptions import XenfraAPIError, XenfraError # Add XenfraError
|
|
4
|
+
from ..models import ProjectRead
|
|
5
|
+
from .base import BaseManager
|
|
6
|
+
|
|
7
|
+
logger = logging.getLogger(__name__)
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ProjectsManager(BaseManager):
    """Manager for project CRUD operations against the /projects endpoints."""

    def list(self) -> list[ProjectRead]:
        """Retrieves a list of all projects.

        Returns:
            list[ProjectRead]: All projects returned by the API.

        Raises:
            XenfraAPIError: If the API returns an error.
            XenfraError: If there's a network or parsing error.
        """
        try:
            response = self._client._request("GET", "/projects/")  # Added trailing slash

            logger.debug(
                f"ProjectsManager.list response: status={response.status_code}, "
                f"body={response.text[:200]}..."  # Truncate long responses
            )

            response.raise_for_status()
            return [ProjectRead(**p) for p in response.json()["projects"]]
        except XenfraAPIError:
            raise  # Re-raise API errors
        except Exception as e:
            # Handle other exceptions like JSON parsing errors
            raise XenfraError(f"Failed to list projects: {e}")

    def show(self, project_id: int) -> ProjectRead:
        """Get details for a specific project.

        Args:
            project_id: The unique identifier for the project.

        Returns:
            ProjectRead: The project details.

        Raises:
            XenfraAPIError: If the API returns an error (e.g., 404 not found).
            XenfraError: If there's a network or parsing error.
        """
        try:
            response = self._client._request("GET", f"/projects/{project_id}")
            logger.debug(f"ProjectsManager.show({project_id}) response: {response.status_code}")
            response.raise_for_status()
            return ProjectRead(**response.json())
        except XenfraAPIError:
            raise  # Re-raise API errors
        except Exception as e:
            raise XenfraError(f"Failed to get project {project_id}: {e}")

    def create(
        self,
        name: str,
        region: str = "nyc3",
        size_slug: str = "s-1vcpu-1gb"
    ) -> ProjectRead:
        """Create a new project.

        Args:
            name: The name for the new project.
            region: The DigitalOcean region (default: nyc3).
            size_slug: The droplet size slug (default: s-1vcpu-1gb).

        Returns:
            ProjectRead: The newly created project details.

        Raises:
            XenfraAPIError: If the API returns an error.
            XenfraError: If there's a network or parsing error.
        """
        try:
            payload = {
                "name": name,
                "region": region,
                "size_slug": size_slug
            }
            logger.debug(f"ProjectsManager.create payload: {payload}")
            response = self._client._request("POST", "/projects/", json=payload)
            response.raise_for_status()
            return ProjectRead(**response.json())
        except XenfraAPIError:
            raise
        except Exception as e:
            raise XenfraError(f"Failed to create project '{name}': {e}")

    def delete(self, project_id: int) -> None:
        """Deletes a project.

        Args:
            project_id: The unique identifier for the project. Annotated
                ``int`` for consistency with ``show``/``ProjectRead.id``
                (previously ``str``; string IDs still work at runtime since
                the value is only interpolated into the URL).

        Raises:
            XenfraAPIError: If the API returns an error.
            XenfraError: If there's a network error.
        """
        try:
            response = self._client._request("DELETE", f"/projects/{project_id}")
            # Trace the response status, consistent with the other methods.
            logger.debug(f"ProjectsManager.delete({project_id}) response: {response.status_code}")
            response.raise_for_status()
        except XenfraAPIError:
            raise
        except Exception as e:
            raise XenfraError(f"Failed to delete project {project_id}: {e}")
|
xenfra_sdk/security.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
# src/xenfra_sdk/security.py
|
|
2
|
+
"""
|
|
3
|
+
Security utilities for the Xenfra SDK.
|
|
4
|
+
Provides token encryption/decryption for storing OAuth credentials.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import os
|
|
8
|
+
from typing import Optional
|
|
9
|
+
|
|
10
|
+
from cryptography.fernet import Fernet
|
|
11
|
+
|
|
12
|
+
# --- Configuration from Environment ---
# These should be set in the service's environment.
# Expected to be a Fernet key (urlsafe base64) -- an empty/missing value
# makes _get_fernet() return None and the token helpers raise ValueError.
ENCRYPTION_KEY = os.getenv("ENCRYPTION_KEY", "")
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _get_fernet() -> Optional[Fernet]:
    """Build a Fernet cipher from ENCRYPTION_KEY.

    Returns None when the key is unset or not a valid Fernet key; callers
    turn that into an explicit error.
    """
    if not ENCRYPTION_KEY:
        return None
    try:
        return Fernet(ENCRYPTION_KEY.encode())
    except Exception:
        # Invalid key material: treat the same as "no key configured".
        return None
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
# --- Token Encryption ---
|
|
28
|
+
def encrypt_token(token: str) -> str:
    """Encrypt *token* with Fernet symmetric encryption and return the
    ciphertext as a string.

    Raises:
        ValueError: If ENCRYPTION_KEY is unset or invalid.
    """
    cipher = _get_fernet()
    if cipher is None:
        raise ValueError("ENCRYPTION_KEY environment variable is not set or invalid")
    return cipher.encrypt(token.encode()).decode()
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def decrypt_token(encrypted_token: str) -> str:
    """Decrypt a token previously produced by :func:`encrypt_token`.

    Raises:
        ValueError: If ENCRYPTION_KEY is unset or invalid.
    """
    cipher = _get_fernet()
    if cipher is None:
        raise ValueError("ENCRYPTION_KEY environment variable is not set or invalid")
    return cipher.decrypt(encrypted_token.encode()).decode()
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
# Dockerfile template for Python web applications
# (Jinja2 template: {{ ... }} placeholders are substituted at render time.)
FROM {{ python_version | default('python:3.11-slim') }}

WORKDIR /app

# Install uv, our preferred package manager
RUN apt-get update && apt-get install -y curl && \
    curl -LsSf https://astral.sh/uv/install.sh | sh && \
    apt-get remove -y curl && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Copy the dependency manifest first so the install layer is cached
# independently of source-code changes.
COPY requirements.txt .

# Install dependencies
# NOTE(review): newer releases of the astral.sh installer place uv in
# /root/.local/bin rather than /root/.cargo/bin -- verify this path against
# the installer version actually fetched above.
RUN /root/.cargo/bin/uv pip install --system --no-cache -r requirements.txt

COPY . .

# Expose the application port
EXPOSE {{ port | default(8000) }}

# The command to run the application will be in docker-compose.yml
# This allows for more flexibility
|