xenfra-sdk 0.2.1__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xenfra_sdk/__init__.py +61 -21
- xenfra_sdk/cli/main.py +226 -226
- xenfra_sdk/client.py +3 -0
- xenfra_sdk/config.py +26 -26
- xenfra_sdk/db/models.py +24 -24
- xenfra_sdk/db/session.py +30 -30
- xenfra_sdk/dependencies.py +39 -39
- xenfra_sdk/detection.py +396 -0
- xenfra_sdk/dockerizer.py +195 -104
- xenfra_sdk/engine.py +741 -471
- xenfra_sdk/exceptions.py +19 -19
- xenfra_sdk/manifest.py +212 -0
- xenfra_sdk/mcp_client.py +154 -154
- xenfra_sdk/models.py +184 -183
- xenfra_sdk/orchestrator.py +666 -0
- xenfra_sdk/patterns.json +13 -13
- xenfra_sdk/privacy.py +153 -153
- xenfra_sdk/recipes.py +26 -26
- xenfra_sdk/resources/base.py +3 -3
- xenfra_sdk/resources/deployments.py +278 -235
- xenfra_sdk/resources/files.py +101 -0
- xenfra_sdk/resources/intelligence.py +102 -95
- xenfra_sdk/security.py +41 -41
- xenfra_sdk/security_scanner.py +431 -0
- xenfra_sdk/templates/Caddyfile.j2 +14 -0
- xenfra_sdk/templates/Dockerfile.j2 +41 -25
- xenfra_sdk/templates/cloud-init.sh.j2 +90 -90
- xenfra_sdk/templates/docker-compose-multi.yml.j2 +29 -0
- xenfra_sdk/templates/docker-compose.yml.j2 +30 -27
- xenfra_sdk-0.2.3.dist-info/METADATA +116 -0
- xenfra_sdk-0.2.3.dist-info/RECORD +38 -0
- {xenfra_sdk-0.2.1.dist-info → xenfra_sdk-0.2.3.dist-info}/WHEEL +2 -2
- xenfra_sdk-0.2.1.dist-info/METADATA +0 -118
- xenfra_sdk-0.2.1.dist-info/RECORD +0 -31
|
@@ -1,235 +1,278 @@
|
|
|
1
|
-
import json
|
|
2
|
-
import logging
|
|
3
|
-
from typing import Iterator
|
|
4
|
-
|
|
5
|
-
# Import Deployment model when it's defined in models.py
|
|
6
|
-
# from ..models import Deployment
|
|
7
|
-
from ..exceptions import XenfraAPIError, XenfraError # Add XenfraError
|
|
8
|
-
from ..utils import safe_get_json_field, safe_json_parse
|
|
9
|
-
from .base import BaseManager
|
|
10
|
-
|
|
11
|
-
logger = logging.getLogger(__name__)
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
class DeploymentsManager(BaseManager):
|
|
15
|
-
def create(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True) -> dict:
|
|
16
|
-
"""Creates a new deployment."""
|
|
17
|
-
try:
|
|
18
|
-
payload = {
|
|
19
|
-
"project_name": project_name,
|
|
20
|
-
"git_repo": git_repo,
|
|
21
|
-
"branch": branch,
|
|
22
|
-
"framework": framework,
|
|
23
|
-
}
|
|
24
|
-
if region:
|
|
25
|
-
payload["region"] = region
|
|
26
|
-
if size_slug:
|
|
27
|
-
payload["size_slug"] = size_slug
|
|
28
|
-
if is_dockerized is not None:
|
|
29
|
-
payload["is_dockerized"] = is_dockerized
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
from typing import Iterator
|
|
4
|
+
|
|
5
|
+
# Import Deployment model when it's defined in models.py
|
|
6
|
+
# from ..models import Deployment
|
|
7
|
+
from ..exceptions import XenfraAPIError, XenfraError # Add XenfraError
|
|
8
|
+
from ..utils import safe_get_json_field, safe_json_parse
|
|
9
|
+
from .base import BaseManager
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class DeploymentsManager(BaseManager):
    """Manages deployments: creation (blocking and SSE-streaming), status, and logs.

    Payload construction is centralized in :meth:`_build_payload` so the
    blocking (:meth:`create`) and streaming (:meth:`create_stream`) endpoints
    always send identical request bodies.
    """

    @staticmethod
    def _build_payload(
        project_name: str,
        git_repo: str,
        branch: str,
        framework: str,
        region: str = None,
        size_slug: str = None,
        is_dockerized: bool = True,
        port: int = None,
        command: str = None,
        entrypoint: str = None,
        database: str = None,
        package_manager: str = None,
        dependency_file: str = None,
        file_manifest: list = None,
        cleanup_on_failure: bool = False,
        services: list = None,
        mode: str = None,
    ) -> dict:
        """Build the deployment request body, omitting unset optional fields."""
        payload = {
            "project_name": project_name,
            "git_repo": git_repo,
            "branch": branch,
            "framework": framework,
        }
        if region:
            payload["region"] = region
        if size_slug:
            payload["size_slug"] = size_slug
        # is_dockerized=False is meaningful, so compare against None, not truthiness.
        if is_dockerized is not None:
            payload["is_dockerized"] = is_dockerized
        if port:
            payload["port"] = port
        if command:
            payload["command"] = command
        if entrypoint:
            payload["entrypoint"] = entrypoint
        if database:
            payload["database"] = database
        if package_manager:
            payload["package_manager"] = package_manager
        if dependency_file:
            payload["dependency_file"] = dependency_file
        if file_manifest:
            payload["file_manifest"] = file_manifest
        if cleanup_on_failure:
            payload["cleanup_on_failure"] = True
        # Microservices support
        if services:
            payload["services"] = services
        if mode:
            payload["mode"] = mode
        return payload

    def create(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True, port: int = None, command: str = None, entrypoint: str = None, database: str = None, package_manager: str = None, dependency_file: str = None, file_manifest: list = None, cleanup_on_failure: bool = False, services: list = None, mode: str = None) -> dict:
        """Creates a new deployment.

        Parameters match :meth:`create_stream`; see that method's docstring for
        a description of each field.

        Returns:
            dict: Parsed JSON response describing the created deployment.

        Raises:
            XenfraAPIError: If the API returns an error response.
            XenfraError: If there's a network or parsing error.
        """
        try:
            payload = self._build_payload(
                project_name, git_repo, branch, framework, region, size_slug,
                is_dockerized, port, command, entrypoint, database,
                package_manager, dependency_file, file_manifest,
                cleanup_on_failure, services, mode,
            )
            response = self._client._request("POST", "/deployments", json=payload)
            # Safe JSON parsing
            return safe_json_parse(response)
        except XenfraAPIError:
            raise
        except Exception as e:
            raise XenfraError(f"Failed to create deployment: {e}")

    def get_status(self, deployment_id: str) -> dict:
        """Get status for a specific deployment.

        Args:
            deployment_id: The unique identifier for the deployment.

        Returns:
            dict: Deployment status information including state, progress, etc.

        Raises:
            XenfraAPIError: If the API returns an error (e.g., 404 not found).
            XenfraError: If there's a network or parsing error.
        """
        try:
            response = self._client._request("GET", f"/deployments/{deployment_id}/status")
            logger.debug(
                f"DeploymentsManager.get_status({deployment_id}) response: {response.status_code}"
            )
            # Safe JSON parsing - _request() already handles status codes
            return safe_json_parse(response)
        except XenfraAPIError:
            raise  # Re-raise API errors
        except Exception as e:
            raise XenfraError(f"Failed to get status for deployment {deployment_id}: {e}")

    def get_logs(self, deployment_id: str) -> str:
        """Get logs for a specific deployment.

        Args:
            deployment_id: The unique identifier for the deployment.

        Returns:
            str: The deployment logs as plain text.

        Raises:
            XenfraAPIError: If the API returns an error (e.g., 404 not found).
            XenfraError: If there's a network or parsing error.
        """
        try:
            response = self._client._request("GET", f"/deployments/{deployment_id}/logs")
            logger.debug(
                f"DeploymentsManager.get_logs({deployment_id}) response: {response.status_code}"
            )

            # Safe JSON parsing with structure validation - _request() already handles status codes
            data = safe_json_parse(response)
            if not isinstance(data, dict):
                raise XenfraError(f"Expected dictionary response, got {type(data).__name__}")

            logs = safe_get_json_field(data, "logs", "")

            if not logs:
                logger.warning(f"No logs found for deployment {deployment_id}")

            return logs

        except XenfraAPIError:
            raise  # Re-raise API errors
        except Exception as e:
            raise XenfraError(f"Failed to get logs for deployment {deployment_id}: {e}")

    @staticmethod
    def _read_error_text(response, limit: int = 1000) -> str:
        """Best-effort read of an error body from a streaming response.

        Streaming responses cannot use ``response.text`` before consumption, so
        the body is accumulated via ``iter_bytes`` and truncated at *limit*.
        """
        error_text = ""
        try:
            for chunk in response.iter_bytes():
                error_text += chunk.decode('utf-8', errors='ignore')
                if len(error_text) > limit:  # Limit error message size
                    break
            if not error_text:
                error_text = "Unknown error"
        except Exception as e:
            error_text = f"Could not read error response: {e}"
        return error_text

    @staticmethod
    def _iter_sse_events(lines) -> Iterator[dict]:
        """Parse raw SSE lines into ``{"event", "data"}`` dicts.

        ``data`` is JSON-decoded when possible, otherwise yielded as plain
        text. "keep-alive" events (used to prevent proxy timeouts) are skipped.
        Accepts either ``str`` or ``bytes`` lines.
        """
        current_event = None
        for line in lines:
            # iter_lines may yield bytes depending on response decoding.
            if isinstance(line, bytes):
                line = line.decode('utf-8', errors='ignore')

            line = line.strip()
            if not line:
                continue

            # SSE format: "event: eventname" or "data: eventdata"
            if line.startswith("event:"):
                current_event = line[6:].strip()
            elif line.startswith("data:"):
                data = line[5:].strip()

                # Default to "message" if no event line was sent (SSE spec default).
                event_type = current_event if current_event is not None else "message"
                current_event = None  # Reset for the next event

                # Skip keep-alive events (used to prevent proxy timeouts)
                if event_type == "keep-alive":
                    continue

                try:
                    # Try to parse as JSON
                    yield {"event": event_type, "data": json.loads(data)}
                except json.JSONDecodeError:
                    # If not JSON, yield as plain text
                    yield {"event": event_type, "data": data}

    def create_stream(self, project_name: str, git_repo: str, branch: str, framework: str, region: str = None, size_slug: str = None, is_dockerized: bool = True, port: int = None, command: str = None, entrypoint: str = None, database: str = None, package_manager: str = None, dependency_file: str = None, file_manifest: list = None, cleanup_on_failure: bool = False, services: list = None, mode: str = None) -> Iterator[dict]:
        """
        Creates a new deployment with real-time SSE log streaming.

        Yields SSE events as dictionaries with 'event' and 'data' keys.

        Args:
            project_name: Name of the project
            git_repo: Git repository URL (optional if file_manifest provided)
            branch: Git branch to deploy
            framework: Framework type (fastapi, flask, django)
            region: DigitalOcean region (optional)
            size_slug: DigitalOcean droplet size (optional)
            is_dockerized: Whether to use Docker (optional)
            port: Application port (optional, default 8000)
            command: Start command (optional, auto-detected if not provided)
            entrypoint: Application entrypoint (optional, e.g. 'todo.main:app')
            database: Database type (optional, e.g. 'postgres')
            package_manager: Package manager (optional, e.g. 'pip', 'uv')
            dependency_file: Dependency file (optional, e.g. 'requirements.txt')
            file_manifest: List of files for delta upload [{path, sha, size}, ...]
            cleanup_on_failure: Automatically cleanup resources if deployment fails (optional)
            services: List of service definitions for multi-service deployments (optional)
            mode: Deployment mode - 'monolithic', 'single-droplet', or 'multi-droplet' (optional)

        Yields:
            dict: SSE events with 'event' and 'data' fields

        Raises:
            XenfraAPIError: If the streaming endpoint returns a non-2xx status.
            XenfraError: On HTTP transport failures or unexpected errors.

        Example:
            for event in client.deployments.create_stream(...):
                if event['event'] == 'log':
                    print(event['data'])
                elif event['event'] == 'deployment_complete':
                    print("Done!")
        """
        payload = self._build_payload(
            project_name, git_repo, branch, framework, region, size_slug,
            is_dockerized, port, command, entrypoint, database,
            package_manager, dependency_file, file_manifest,
            cleanup_on_failure, services, mode,
        )

        try:
            # Use httpx to stream the SSE response
            import httpx
            import os

            headers = {
                "Authorization": f"Bearer {self._client._token}",
                "Accept": "text/event-stream",
                "Content-Type": "application/json",
            }

            # Use streaming API URL if available (bypasses Cloudflare timeout)
            # Otherwise fall back to regular API URL
            streaming_api_url = os.getenv("XENFRA_STREAMING_API_URL")
            if streaming_api_url:
                base_url = streaming_api_url
            else:
                # Local/dev/production: use regular API URL
                base_url = self._client.api_url

            url = f"{base_url}/deployments/stream"

            with httpx.stream(
                "POST",
                url,
                json=payload,
                headers=headers,
                timeout=600.0,  # 10 minute timeout for deployments
            ) as response:
                # Check status before consuming stream
                if response.status_code not in [200, 201, 202]:
                    raise XenfraAPIError(
                        status_code=response.status_code,
                        detail=f"Deployment failed: {self._read_error_text(response)}"
                    )

                # Parse and forward SSE events
                yield from self._iter_sse_events(response.iter_lines())

        except XenfraAPIError:
            # FIX: previously the generic `except Exception` below re-wrapped
            # our own XenfraAPIError into XenfraError, so callers could never
            # catch the documented API error. Re-raise unchanged, matching
            # create()/get_status()/get_logs().
            raise
        except httpx.HTTPError as e:
            raise XenfraError(f"HTTP error during streaming deployment: {e}")
        except Exception as e:
            raise XenfraError(f"Failed to create streaming deployment: {e}")
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Files resource manager for delta uploads.
|
|
3
|
+
|
|
4
|
+
Provides methods to check file cache status and upload files to the server.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Dict, List
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class FilesManager:
    """Handles the delta-upload workflow: server cache checks and raw file uploads."""

    def __init__(self, client):
        """Store a reference to the owning client.

        Args:
            client: The XenfraClient instance.
        """
        self._client = client

    def check(self, files: List[Dict]) -> Dict:
        """Ask the server which of the given files are absent from its cache.

        Args:
            files: File descriptors, each a dict with keys: path, sha, size.

        Returns:
            Dict with keys:
                - missing: List of SHA hashes that need to be uploaded
                - cached: Number of files already cached on server
        """
        # Forward only the fields the endpoint expects, dropping any extras
        # (such as abs_path) the caller may carry in its descriptors.
        manifest = [
            {"path": entry["path"], "sha": entry["sha"], "size": entry["size"]}
            for entry in files
        ]

        response = self._client._request("POST", "/files/check", json={"files": manifest})
        return response.json()

    def upload(self, content: bytes, sha: str, path: str) -> Dict:
        """Send one file's raw bytes to the server.

        Args:
            content: Raw file content as bytes
            sha: SHA256 hash of the content
            path: Relative file path

        Returns:
            Dict with keys: sha, size, stored
        """
        import httpx

        request_headers = {
            "Authorization": f"Bearer {self._client._token}",
            "Content-Type": "application/octet-stream",
            "X-Xenfra-Sha": sha,
            "X-Xenfra-Path": path,
        }

        result = httpx.post(
            f"{self._client.api_url}/files/upload",
            content=content,
            headers=request_headers,
            timeout=120.0,  # 2 minutes for large files
        )
        result.raise_for_status()
        return result.json()

    def upload_files(self, files: List[Dict], missing_shas: List[str], progress_callback=None) -> int:
        """Upload every file whose SHA the server reported as missing.

        Args:
            files: File descriptors with keys: path, sha, size, abs_path
            missing_shas: SHA hashes that need to be uploaded
            progress_callback: Optional callback(uploaded_count, total_count)

        Returns:
            Number of files uploaded
        """
        wanted = set(missing_shas)  # set for O(1) membership tests
        pending = [entry for entry in files if entry["sha"] in wanted]
        total = len(pending)
        done = 0

        for entry in pending:
            with open(entry["abs_path"], "rb") as handle:
                blob = handle.read()

            self.upload(blob, entry["sha"], entry["path"])
            done += 1

            if progress_callback:
                progress_callback(done, total)

        return done
|