llama-deploy-core 0.3.0a12__py3-none-any.whl → 0.3.0a14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_deploy/core/client/manage_client.py +108 -86
- llama_deploy/core/deployment_config.py +5 -2
- llama_deploy/core/schema/__init__.py +4 -1
- llama_deploy/core/schema/base.py +0 -9
- llama_deploy/core/schema/deployments.py +7 -0
- llama_deploy/core/schema/projects.py +14 -0
- llama_deploy/core/schema/public.py +7 -0
- llama_deploy/core/server/manage_api/__init__.py +5 -1
- llama_deploy/core/server/manage_api/_abstract_deployments_service.py +17 -1
- llama_deploy/core/server/manage_api/_create_deployments_router.py +25 -7
- {llama_deploy_core-0.3.0a12.dist-info → llama_deploy_core-0.3.0a14.dist-info}/METADATA +2 -1
- llama_deploy_core-0.3.0a14.dist-info/RECORD +21 -0
- llama_deploy_core-0.3.0a12.dist-info/RECORD +0 -20
- {llama_deploy_core-0.3.0a12.dist-info → llama_deploy_core-0.3.0a14.dist-info}/WHEEL +0 -0
llama_deploy/core/client/manage_client.py CHANGED

@@ -1,8 +1,10 @@
-import
-
+from __future__ import annotations
+
+from contextlib import asynccontextmanager
+from typing import AsyncIterator, Callable, List

 import httpx
-from llama_deploy.core.schema
+from llama_deploy.core.schema import LogEvent
 from llama_deploy.core.schema.deployments import (
     DeploymentCreate,
     DeploymentResponse,
@@ -14,30 +16,41 @@ from llama_deploy.core.schema.git_validation import (
     RepositoryValidationResponse,
 )
 from llama_deploy.core.schema.projects import ProjectsListResponse, ProjectSummary
+from llama_deploy.core.schema.public import VersionResponse


 class ClientError(Exception):
     """Base class for client errors."""

-    def __init__(self, message: str) -> None:
+    def __init__(self, message: str, status_code: int | None = None) -> None:
         super().__init__(message)
+        self.status_code = status_code


 class BaseClient:
-    def __init__(self, base_url: str) -> None:
+    def __init__(self, base_url: str, api_key: str | None = None) -> None:
         self.base_url = base_url.rstrip("/")
-
+
+        headers: dict[str, str] = {}
+        if api_key:
+            headers["Authorization"] = f"Bearer {api_key}"
+
+        self.client = httpx.AsyncClient(
             base_url=self.base_url,
+            headers=headers,
             event_hooks={"response": [self._handle_response]},
         )
-        self.hookless_client = httpx.
+        self.hookless_client = httpx.AsyncClient(
+            base_url=self.base_url, headers=headers
+        )

-    def _handle_response(self, response: httpx.Response) -> None:
+    async def _handle_response(self, response: httpx.Response) -> None:
         try:
             response.raise_for_status()
         except httpx.HTTPStatusError as e:
             try:
-
+                # Ensure content is loaded for JSON/text extraction
+                await response.aread()
                 error_data = e.response.json()
                 if isinstance(error_data, dict) and "detail" in error_data:
                     error_message = error_data["detail"]
@@ -45,24 +58,44 @@ class BaseClient:
                     error_message = str(error_data)
             except (ValueError, KeyError):
                 error_message = e.response.text
-            raise ClientError(
+            raise ClientError(
+                f"HTTP {e.response.status_code}: {error_message}",
+                e.response.status_code,
+            ) from e
         except httpx.RequestError as e:
             raise ClientError(f"Request failed: {e}") from e

+    async def aclose(self) -> None:
+        await self.client.aclose()
+        await self.hookless_client.aclose()
+

 class ControlPlaneClient(BaseClient):
     """Unscoped client for non-project endpoints."""

-
-
-
+    @classmethod
+    @asynccontextmanager
+    async def ctx(
+        cls, base_url: str, api_key: str | None = None
+    ) -> AsyncIterator[ControlPlaneClient]:
+        client = cls(base_url, api_key)
+        try:
+            yield client
+        finally:
+            try:
+                await client.aclose()
+            except Exception:
+                pass
+
+    def __init__(self, base_url: str, api_key: str | None = None) -> None:
+        super().__init__(base_url, api_key)

-    def server_version(self) ->
-        response = self.client.get("/version")
-        return response.json()
+    async def server_version(self) -> VersionResponse:
+        response = await self.client.get("/api/v1beta1/deployments-public/version")
+        return VersionResponse.model_validate(response.json())

-    def list_projects(self) -> List[ProjectSummary]:
-        response = self.client.get("/api/v1beta1/deployments/list-projects")
+    async def list_projects(self) -> List[ProjectSummary]:
+        response = await self.client.get("/api/v1beta1/deployments/list-projects")
         projects_response = ProjectsListResponse.model_validate(response.json())
         return [project for project in projects_response.projects]

@@ -70,66 +103,81 @@ class ControlPlaneClient(BaseClient):
 class ProjectClient(BaseClient):
     """Project-scoped client for deployment operations."""

+    @classmethod
+    @asynccontextmanager
+    async def ctx(
+        cls, base_url: str, project_id: str, api_key: str | None = None
+    ) -> AsyncIterator[ProjectClient]:
+        client = cls(base_url, project_id, api_key)
+        try:
+            yield client
+        finally:
+            try:
+                await client.aclose()
+            except Exception:
+                pass
+
     def __init__(
         self,
         base_url: str,
         project_id: str,
+        api_key: str | None = None,
     ) -> None:
-        super().__init__(base_url)
+        super().__init__(base_url, api_key)
         self.project_id = project_id

-    def list_deployments(self) -> List[DeploymentResponse]:
-        response = self.client.get(
+    async def list_deployments(self) -> List[DeploymentResponse]:
+        response = await self.client.get(
             "/api/v1beta1/deployments",
             params={"project_id": self.project_id},
         )
         deployments_response = DeploymentsListResponse.model_validate(response.json())
         return [deployment for deployment in deployments_response.deployments]

-    def get_deployment(
+    async def get_deployment(
         self, deployment_id: str, include_events: bool = False
     ) -> DeploymentResponse:
-        response = self.client.get(
+        response = await self.client.get(
             f"/api/v1beta1/deployments/{deployment_id}",
             params={"project_id": self.project_id, "include_events": include_events},
         )
         return DeploymentResponse.model_validate(response.json())

-    def create_deployment(
+    async def create_deployment(
         self, deployment_data: DeploymentCreate
     ) -> DeploymentResponse:
-        response = self.client.post(
+        response = await self.client.post(
             "/api/v1beta1/deployments",
             params={"project_id": self.project_id},
             json=deployment_data.model_dump(exclude_none=True),
         )
         return DeploymentResponse.model_validate(response.json())

-    def delete_deployment(self, deployment_id: str) -> None:
-        self.client.delete(
+    async def delete_deployment(self, deployment_id: str) -> None:
+        await self.client.delete(
             f"/api/v1beta1/deployments/{deployment_id}",
             params={"project_id": self.project_id},
         )

-    def update_deployment(
+    async def update_deployment(
         self,
         deployment_id: str,
         update_data: DeploymentUpdate,
     ) -> DeploymentResponse:
-        response = self.client.patch(
+        response = await self.client.patch(
             f"/api/v1beta1/deployments/{deployment_id}",
             params={"project_id": self.project_id},
             json=update_data.model_dump(),
         )
         return DeploymentResponse.model_validate(response.json())

-    def validate_repository(
+    async def validate_repository(
         self,
         repo_url: str,
         deployment_id: str | None = None,
         pat: str | None = None,
     ) -> RepositoryValidationResponse:
-        response = self.client.post(
+        response = await self.client.post(
             "/api/v1beta1/deployments/validate-repository",
             params={"project_id": self.project_id},
             json=RepositoryValidationRequest(
@@ -140,21 +188,19 @@ class ProjectClient(BaseClient):
         )
         return RepositoryValidationResponse.model_validate(response.json())

-    def stream_deployment_logs(
+    async def stream_deployment_logs(
         self,
         deployment_id: str,
         *,
         include_init_containers: bool = False,
         since_seconds: int | None = None,
         tail_lines: int | None = None,
-    ) ->
+    ) -> AsyncIterator[LogEvent]:
         """Stream logs as LogEvent items from the control plane using SSE.

-
+        Yields `LogEvent` models until the stream ends (e.g., rollout completes).
         """
-
-
-        params = {
+        params: dict[str, object] = {
             "project_id": self.project_id,
             "include_init_containers": include_init_containers,
         }
@@ -166,54 +212,30 @@ class ProjectClient(BaseClient):
         url = f"/api/v1beta1/deployments/{deployment_id}/logs"
         headers = {"Accept": "text/event-stream"}

-
-
-
-                "GET", url, params=params, headers=headers, timeout=None
-            )
-        )
-        try:
+        async with self.hookless_client.stream(
+            "GET", url, params=params, headers=headers, timeout=None
+        ) as response:
             response.raise_for_status()
-        except Exception:
-            stack.close()
-            raise
-
-        return stack.close, _iterate_log_stream(response, stack.close)
-
-
-def _iterate_log_stream(
-    response: httpx.Response, closer: "Closer"
-) -> Iterator[LogEvent]:
-    event_name: str | None = None
-    data_lines: list[str] = []
-
-    try:
-        for line in response.iter_lines():
-            if line is None:
-                continue
-            line = line.decode() if isinstance(line, (bytes, bytearray)) else line
-            print("got line", line)
-            if line.startswith("event:"):
-                event_name = line[len("event:") :].strip()
-            elif line.startswith("data:"):
-                data_lines.append(line[len("data:") :].lstrip())
-            elif line.strip() == "":
-                if event_name == "log" and data_lines:
-                    data_str = "\n".join(data_lines)
-                    try:
-                        yield LogEvent.model_validate_json(data_str)
-                        print("yielded log event", data_str)
-                    except Exception:
-                        # If parsing fails, skip malformed event
-                        pass
-                # reset for next event
-                event_name = None
-                data_lines = []
-    finally:
-        try:
-            closer()
-        except Exception:
-            pass
-

-
+            event_name: str | None = None
+            data_lines: list[str] = []
+            async for line in response.aiter_lines():
+                if line is None:
+                    continue
+                line = line.decode() if isinstance(line, (bytes, bytearray)) else line
+                if line.startswith("event:"):
+                    event_name = line[len("event:") :].strip()
+                elif line.startswith("data:"):
+                    data_lines.append(line[len("data:") :].lstrip())
+                elif line.strip() == "":
+                    if event_name == "log" and data_lines:
+                        data_str = "\n".join(data_lines)
+                        try:
+                            yield LogEvent.model_validate_json(data_str)
+                        except Exception:
+                            pass
+                    event_name = None
+                    data_lines = []
+
+
+Closer = Callable[[], None]
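The client API in this release is fully async: every method now awaits an `httpx.AsyncClient`, and the new `ctx` classmethods manage client teardown. A rough usage sketch (the base URL, project ID, API key, and deployment ID below are placeholders, and the printed fields are illustrative only):

```python
import asyncio

from llama_deploy.core.client.manage_client import ClientError, ProjectClient


async def main() -> None:
    # Placeholder connection details; real values depend on your control plane.
    async with ProjectClient.ctx(
        "http://localhost:8000", project_id="my-project", api_key="sk-..."
    ) as client:
        try:
            deployments = await client.list_deployments()
        except ClientError as e:
            # ClientError now carries the HTTP status code alongside the message.
            print(f"request failed ({e.status_code}): {e}")
            return

        for deployment in deployments:
            print(deployment)

        # stream_deployment_logs is now an async generator yielding LogEvent models.
        async for event in client.stream_deployment_logs("my-deployment"):
            print(event)


asyncio.run(main())
```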
llama_deploy/core/deployment_config.py CHANGED

@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 import tomllib
 from pathlib import Path
-from typing import Any
+from typing import Any, TypeVar

 import yaml
 from llama_deploy.core.path_util import validate_path_traversal
@@ -229,7 +229,10 @@ class DeploymentConfig(BaseModel):
     )


-
+T = TypeVar("T")
+
+
+def _pick_non_default(a: T, b: T, default: T) -> T:
     if a != default:
         return a
     return b or default
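The now-generic `_pick_non_default` helper prefers `a` whenever it differs from the default, and otherwise falls back to `b` (or the default when `b` is falsy). An illustrative check with made-up values (the helper is private to `deployment_config.py`):

```python
assert _pick_non_default("custom", "other", default="") == "custom"
assert _pick_non_default("", "other", default="") == "other"
assert _pick_non_default("", "", default="") == ""
```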
llama_deploy/core/schema/__init__.py CHANGED

@@ -1,4 +1,4 @@
-from .base import Base
+from .base import Base
 from .deployments import (
     DeploymentCreate,
     DeploymentResponse,
@@ -6,10 +6,12 @@ from .deployments import (
     DeploymentUpdate,
     LlamaDeploymentPhase,
     LlamaDeploymentSpec,
+    LogEvent,
     apply_deployment_update,
 )
 from .git_validation import RepositoryValidationRequest, RepositoryValidationResponse
 from .projects import ProjectsListResponse, ProjectSummary
+from .public import VersionResponse

 __all__ = [
     "Base",
@@ -25,4 +27,5 @@ __all__ = [
     "RepositoryValidationRequest",
     "ProjectSummary",
     "ProjectsListResponse",
+    "VersionResponse",
 ]
llama_deploy/core/schema/base.py CHANGED

@@ -1,5 +1,3 @@
-from datetime import datetime
-
 from pydantic import BaseModel, ConfigDict

 base_config = ConfigDict(
@@ -20,10 +18,3 @@ base_config = ConfigDict(

 class Base(BaseModel):
     model_config = base_config
-
-
-class LogEvent(Base):
-    pod: str
-    container: str
-    text: str
-    timestamp: datetime
llama_deploy/core/schema/projects.py CHANGED

@@ -1,3 +1,7 @@
+from typing import Any
+
+from pydantic import model_validator
+
 from .base import Base


@@ -5,8 +9,18 @@ class ProjectSummary(Base):
     """Summary of a project with deployment count"""

     project_id: str
+    project_name: str
     deployment_count: int

+    @model_validator(mode="before")
+    @classmethod
+    def set_default_project_name(cls, data: Any) -> Any:
+        if isinstance(data, dict):
+            if "project_name" not in data or data.get("project_name") is None:
+                if "project_id" in data:
+                    data["project_name"] = data["project_id"]
+        return data
+

 class ProjectsListResponse(Base):
     """Response model for listing projects with deployment counts"""
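A quick sketch of what the before-mode validator buys (payload values are made up): a response that omits `project_name` still validates, with the name defaulting to the project ID, while an explicit name is kept as-is.

```python
from llama_deploy.core.schema.projects import ProjectSummary

# Older control planes may not send project_name; the validator backfills it.
summary = ProjectSummary.model_validate({"project_id": "proj-123", "deployment_count": 2})
assert summary.project_name == "proj-123"

# When project_name is present it is preserved.
summary = ProjectSummary.model_validate(
    {"project_id": "proj-123", "project_name": "My Project", "deployment_count": 2}
)
assert summary.project_name == "My Project"
```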
llama_deploy/core/server/manage_api/__init__.py CHANGED

@@ -1,9 +1,13 @@
-from ._abstract_deployments_service import
+from ._abstract_deployments_service import (
+    AbstractDeploymentsService,
+    AbstractPublicDeploymentsService,
+)
 from ._create_deployments_router import create_v1beta1_deployments_router
 from ._exceptions import DeploymentNotFoundError, ReplicaSetNotFoundError

 __all__ = [
     "AbstractDeploymentsService",
+    "AbstractPublicDeploymentsService",
     "create_v1beta1_deployments_router",
     "DeploymentNotFoundError",
     "ReplicaSetNotFoundError",
llama_deploy/core/server/manage_api/_abstract_deployments_service.py CHANGED

@@ -2,11 +2,27 @@ from abc import ABC, abstractmethod
 from typing import AsyncGenerator, cast

 from llama_deploy.core import schema
-from llama_deploy.core.schema
+from llama_deploy.core.schema import LogEvent
 from llama_deploy.core.schema.deployments import DeploymentResponse


+class AbstractPublicDeploymentsService(ABC):
+    @abstractmethod
+    async def get_version(self) -> schema.VersionResponse:
+        """
+        Get the version of the server
+        """
+        ...
+
+
 class AbstractDeploymentsService(ABC):
+    @abstractmethod
+    async def get_projects(self) -> schema.ProjectsListResponse:
+        """
+        Get a list of projects
+        """
+        ...
+
     @abstractmethod
     async def validate_repository(
         self,
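A minimal sketch of a concrete implementation of the new public-service interface, assuming `schema.VersionResponse` exposes a single `version` field (its definition in `schema/public.py` is not shown in this diff):

```python
from llama_deploy.core import schema
from llama_deploy.core.server.manage_api import AbstractPublicDeploymentsService


class StaticPublicDeploymentsService(AbstractPublicDeploymentsService):
    """Serves a fixed version string for the public version endpoint."""

    def __init__(self, version: str) -> None:
        self._version = version

    async def get_version(self) -> schema.VersionResponse:
        # Assumes VersionResponse has a `version` field; adjust to the real model.
        return schema.VersionResponse(version=self._version)
```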
llama_deploy/core/server/manage_api/_create_deployments_router.py CHANGED

@@ -1,14 +1,16 @@
 import logging
 from typing import Awaitable, Callable

-from fastapi import APIRouter, Depends, HTTPException, Request, Response
+from fastapi import APIRouter, Depends, HTTPException, Request, Response, params
 from fastapi.params import Query
 from fastapi.responses import StreamingResponse
-from llama_deploy.control_plane import k8s_client
 from llama_deploy.core import schema
 from typing_extensions import Annotated

-from ._abstract_deployments_service import
+from ._abstract_deployments_service import (
+    AbstractDeploymentsService,
+    AbstractPublicDeploymentsService,
+)
 from ._exceptions import DeploymentNotFoundError, ReplicaSetNotFoundError

 logger = logging.getLogger(__name__)
@@ -20,15 +22,29 @@ async def get_project_id(project_id: Annotated[str, Query()]) -> str:

 def create_v1beta1_deployments_router(
     deployments_service: AbstractDeploymentsService,
+    public_service: AbstractPublicDeploymentsService,
     get_project_id: Callable[[str], Awaitable[str]] = get_project_id,
+    dependencies: list[params.Depends] = [],
+    public_dependencies: list[params.Depends] = [],
 ) -> APIRouter:
-
+    base_router = APIRouter(prefix="/api/v1beta1")
+    public_router = APIRouter(
+        tags=["v1beta1-deployments-public"],
+        dependencies=public_dependencies,
+    )
+    router = APIRouter(
+        tags=["v1beta1-deployments"],
+        dependencies=dependencies,
+    )
+
+    @public_router.get("/version")
+    async def get_version() -> schema.VersionResponse:
+        return await public_service.get_version()

     @router.get("/list-projects")
     async def get_projects() -> schema.ProjectsListResponse:
         """Get all unique projects with their deployment counts"""
-
-        return schema.ProjectsListResponse(projects=projects_data)
+        return await deployments_service.get_projects()

     @router.post("/validate-repository")
     async def validate_repository(
@@ -164,4 +180,6 @@ def create_v1beta1_deployments_router(
             # Deployment exists but hasn't created a ReplicaSet yet
             raise HTTPException(status_code=409, detail=str(e))

-
+    base_router.include_router(public_router, prefix="/deployments-public")
+    base_router.include_router(router, prefix="/deployments")
+    return base_router
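A sketch of how the reshaped factory might be wired into an app. The two service instances and the auth dependency are placeholders (the real control-plane wiring lives outside this package); the point is that project-scoped routes can now take dependencies separately from the public version route.

```python
from fastapi import Depends, FastAPI, Header, HTTPException

from llama_deploy.core.server.manage_api import create_v1beta1_deployments_router

deployments_service = ...  # placeholder: a concrete AbstractDeploymentsService
public_service = ...       # placeholder: a concrete AbstractPublicDeploymentsService


async def require_api_key(authorization: str = Header(default="")) -> None:
    # Placeholder check; a real deployment would validate the bearer token.
    if not authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Missing bearer token")


app = FastAPI()
app.include_router(
    create_v1beta1_deployments_router(
        deployments_service=deployments_service,
        public_service=public_service,
        dependencies=[Depends(require_api_key)],  # guards /api/v1beta1/deployments/*
        public_dependencies=[],  # /api/v1beta1/deployments-public/version stays open
    )
)
```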
{llama_deploy_core-0.3.0a12.dist-info → llama_deploy_core-0.3.0a14.dist-info}/METADATA CHANGED

@@ -1,9 +1,10 @@
 Metadata-Version: 2.3
 Name: llama-deploy-core
-Version: 0.3.
+Version: 0.3.0a14
 Summary: Core models and schemas for LlamaDeploy
 License: MIT
 Requires-Dist: fastapi>=0.115.0
+Requires-Dist: overrides>=7.7.0
 Requires-Dist: pydantic>=2.0.0
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: types-pyyaml>=6.0.12.20250822
llama_deploy_core-0.3.0a14.dist-info/RECORD ADDED

@@ -0,0 +1,21 @@
+llama_deploy/core/__init__.py,sha256=112612bf2e928c2e0310d6556bb13fc28c00db70297b90a8527486cd2562e408,43
+llama_deploy/core/client/manage_client.py,sha256=0f2f63c0d5ba657580af758edd5c5f07602061d7ae13a5964ff82b71127d8324,8542
+llama_deploy/core/config.py,sha256=69bb0ea8ac169eaa4e808cd60a098b616bddd3145d26c6c35e56db38496b0e6a,35
+llama_deploy/core/deployment_config.py,sha256=b052fa66bd140fa39dea2de6cda362b39f8aca22f9c1cc0011c9f4a025263d0c,15117
+llama_deploy/core/git/git_util.py,sha256=c581c1da13871b4e89eda58f56ddb074139454c06ae9b04c0b396fdb2b9a5176,9193
+llama_deploy/core/path_util.py,sha256=14d50c0c337c8450ed46cafc88436027056b365a48370a69cdb76c88d7c26fd1,798
+llama_deploy/core/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
+llama_deploy/core/schema/__init__.py,sha256=d1459ee50c690779a682130eff72f61dc1a687d2d4b26d8a5d3620a72d92d831,802
+llama_deploy/core/schema/base.py,sha256=2de6d23e58c36b6bb311ec0aea4b902661867056c1250c6b7ce3bad17141fe15,677
+llama_deploy/core/schema/deployments.py,sha256=d9254d9a478d7aeaf3d28ec6205215ea892dcfb245966d726d69d0418e03b03d,6486
+llama_deploy/core/schema/git_validation.py,sha256=27b306aa6ecabe58cab6381d92551545f263fe7550c58b3087115410bc71fd21,1915
+llama_deploy/core/schema/projects.py,sha256=726f91e90ff8699c90861d9740819c44c3f00d945ab09df71bd6d35fdc218a45,726
+llama_deploy/core/schema/public.py,sha256=022129c8fc09192f5e503b0500ccf54d106f5712b9cf8ce84b3b1c37e186f930,147
+llama_deploy/core/server/manage_api/__init__.py,sha256=e477ccab59cfd084edbad46f209972a282e623eb314d0847a754a46a16361db5,457
+llama_deploy/core/server/manage_api/_abstract_deployments_service.py,sha256=85ceab2a343c3642db7f77d4a665d5710a14bca920bbfdc25c5f1168cce30b22,4638
+llama_deploy/core/server/manage_api/_create_deployments_router.py,sha256=cde496f5922a74ae6199224abb25aefa1a357bb45c06ce25de79283d6f1a7174,6622
+llama_deploy/core/server/manage_api/_exceptions.py,sha256=ee71cd9c2354a665e6905cd9cc752d2d65f71f0b936d33fec3c1c5229c38accf,246
+llama_deploy/core/ui_build.py,sha256=290dafa951918e5593b9035570fa4c66791d7e5ea785bd372ad11e99e8283857,1514
+llama_deploy_core-0.3.0a14.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llama_deploy_core-0.3.0a14.dist-info/METADATA,sha256=8d13ddc1e6b6ec292b5715931f95f41ac5733af8d74183bd3fb423df80476d4f,659
+llama_deploy_core-0.3.0a14.dist-info/RECORD,,
llama_deploy_core-0.3.0a12.dist-info/RECORD DELETED

@@ -1,20 +0,0 @@
-llama_deploy/core/__init__.py,sha256=112612bf2e928c2e0310d6556bb13fc28c00db70297b90a8527486cd2562e408,43
-llama_deploy/core/client/manage_client.py,sha256=c098a12def4ece9897d1d5d70c69bf2bd7140de5ec970cae30a5927901e2a3da,7499
-llama_deploy/core/config.py,sha256=69bb0ea8ac169eaa4e808cd60a098b616bddd3145d26c6c35e56db38496b0e6a,35
-llama_deploy/core/deployment_config.py,sha256=ff10cc96f2c64abc4761eb83c5372fd22f3770159b45503818264723b578de4e,15092
-llama_deploy/core/git/git_util.py,sha256=c581c1da13871b4e89eda58f56ddb074139454c06ae9b04c0b396fdb2b9a5176,9193
-llama_deploy/core/path_util.py,sha256=14d50c0c337c8450ed46cafc88436027056b365a48370a69cdb76c88d7c26fd1,798
-llama_deploy/core/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
-llama_deploy/core/schema/__init__.py,sha256=cc60a6fb54983d7ca13e2cc86d414a0d006a79c20e44344701f9fbe3b1d21577,739
-llama_deploy/core/schema/base.py,sha256=c02e33e35e7e4540b3065a82267febeb6da169222210a1d1c2479f6a7f1c6a4b,802
-llama_deploy/core/schema/deployments.py,sha256=1e310548f6847ee000b06d655b3fec006148bd2994b4c6b7d073582a7c312ec1,6392
-llama_deploy/core/schema/git_validation.py,sha256=27b306aa6ecabe58cab6381d92551545f263fe7550c58b3087115410bc71fd21,1915
-llama_deploy/core/schema/projects.py,sha256=c97eda38207d80354c2ee3a237cba9c3f6838148197cfa2d97b9a18d3da1a38b,294
-llama_deploy/core/server/manage_api/__init__.py,sha256=ed814d76fcade150d43205631b12bcae0b06bc2b8456a81fee24d6cf867adbc8,370
-llama_deploy/core/server/manage_api/_abstract_deployments_service.py,sha256=1bb1fbe904f84f892f092fb82d931cb2c2ca6a7563a64fb6ab52be21c02d75e5,4290
-llama_deploy/core/server/manage_api/_create_deployments_router.py,sha256=9b3fd2eeae027e3095da4db1785cdcc971b49023de615efe13b0a9e5f649982d,6081
-llama_deploy/core/server/manage_api/_exceptions.py,sha256=ee71cd9c2354a665e6905cd9cc752d2d65f71f0b936d33fec3c1c5229c38accf,246
-llama_deploy/core/ui_build.py,sha256=290dafa951918e5593b9035570fa4c66791d7e5ea785bd372ad11e99e8283857,1514
-llama_deploy_core-0.3.0a12.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
-llama_deploy_core-0.3.0a12.dist-info/METADATA,sha256=b224c05824f8f1d3edaba70f2252be8874e7d98ec588dc7269f53e14b0ae2bc2,627
-llama_deploy_core-0.3.0a12.dist-info/RECORD,,

{llama_deploy_core-0.3.0a12.dist-info → llama_deploy_core-0.3.0a14.dist-info}/WHEEL
File without changes