llama_deploy_core-0.3.6-py3-none-any.whl → llama_deploy_core-0.3.8-py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
--- a/llama_deploy/core/client/manage_client.py
+++ b/llama_deploy/core/client/manage_client.py
@@ -68,16 +68,56 @@ class ControlPlaneClient(BaseClient):
 
     async def server_version(self) -> VersionResponse:
         response = await self.client.get("/api/v1beta1/deployments-public/version")
-        response.raise_for_status()
+        _raise_for_status(response)
         return VersionResponse.model_validate(response.json())
 
     async def list_projects(self) -> List[ProjectSummary]:
         response = await self.client.get("/api/v1beta1/deployments/list-projects")
-        response.raise_for_status()
+        _raise_for_status(response)
         projects_response = ProjectsListResponse.model_validate(response.json())
         return [project for project in projects_response.projects]
 
 
+def _raise_for_status(response: httpx.Response) -> None:
+    """
+    Custom raise-for-status that adds response body information to the error
+    message while still using the httpx error classes.
+    """
+    try:
+        response.raise_for_status()
+    except httpx.HTTPStatusError as e:
+        body = _response_body_snippet(response, limit=250)
+        request_id = response.headers.get("x-request-id") or response.headers.get(
+            "x-correlation-id"
+        )
+        rid = f" [request id: {request_id}]" if request_id else ""
+        body_part = f" - {body}" if body else ""
+        raise httpx.HTTPStatusError(
+            f"HTTP {response.status_code} for url {response.url}{body_part}{rid}",
+            request=e.request or response.request,
+            response=e.response or response,
+        )
+
+
+def _response_body_snippet(response: httpx.Response, limit: int = 500) -> str:
+    try:
+        text = response.text
+        if not text:
+            # fallback attempt if body not read
+            try:
+                data = response.json()
+            except Exception:
+                data = None
+            if data is not None:
+                text = str(data)
+        text = (text or "").strip()
+        if len(text) > limit:
+            return text[: limit - 3] + "..."
+        return text
+    except Exception:
+        return ""
+
+
 class ProjectClient(BaseClient):
     """Project-scoped client for deployment operations."""
 
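Below is a minimal sketch (not part of the package) of what the new helper changes in practice. It fakes a failing endpoint with `httpx.MockTransport` and assumes `_raise_for_status` can be imported from `llama_deploy/core/client/manage_client.py`, where this diff defines it; the URL and error payload are invented for illustration.

```python
# Hypothetical demo of the enriched error message; not part of the package.
import httpx

# Assumption: the private helper added in this diff is importable like this.
from llama_deploy.core.client.manage_client import _raise_for_status


def handler(request: httpx.Request) -> httpx.Response:
    # Invented error body; real payloads depend on the control plane.
    return httpx.Response(422, json={"detail": "project_id is required"})


client = httpx.Client(transport=httpx.MockTransport(handler))
response = client.get("https://example.invalid/api/v1beta1/deployments")
try:
    _raise_for_status(response)  # drop-in for response.raise_for_status()
except httpx.HTTPStatusError as e:
    # Before: "Client error '422 Unprocessable Entity' for url https://..."
    # After (roughly): "HTTP 422 for url https://example.invalid/api/v1beta1/deployments
    #                   - {"detail": "project_id is required"}"
    # plus " [request id: ...]" when an x-request-id or x-correlation-id
    # header is present on the response.
    print(e)
```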
@@ -114,7 +154,7 @@ class ProjectClient(BaseClient):
             "/api/v1beta1/deployments",
             params={"project_id": self.project_id},
         )
-        response.raise_for_status()
+        _raise_for_status(response)
         deployments_response = DeploymentsListResponse.model_validate(response.json())
         return [deployment for deployment in deployments_response.deployments]
 
@@ -125,7 +165,7 @@ class ProjectClient(BaseClient):
             f"/api/v1beta1/deployments/{deployment_id}",
             params={"project_id": self.project_id, "include_events": include_events},
         )
-        response.raise_for_status()
+        _raise_for_status(response)
         return DeploymentResponse.model_validate(response.json())
 
     async def create_deployment(
@@ -136,7 +176,7 @@ class ProjectClient(BaseClient):
             params={"project_id": self.project_id},
             json=deployment_data.model_dump(exclude_none=True),
         )
-        response.raise_for_status()
+        _raise_for_status(response)
         return DeploymentResponse.model_validate(response.json())
 
     async def delete_deployment(self, deployment_id: str) -> None:
@@ -144,7 +184,7 @@ class ProjectClient(BaseClient):
             f"/api/v1beta1/deployments/{deployment_id}",
             params={"project_id": self.project_id},
         )
-        response.raise_for_status()
+        _raise_for_status(response)
 
     async def update_deployment(
         self,
@@ -156,7 +196,7 @@ class ProjectClient(BaseClient):
             params={"project_id": self.project_id},
             json=update_data.model_dump(),
         )
-        response.raise_for_status()
+        _raise_for_status(response)
         return DeploymentResponse.model_validate(response.json())
 
     async def validate_repository(
  async def validate_repository(
@@ -174,7 +214,7 @@ class ProjectClient(BaseClient):
174
214
  pat=pat,
175
215
  ).model_dump(),
176
216
  )
177
- response.raise_for_status()
217
+ _raise_for_status(response)
178
218
  return RepositoryValidationResponse.model_validate(response.json())
179
219
 
180
220
  async def stream_deployment_logs(
@@ -204,7 +244,7 @@ class ProjectClient(BaseClient):
204
244
  async with self.hookless_client.stream(
205
245
  "GET", url, params=params, headers=headers, timeout=None
206
246
  ) as response:
207
- response.raise_for_status()
247
+ _raise_for_status(response)
208
248
 
209
249
  event_name: str | None = None
210
250
  data_lines: list[str] = []
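One caveat, based on reading the helper rather than anything stated in this diff: inside `client.stream(...)` the body has not been read when `_raise_for_status` runs, so accessing `response.text` raises `httpx.ResponseNotRead`; the broad `except Exception` in `_response_body_snippet` swallows it, and the message falls back to status code and URL only. A hypothetical wrapper for streamed responses could read the body first on error:

```python
# Hypothetical variant (not in the package) for streamed responses: buffer
# the error body before delegating, so the snippet logic has content.
import httpx

# Assumption: the same private helper from this diff.
from llama_deploy.core.client.manage_client import _raise_for_status


async def raise_for_status_streamed(response: httpx.Response) -> None:
    if response.is_error:
        # aread() buffers the body of an open stream; fine for small error
        # payloads, risky for endpoints that stream large bodies on failure.
        await response.aread()
    _raise_for_status(response)
```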
--- a/llama_deploy_core-0.3.6.dist-info/METADATA
+++ b/llama_deploy_core-0.3.8.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: llama-deploy-core
-Version: 0.3.6
+Version: 0.3.8
 Summary: Core models and schemas for LlamaDeploy
 License: MIT
 Requires-Dist: fastapi>=0.115.0
--- a/llama_deploy_core-0.3.6.dist-info/RECORD
+++ b/llama_deploy_core-0.3.8.dist-info/RECORD
@@ -1,5 +1,5 @@
 llama_deploy/core/__init__.py,sha256=112612bf2e928c2e0310d6556bb13fc28c00db70297b90a8527486cd2562e408,43
-llama_deploy/core/client/manage_client.py,sha256=98715e323336fc9bc2730ecf585d6f9b862690fd6712998434cf9444d7bd47bc,7974
+llama_deploy/core/client/manage_client.py,sha256=b54b276519247279c02cd70e0aa965a06cd574b4e3a808e219c40fa8a2d940fe,9326
 llama_deploy/core/config.py,sha256=69bb0ea8ac169eaa4e808cd60a098b616bddd3145d26c6c35e56db38496b0e6a,35
 llama_deploy/core/deployment_config.py,sha256=bde431070758421f578f2e27f006152147e8cd752ee1054f1bf7c37ca95b0b38,15853
 llama_deploy/core/git/git_util.py,sha256=e62a5479c619a5973de203ebcc56b9729b5060c48fcb9cfc2e442756716c2abf,10960
@@ -17,6 +17,6 @@ llama_deploy/core/server/manage_api/_abstract_deployments_service.py,sha256=85ce
 llama_deploy/core/server/manage_api/_create_deployments_router.py,sha256=9bc8468169445e1cc7f2a479e1c7da42b4bdd7482fa3b440e03ee49cd09a75df,6801
 llama_deploy/core/server/manage_api/_exceptions.py,sha256=ee71cd9c2354a665e6905cd9cc752d2d65f71f0b936d33fec3c1c5229c38accf,246
 llama_deploy/core/ui_build.py,sha256=290dafa951918e5593b9035570fa4c66791d7e5ea785bd372ad11e99e8283857,1514
-llama_deploy_core-0.3.6.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
-llama_deploy_core-0.3.6.dist-info/METADATA,sha256=b5a36b0fc6cda4812349ba81ade1d6de11c726675dc4b5a14d611f0399afb42e,663
-llama_deploy_core-0.3.6.dist-info/RECORD,,
+llama_deploy_core-0.3.8.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llama_deploy_core-0.3.8.dist-info/METADATA,sha256=be98967a3fa869e9ff8f72856c1720f061262a24d0028904fbdcfb286e3ec829,663
+llama_deploy_core-0.3.8.dist-info/RECORD,,