llama-deploy-core 0.3.0a19__tar.gz → 0.3.0a21__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/PKG-INFO +1 -1
  2. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/pyproject.toml +1 -1
  3. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/client/manage_client.py +29 -40
  4. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/git/git_util.py +66 -1
  5. llama_deploy_core-0.3.0a21/src/llama_deploy/core/iter_utils.py +196 -0
  6. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/README.md +0 -0
  7. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/__init__.py +0 -0
  8. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/config.py +0 -0
  9. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/deployment_config.py +0 -0
  10. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/path_util.py +0 -0
  11. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/py.typed +0 -0
  12. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/schema/__init__.py +0 -0
  13. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/schema/base.py +0 -0
  14. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/schema/deployments.py +0 -0
  15. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/schema/git_validation.py +0 -0
  16. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/schema/projects.py +0 -0
  17. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/schema/public.py +0 -0
  18. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/server/manage_api/__init__.py +0 -0
  19. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/server/manage_api/_abstract_deployments_service.py +0 -0
  20. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/server/manage_api/_create_deployments_router.py +0 -0
  21. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/server/manage_api/_exceptions.py +0 -0
  22. {llama_deploy_core-0.3.0a19 → llama_deploy_core-0.3.0a21}/src/llama_deploy/core/ui_build.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: llama-deploy-core
3
- Version: 0.3.0a19
3
+ Version: 0.3.0a21
4
4
  Summary: Core models and schemas for LlamaDeploy
5
5
  License: MIT
6
6
  Requires-Dist: fastapi>=0.115.0
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "llama-deploy-core"
3
- version = "0.3.0a19"
3
+ version = "0.3.0a21"
4
4
  description = "Core models and schemas for LlamaDeploy"
5
5
  readme = "README.md"
6
6
  license = { text = "MIT" }
@@ -19,17 +19,12 @@ from llama_deploy.core.schema.projects import ProjectsListResponse, ProjectSumma
19
19
  from llama_deploy.core.schema.public import VersionResponse
20
20
 
21
21
 
22
- class ClientError(Exception):
23
- """Base class for client errors."""
24
-
25
- def __init__(self, message: str, status_code: int | None = None) -> None:
26
- super().__init__(message)
27
- self.status_code = status_code
28
-
29
-
30
22
  class BaseClient:
31
- def __init__(self, base_url: str, api_key: str | None = None) -> None:
23
+ def __init__(
24
+ self, base_url: str, api_key: str | None = None, auth: httpx.Auth | None = None
25
+ ) -> None:
32
26
  self.base_url = base_url.rstrip("/")
27
+ self.api_key = api_key
33
28
 
34
29
  headers: dict[str, str] = {}
35
30
  if api_key:
@@ -38,33 +33,12 @@ class BaseClient:
38
33
  self.client = httpx.AsyncClient(
39
34
  base_url=self.base_url,
40
35
  headers=headers,
41
- event_hooks={"response": [self._handle_response]},
36
+ auth=auth,
42
37
  )
43
38
  self.hookless_client = httpx.AsyncClient(
44
- base_url=self.base_url, headers=headers
39
+ base_url=self.base_url, headers=headers, auth=auth
45
40
  )
46
41
 
47
- async def _handle_response(self, response: httpx.Response) -> None:
48
- try:
49
- response.raise_for_status()
50
- except httpx.HTTPStatusError as e:
51
- try:
52
- # Ensure content is loaded for JSON/text extraction
53
- await response.aread()
54
- error_data = e.response.json()
55
- if isinstance(error_data, dict) and "detail" in error_data:
56
- error_message = error_data["detail"]
57
- else:
58
- error_message = str(error_data)
59
- except (ValueError, KeyError):
60
- error_message = e.response.text
61
- raise ClientError(
62
- f"HTTP {e.response.status_code}: {error_message}",
63
- e.response.status_code,
64
- ) from e
65
- except httpx.RequestError as e:
66
- raise ClientError(f"Request failed: {e}") from e
67
-
68
42
    async def aclose(self) -> None:
        """Close both underlying httpx async clients, releasing their connections."""
        await self.client.aclose()
        await self.hookless_client.aclose()
@@ -76,9 +50,9 @@ class ControlPlaneClient(BaseClient):
76
50
  @classmethod
77
51
  @asynccontextmanager
78
52
  async def ctx(
79
- cls, base_url: str, api_key: str | None = None
53
+ cls, base_url: str, api_key: str | None = None, auth: httpx.Auth | None = None
80
54
  ) -> AsyncIterator[ControlPlaneClient]:
81
- client = cls(base_url, api_key)
55
+ client = cls(base_url, api_key, auth)
82
56
  try:
83
57
  yield client
84
58
  finally:
@@ -87,15 +61,19 @@ class ControlPlaneClient(BaseClient):
87
61
  except Exception:
88
62
  pass
89
63
 
90
    def __init__(
        self, base_url: str, api_key: str | None = None, auth: httpx.Auth | None = None
    ) -> None:
        """Create a control-plane client; `auth` is forwarded to the httpx clients."""
        super().__init__(base_url, api_key, auth)
92
68
 
93
69
    async def server_version(self) -> VersionResponse:
        """Fetch the server version from the public deployments endpoint.

        Raises httpx.HTTPStatusError on a non-2xx response.
        """
        response = await self.client.get("/api/v1beta1/deployments-public/version")
        response.raise_for_status()
        return VersionResponse.model_validate(response.json())
96
73
 
97
74
    async def list_projects(self) -> List[ProjectSummary]:
        """Return the list of projects from the control plane.

        Raises httpx.HTTPStatusError on a non-2xx response.
        """
        response = await self.client.get("/api/v1beta1/deployments/list-projects")
        response.raise_for_status()
        projects_response = ProjectsListResponse.model_validate(response.json())
        return [project for project in projects_response.projects]
101
79
 
@@ -106,9 +84,13 @@ class ProjectClient(BaseClient):
106
84
  @classmethod
107
85
  @asynccontextmanager
108
86
  async def ctx(
109
- cls, base_url: str, project_id: str, api_key: str | None = None
87
+ cls,
88
+ base_url: str,
89
+ project_id: str,
90
+ api_key: str | None = None,
91
+ auth: httpx.Auth | None = None,
110
92
  ) -> AsyncIterator[ProjectClient]:
111
- client = cls(base_url, project_id, api_key)
93
+ client = cls(base_url, project_id, api_key, auth)
112
94
  try:
113
95
  yield client
114
96
  finally:
@@ -122,8 +104,9 @@ class ProjectClient(BaseClient):
122
104
  base_url: str,
123
105
  project_id: str,
124
106
  api_key: str | None = None,
107
+ auth: httpx.Auth | None = None,
125
108
  ) -> None:
126
- super().__init__(base_url, api_key)
109
+ super().__init__(base_url, api_key, auth)
127
110
  self.project_id = project_id
128
111
 
129
112
  async def list_deployments(self) -> List[DeploymentResponse]:
@@ -131,6 +114,7 @@ class ProjectClient(BaseClient):
131
114
  "/api/v1beta1/deployments",
132
115
  params={"project_id": self.project_id},
133
116
  )
117
+ response.raise_for_status()
134
118
  deployments_response = DeploymentsListResponse.model_validate(response.json())
135
119
  return [deployment for deployment in deployments_response.deployments]
136
120
 
@@ -141,6 +125,7 @@ class ProjectClient(BaseClient):
141
125
  f"/api/v1beta1/deployments/{deployment_id}",
142
126
  params={"project_id": self.project_id, "include_events": include_events},
143
127
  )
128
+ response.raise_for_status()
144
129
  return DeploymentResponse.model_validate(response.json())
145
130
 
146
131
  async def create_deployment(
@@ -151,13 +136,15 @@ class ProjectClient(BaseClient):
151
136
  params={"project_id": self.project_id},
152
137
  json=deployment_data.model_dump(exclude_none=True),
153
138
  )
139
+ response.raise_for_status()
154
140
  return DeploymentResponse.model_validate(response.json())
155
141
 
156
142
    async def delete_deployment(self, deployment_id: str) -> None:
        """Delete the deployment with the given id in this client's project.

        Raises httpx.HTTPStatusError on a non-2xx response.
        """
        response = await self.client.delete(
            f"/api/v1beta1/deployments/{deployment_id}",
            params={"project_id": self.project_id},
        )
        response.raise_for_status()
161
148
 
162
149
  async def update_deployment(
163
150
  self,
@@ -169,6 +156,7 @@ class ProjectClient(BaseClient):
169
156
  params={"project_id": self.project_id},
170
157
  json=update_data.model_dump(),
171
158
  )
159
+ response.raise_for_status()
172
160
  return DeploymentResponse.model_validate(response.json())
173
161
 
174
162
  async def validate_repository(
@@ -186,6 +174,7 @@ class ProjectClient(BaseClient):
186
174
  pat=pat,
187
175
  ).model_dump(),
188
176
  )
177
+ response.raise_for_status()
189
178
  return RepositoryValidationResponse.model_validate(response.json())
190
179
 
191
180
  async def stream_deployment_logs(
@@ -157,7 +157,9 @@ def clone_repo(
157
157
  _run_process(
158
158
  ["git", "fetch", "origin"], cwd=str(dest_dir.absolute())
159
159
  )
160
- _run_process(["git", "checkout", git_ref], cwd=str(dest_dir.absolute()))
160
+ _run_process(
161
+ ["git", "checkout", git_ref, "--"], cwd=str(dest_dir.absolute())
162
+ )
161
163
  # if no ref, stay on whatever the clone gave us/current commit
162
164
  # return the resolved sha
163
165
  resolved_sha = _run_process(
@@ -269,3 +271,66 @@ def get_git_root() -> Path:
269
271
  """
270
272
  result = _run_process(["git", "rev-parse", "--show-toplevel"])
271
273
  return Path(result.strip())
274
+
275
+
276
def working_tree_has_changes() -> bool:
    """
    Returns True if the working tree has uncommitted or untracked changes.
    Safe to call; returns False if unable to determine.
    """
    try:
        status = subprocess.run(
            ["git", "status", "--porcelain"],
            capture_output=True,
            text=True,
            check=False,
            timeout=30,
        )
    except Exception:
        # Not a repo, git unavailable, or timeout: report "no changes".
        return False
    # Porcelain output is empty iff the tree is clean.
    porcelain = status.stdout or ""
    return porcelain.strip() != ""
292
+
293
+
294
def get_unpushed_commits_count() -> int | None:
    """
    Returns the number of local commits ahead of the upstream.

    - Returns an integer >= 0 when an upstream is configured
    - Returns None when no upstream is configured
    - Returns 0 if the status cannot be determined
    """
    try:
        upstream_probe = subprocess.run(
            ["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"],
            capture_output=True,
            text=True,
            check=False,
            timeout=30,
        )
        if upstream_probe.returncode != 0:
            # The current branch has no upstream configured.
            return None

        counts = subprocess.run(
            ["git", "rev-list", "--left-right", "--count", "@{u}...HEAD"],
            capture_output=True,
            text=True,
            check=False,
            timeout=30,
        )
        # Output format: "<behind> <ahead>"; the second field is what we want.
        fields = (counts.stdout or "").split()
        if len(fields) < 2:
            return 0
        return int(fields[1])
    except Exception:
        return 0
@@ -0,0 +1,196 @@
1
+ """Iterator utilities for buffering, sorting, and debouncing streams."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import time
7
+ from typing import Any, AsyncGenerator, Callable, TypeVar
8
+
9
+ from typing_extensions import Literal
10
+
11
+ T = TypeVar("T")
12
+
13
+
14
async def debounced_sorted_prefix(
    inner: AsyncGenerator[T, None],
    *,
    key: Callable[[T], Any],
    debounce_seconds: float = 0.1,
    max_window_seconds: float = 0.1,
) -> AsyncGenerator[T, None]:
    """Yield a stream where the initial burst is sorted, then passthrough.

    Behavior:
    - Buffer early items and sort them by the provided key.
    - Flush the buffer when either:
      - No new item arrives for `debounce_seconds`, or
      - `max_window_seconds` elapses from the first buffered item.
    - After the first flush, subsequent items are yielded passthrough,
      in arrival order.

    Implemented by merging `inner` with a Debouncer completion stream; the
    Debouncer emits a sentinel when the debounce window closes.
    """

    pending: list[T] = []
    debouncer = Debouncer(debounce_seconds, max_window_seconds)

    async for event in merge_generators(inner, debouncer.aiter()):
        if event == "__COMPLETE__":
            # Window closed: emit the buffered prefix in sorted order.
            for buffered in sorted(pending, key=key):
                yield buffered
            pending = []
        elif debouncer.is_complete:
            # Already flushed: passthrough in arrival order.
            yield event
        else:
            # Still within the initial window: keep buffering and push
            # the debounce deadline out.
            debouncer.extend_window()
            pending.append(event)
49
+
50
+
51
# Type alias for the sentinel string yielded by Debouncer.aiter() to signal
# that the debounce window has closed.
COMPLETE = Literal["__COMPLETE__"]
52
+
53
+
54
+ async def merge_generators(
55
+ *generators: AsyncGenerator[T, None],
56
+ stop_on_first_completion: bool = False,
57
+ ) -> AsyncGenerator[T, None]:
58
+ """
59
+ Merge multiple async iterators into a single async iterator, yielding items as
60
+ soon as any source produces them.
61
+
62
+ - If stop_on_first_completion is False (default), continues until all inputs are exhausted.
63
+ - If stop_on_first_completion is True, stops as soon as any input completes.
64
+ - Propagates exceptions from any input immediately.
65
+ """
66
+ if not generators:
67
+ return
68
+
69
+ active_generators: dict[int, AsyncGenerator[T, None]] = {
70
+ index: gen for index, gen in enumerate(generators)
71
+ }
72
+
73
+ next_item_tasks: dict[int, asyncio.Task[T]] = {}
74
+ exception_to_raise: BaseException | None = None
75
+ stopped_on_first_completion = False
76
+
77
+ # Prime one pending task per generator to maintain fairness
78
+ for index, gen in active_generators.items():
79
+ next_item_tasks[index] = asyncio.create_task(anext(gen))
80
+
81
+ try:
82
+ while next_item_tasks and exception_to_raise is None:
83
+ done, _ = await asyncio.wait(
84
+ set(next_item_tasks.values()),
85
+ return_when=asyncio.FIRST_COMPLETED,
86
+ )
87
+
88
+ for finished in done:
89
+ # Locate which generator this task belonged to
90
+ task_index: int | None = None
91
+ for index, task in next_item_tasks.items():
92
+ if task is finished:
93
+ task_index = index
94
+ break
95
+
96
+ if task_index is None:
97
+ # Should not happen, but continue defensively
98
+ continue
99
+
100
+ try:
101
+ value = finished.result()
102
+ except StopAsyncIteration:
103
+ # Generator exhausted
104
+ if stop_on_first_completion:
105
+ stopped_on_first_completion = True
106
+ # Break out of the inner loop; the outer loop will
107
+ # observe the stop flag and exit to the finally block
108
+ # where pending tasks are cancelled and generators closed.
109
+ break
110
+ else:
111
+ next_item_tasks.pop(task_index, None)
112
+ active_generators.pop(task_index, None)
113
+ continue
114
+ except Exception as exc: # noqa: BLE001 - propagate specific generator error
115
+ exception_to_raise = exc
116
+ break
117
+ else:
118
+ # Remove the finished task before yielding
119
+ next_item_tasks.pop(task_index, None)
120
+ yield value
121
+ # Schedule the next item fetch for this generator
122
+ gen = active_generators.get(task_index)
123
+ if gen is not None:
124
+ next_item_tasks[task_index] = asyncio.create_task(anext(gen))
125
+ # If we are configured to stop on first completion and observed one,
126
+ # exit the outer loop to perform cleanup in the finally block.
127
+ if stopped_on_first_completion:
128
+ break
129
+ finally:
130
+ # Ensure we do not leak tasks or open generators
131
+ for task in next_item_tasks.values():
132
+ task.cancel()
133
+ if next_item_tasks:
134
+ try:
135
+ await asyncio.gather(*next_item_tasks.values(), return_exceptions=True)
136
+ except Exception:
137
+ pass
138
+ for gen in active_generators.values():
139
+ try:
140
+ await gen.aclose()
141
+ except Exception:
142
+ pass
143
+
144
+ if exception_to_raise is not None:
145
+ raise exception_to_raise
146
+ if stopped_on_first_completion:
147
+ return
148
+
149
+
150
class Debouncer:
    """
    Continually extends a completion deadline while extend_window() is called,
    up to a max window from construction time.
    Exposes methods that notify on completion.

    Must be constructed inside a running event loop: the constructor starts a
    background task that sets the completion event once the deadline passes.
    """

    def __init__(
        self,
        debounce_seconds: float = 0.1,
        max_window_seconds: float = 1,
        get_time: Callable[[], float] = time.monotonic,
    ):
        self.debounce_seconds = debounce_seconds
        self.max_window_seconds = max_window_seconds
        self.complete_signal = asyncio.Event()
        self.get_time = get_time
        self.start_time = self.get_time()
        # Deadline pushed forward by extend_window(); the effective deadline
        # is capped at max_complete_time in _loop().
        self.complete_time = self.start_time + self.debounce_seconds
        self.max_complete_time = self.start_time + self.max_window_seconds
        # Keep a reference to the timer task: the event loop holds only weak
        # references to tasks, so an unreferenced task may be garbage
        # collected before it completes.
        self._timer_task = asyncio.create_task(self._loop())

    async def _loop(self) -> None:
        # Sleep until the (possibly extended, capped) deadline, re-checking
        # after each sleep because extend_window() may have moved it.
        while not self.complete_signal.is_set():
            now = self.get_time()
            remaining = min(self.complete_time, self.max_complete_time) - now
            if remaining <= 0:
                self.complete_signal.set()
            else:
                await asyncio.sleep(remaining)

    @property
    def is_complete(self) -> bool:
        """True once the debounce window (or the max window) has elapsed."""
        return self.complete_signal.is_set()

    def extend_window(self) -> None:
        """Mark a new item has arrived, extending the debounce window."""
        now = self.get_time()
        self.complete_time = now + self.debounce_seconds

    async def wait(self) -> None:
        """Wait for the debounce window to expire, or the max window to elapse."""
        await self.complete_signal.wait()

    async def aiter(self) -> AsyncGenerator[COMPLETE, None]:
        """Yield a stream that emits an element when the wait event occurs."""
        await self.wait()
        yield "__COMPLETE__"