llama-deploy-core 0.3.22__py3-none-any.whl → 0.3.24__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+import logging
+import sys
+from collections.abc import Mapping
+from typing import Any, BinaryIO
+
+if sys.version_info >= (3, 11):
+    # Stdlib TOML parser (Python 3.11+)
+    import tomllib as _toml_backend  # type: ignore[import-not-found]
+else:
+    # Lightweight TOML backport for Python 3.10
+    import tomli as _toml_backend  # type: ignore[import-not-found]
+
+
+def get_logging_level_mapping() -> Mapping[str, int]:
+    """Return a mapping of log level names to their numeric values."""
+    if sys.version_info >= (3, 11):
+        mapping = logging.getLevelNamesMapping()
+        return {k: int(v) for k, v in mapping.items() if isinstance(v, int)}
+
+    return {
+        name: level
+        for name, level in logging._nameToLevel.items()  # type: ignore[attr-defined]
+        if isinstance(level, int)
+    }
+
+
+def load_toml_file(file_obj: BinaryIO) -> dict[str, Any]:
+    """Load TOML data from a binary file object in a version-agnostic way."""
+    return _toml_backend.load(file_obj)
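
Note: the new module above (listed in the RECORD as llama_deploy/core/_compat.py) centralizes the version-dependent bits: tomllib vs. tomli for TOML parsing, and logging.getLevelNamesMapping() vs. logging._nameToLevel for level names. A minimal usage sketch, assuming only the two public helpers shown above (the input file path below is hypothetical):

    from pathlib import Path

    from llama_deploy.core._compat import get_logging_level_mapping, load_toml_file

    # Resolve a textual level name without touching logging internals directly.
    debug_level = get_logging_level_mapping().get("DEBUG", 10)

    # Parse TOML the same way on Python 3.10 (tomli) and 3.11+ (tomllib).
    config_path = Path("pyproject.toml")  # hypothetical input file
    with config_path.open("rb") as fh:
        data = load_toml_file(fh)

    print(debug_level, data.get("project", {}).get("name"))
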
@@ -1,9 +1,10 @@
 from __future__ import annotations
 
 from contextlib import asynccontextmanager
-from typing import AsyncIterator, Callable, List
+from typing import AsyncGenerator, AsyncIterator, Callable, List
 
 import httpx
+from httpx._types import PrimitiveData
 from llama_deploy.core.client.ssl_util import get_httpx_verify_param
 from llama_deploy.core.schema import LogEvent
 from llama_deploy.core.schema.deployments import (
@@ -250,25 +251,29 @@ class ProjectClient(BaseClient):
         include_init_containers: bool = False,
         since_seconds: int | None = None,
         tail_lines: int | None = None,
-    ) -> AsyncIterator[LogEvent]:
+    ) -> AsyncGenerator[LogEvent, None]:
         """Stream logs as LogEvent items from the control plane using SSE.
 
         Yields `LogEvent` models until the stream ends (e.g., rollout completes).
         """
-        params: dict[str, object] = {
+        params_dict: dict[str, PrimitiveData] = {
             "project_id": self.project_id,
             "include_init_containers": include_init_containers,
         }
         if since_seconds is not None:
-            params["since_seconds"] = since_seconds
+            params_dict["since_seconds"] = since_seconds
         if tail_lines is not None:
-            params["tail_lines"] = tail_lines
+            params_dict["tail_lines"] = tail_lines
 
         url = f"/api/v1beta1/deployments/{deployment_id}/logs"
         headers = {"Accept": "text/event-stream"}
 
         async with self.hookless_client.stream(
-            "GET", url, params=params, headers=headers, timeout=None
+            "GET",
+            url,
+            params=httpx.QueryParams(params_dict),
+            headers=headers,
+            timeout=None,
         ) as response:
             _raise_for_status(response)
 
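
Note: the client hunks change the log-streaming method to return AsyncGenerator[LogEvent, None] and to pass an explicit httpx.QueryParams built from a dict[str, PrimitiveData], keeping the query values within the primitive types httpx accepts. The hunk does not show the method's name, so the consumer sketch below uses a placeholder name and a loosely typed client:

    from typing import Any


    async def tail_logs(client: Any, deployment_id: str) -> None:
        """Drain LogEvent items from the SSE-backed stream until it ends."""
        # `stream_deployment_logs` is a placeholder; the diff only shows the
        # generator body inside ProjectClient, not its public signature.
        async for event in client.stream_deployment_logs(
            deployment_id,
            include_init_containers=False,
            tail_lines=100,
        ):
            print(event.model_dump_json())

    # asyncio.run(tail_logs(project_client, "deployment-123"))  # illustrative only
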
@@ -1,11 +1,11 @@
 from __future__ import annotations
 
 import json
-import tomllib
 from pathlib import Path
 from typing import Any, TypeVar
 
 import yaml
+from llama_deploy.core._compat import load_toml_file
 from llama_deploy.core.git.git_util import get_git_root, is_git_repo
 from llama_deploy.core.path_util import validate_path_traversal
 from pydantic import BaseModel, ConfigDict, Field, ValidationError, model_validator
@@ -54,13 +54,13 @@ def read_deployment_config(source_root: Path, config_path: Path) -> "DeploymentC
     # local TOML format
     if local_toml_path.exists():
         with open(local_toml_path, "rb") as toml_file:
-            toml_data = tomllib.load(toml_file)
+            toml_data = load_toml_file(toml_file)
         if isinstance(toml_data, dict):
             toml_config = DeploymentConfig.model_validate(toml_data)
     # pyproject.toml format
     elif pyproject_path.exists():
         with open(pyproject_path, "rb") as pyproject_file:
-            pyproject = tomllib.load(pyproject_file)
+            pyproject = load_toml_file(pyproject_file)
         tool = pyproject.get("tool", {})
         project_name: str | None = None
         project_metadata = pyproject.get("project", {})
@@ -179,7 +179,7 @@ class DeploymentConfig(BaseModel):
         description="If true, serving locally expects Llama Cloud access and will inject credentials when possible.",
     )
     app: str | None = Field(
-        None,
+        default=None,
         description="A full bundle of all workflows as an 'app'. \"path.to_import:app_name\"",
     )
     workflows: dict[str, str] = Field(
@@ -202,7 +202,7 @@ class DeploymentConfig(BaseModel):
         ),
     )
     ui: UIConfig | None = Field(
-        None,
+        default=None,
         description="The UI configuration.",
     )
 
@@ -238,12 +238,12 @@ class DeploymentConfig(BaseModel):
     def is_valid(self) -> bool:
         """Check if the config is valid."""
         try:
-            self.validate()
+            self.validate_config()
             return True
         except ValueError:
             return False
 
-    def validate(self) -> None:
+    def validate_config(self) -> None:
         """Validate the config."""
         if self.has_no_workflows():
             raise ValueError("Config must have at least one workflow.")
@@ -276,23 +276,23 @@ class UIConfig(BaseModel):
         description="The directory containing the UI, relative to the pyproject.toml directory",
     )
     build_output_dir: str | None = Field(
-        None,
+        default=None,
         description="The directory containing the built UI, relative to the pyproject.toml directory. Defaults to 'dist' relative to the ui_directory, if defined",
     )
     package_manager: str = Field(
-        DEFAULT_UI_PACKAGE_MANAGER,
+        default=DEFAULT_UI_PACKAGE_MANAGER,
         description=f"The package manager to use to build the UI. Defaults to '{DEFAULT_UI_PACKAGE_MANAGER}'",
     )
     build_command: str = Field(
-        DEFAULT_UI_BUILD_COMMAND,
+        default=DEFAULT_UI_BUILD_COMMAND,
         description=f"The npm script command to build the UI. Defaults to '{DEFAULT_UI_BUILD_COMMAND}' if not specified",
     )
     serve_command: str = Field(
-        DEFAULT_UI_SERVE_COMMAND,
+        default=DEFAULT_UI_SERVE_COMMAND,
         description=f"The command to serve the UI. Defaults to '{DEFAULT_UI_SERVE_COMMAND}' if not specified",
     )
     proxy_port: int = Field(
-        DEFAULT_UI_PROXY_PORT,
+        default=DEFAULT_UI_PROXY_PORT,
         description=f"The port to proxy the UI to. Defaults to '{DEFAULT_UI_PROXY_PORT}' if not specified",
     )
 
@@ -334,11 +334,11 @@ class ServiceSourceV0(BaseModel):
 class DerecatedService(BaseModel):
     """Configuration for a single service."""
 
-    source: ServiceSourceV0 | None = Field(None)
-    import_path: str | None = Field(None)
-    env: dict[str, str] | None = Field(None)
-    env_files: list[str] | None = Field(None)
-    python_dependencies: list[str] | None = Field(None)
+    source: ServiceSourceV0 | None = Field(default=None)
+    import_path: str | None = Field(default=None)
+    env: dict[str, str] | None = Field(default=None)
+    env_files: list[str] | None = Field(default=None)
+    python_dependencies: list[str] | None = Field(default=None)
 
     @model_validator(mode="before")
     @classmethod
@@ -372,7 +372,7 @@ class DeprecatedDeploymentConfig(BaseModel):
     model_config = ConfigDict(populate_by_name=True, extra="ignore")
 
     name: str
-    default_service: str | None = Field(None)
+    default_service: str | None = Field(default=None)
     services: dict[str, DerecatedService]
     ui: DerecatedService | None = None
 
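
Note: the deployment_config.py hunks make two mechanical changes: every positional Field default becomes the explicit default= keyword, and the instance method validate is renamed to validate_config, presumably to avoid shadowing the validate classmethod that Pydantic's BaseModel still exposes for v1 compatibility. A minimal sketch of both conventions on an illustrative model (not the actual DeploymentConfig):

    from pydantic import BaseModel, Field


    class ExampleConfig(BaseModel):
        # Keyword-style defaults, matching the style adopted in the diff.
        app: str | None = Field(default=None, description="Optional app entry point")
        workflows: dict[str, str] = Field(default_factory=dict)

        def validate_config(self) -> None:
            """Raise ValueError when the config is unusable (mirrors the renamed method)."""
            if not self.app and not self.workflows:
                raise ValueError("Config must have at least one workflow.")

        def is_valid(self) -> bool:
            try:
                self.validate_config()
                return True
            except ValueError:
                return False


    print(ExampleConfig().is_valid())  # False: neither an app nor workflows configured
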
@@ -37,15 +37,15 @@ async def debounced_sorted_prefix(
     async for item in merged:
         if item == "__COMPLETE__":
             buffer.sort(key=key)
-            for item in buffer:
-                yield item
+            for buffered_item in buffer:
+                yield buffered_item
             buffer = []
         else:
             if debouncer.is_complete:
-                yield item
+                yield item  # type: ignore[misc] # item is T after checking != "__COMPLETE__"
             else:
                 debouncer.extend_window()
-                buffer.append(item)
+                buffer.append(item)  # type: ignore[arg-type] # item is T after checking != "__COMPLETE__"
 
 
 COMPLETE = Literal["__COMPLETE__"]
@@ -67,7 +67,7 @@ async def merge_generators(
         return
 
     active_generators: dict[int, AsyncGenerator[T, None]] = {
-        index: gen for index, gen in enumerate(generators)
+        index: generator for index, generator in enumerate(generators)
     }
 
     next_item_tasks: dict[int, asyncio.Task[T]] = {}
@@ -75,8 +75,8 @@ async def merge_generators(
     stopped_on_first_completion = False
 
     # Prime one pending task per generator to maintain fairness
-    for index, gen in active_generators.items():
-        next_item_tasks[index] = asyncio.create_task(anext(gen))
+    for index, generator in active_generators.items():
+        next_item_tasks[index] = asyncio.create_task(anext(generator))
 
     try:
         while next_item_tasks and exception_to_raise is None:
@@ -119,9 +119,13 @@ async def merge_generators(
                 next_item_tasks.pop(task_index, None)
                 yield value
                 # Schedule the next item fetch for this generator
-                gen = active_generators.get(task_index)
-                if gen is not None:
-                    next_item_tasks[task_index] = asyncio.create_task(anext(gen))
+                active_gen: AsyncGenerator[T, None] | None = active_generators.get(
+                    task_index
+                )
+                if active_gen is not None:
+                    next_item_tasks[task_index] = asyncio.create_task(
+                        anext(active_gen)
+                    )
                 # If we are configured to stop on first completion and observed one,
                 # exit the outer loop to perform cleanup in the finally block.
                 if stopped_on_first_completion:
@@ -168,7 +172,7 @@ class Debouncer:
         self.max_complete_time = self.start_time + self.max_window_seconds
         asyncio.create_task(self._loop())
 
-    async def _loop(self):
+    async def _loop(self) -> None:
         while not self.complete_signal.is_set():
             now = self.get_time()
             remaining = min(self.complete_time, self.max_complete_time) - now
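
Note: the iter_utils.py hunks are refactors around the same fan-in pattern: one pending anext() task is kept per source generator and re-armed after each yielded value, with the "__COMPLETE__" sentinel forcing type: ignore comments where an item narrows back to T. A simplified, standalone illustration of that priming pattern (a toy sketch, not the package's merge_generators):

    import asyncio
    from collections.abc import AsyncGenerator


    async def numbers(start: int) -> AsyncGenerator[int, None]:
        for i in range(start, start + 3):
            await asyncio.sleep(0.01)
            yield i


    async def merge_two(a: AsyncGenerator[int, None], b: AsyncGenerator[int, None]) -> None:
        gens = {0: a, 1: b}
        # Prime one pending task per generator to keep consumption fair.
        tasks = {i: asyncio.create_task(anext(g)) for i, g in gens.items()}
        while tasks:
            done, _ = await asyncio.wait(tasks.values(), return_when=asyncio.FIRST_COMPLETED)
            for task in done:
                index = next(i for i, t in tasks.items() if t is task)
                tasks.pop(index)
                try:
                    value = task.result()
                except StopAsyncIteration:
                    continue  # this generator is exhausted; do not re-arm it
                print(value)
                # Re-arm the fetch for the generator that just produced a value.
                tasks[index] = asyncio.create_task(anext(gens[index]))


    asyncio.run(merge_two(numbers(0), numbers(100)))
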
@@ -1,4 +1,5 @@
 import logging
+from collections.abc import AsyncGenerator
 from typing import Awaitable, Callable
 
 from fastapi import APIRouter, Depends, HTTPException, Request, Response, params
@@ -166,7 +167,7 @@ def create_v1beta1_deployments_router(
         include_init_containers: Annotated[bool, Query()] = False,
         since_seconds: Annotated[int | None, Query()] = None,
         tail_lines: Annotated[int | None, Query()] = None,
-    ):
+    ) -> StreamingResponse:
         """Stream logs for the latest ReplicaSet of a deployment.
 
         The stream ends when the latest ReplicaSet changes (e.g., a new rollout occurs).
@@ -181,7 +182,7 @@ def create_v1beta1_deployments_router(
             tail_lines=tail_lines,
         )
 
-        async def sse_lines():
+        async def sse_lines() -> AsyncGenerator[str, None]:
             async for data in inner:
                 yield "event: log\n"
                 yield f"data: {data.model_dump_json()}\n\n"
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: llama-deploy-core
-Version: 0.3.22
+Version: 0.3.24
 Summary: Core models and schemas for LlamaDeploy
 License: MIT
 Requires-Dist: fastapi>=0.115.0
@@ -9,9 +9,10 @@ Requires-Dist: pydantic>=2.0.0
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: truststore>=0.10.4
 Requires-Dist: types-pyyaml>=6.0.12.20250822
+Requires-Dist: tomli>=2.0.1 ; python_full_version < '3.11'
 Requires-Dist: httpx>=0.24.0,<1.0.0 ; extra == 'client'
 Requires-Dist: fastapi>=0.115.0 ; extra == 'server'
-Requires-Python: >=3.11, <4
+Requires-Python: >=3.10, <4
 Provides-Extra: client
 Provides-Extra: server
 Description-Content-Type: text/markdown
@@ -1,10 +1,11 @@
 llama_deploy/core/__init__.py,sha256=112612bf2e928c2e0310d6556bb13fc28c00db70297b90a8527486cd2562e408,43
-llama_deploy/core/client/manage_client.py,sha256=d33613cc3355896dc15d20ead713b7766fd900f43c5f4d547e8e8df9e29a660f,10366
+llama_deploy/core/_compat.py,sha256=c4b88be801b052a4b357673c0127115e54a9d37ea108c436b8fa3f31be4a4d73,1023
+llama_deploy/core/client/manage_client.py,sha256=81a44ad8f528b6628f447a369667e70d9f479829e71c69cedf21b81a4879e143,10523
 llama_deploy/core/client/ssl_util.py,sha256=b9743dc828fa27c18ba0867b1348662cdf0d855965c5a33db63505f23eef5d7b,1010
 llama_deploy/core/config.py,sha256=69bb0ea8ac169eaa4e808cd60a098b616bddd3145d26c6c35e56db38496b0e6a,35
-llama_deploy/core/deployment_config.py,sha256=69f8556fce84eddc033a7c881cb9d4d3e33661466074afbfb6b3913535679e92,16382
+llama_deploy/core/deployment_config.py,sha256=724fd2bc210ba11385c965b2f8882fe7e735894e75799778c1c06c8580dc0b9d,16542
 llama_deploy/core/git/git_util.py,sha256=40436142a955d39c95a5b526df57c32a6f31bbb6a421c090710c9c1c9d741373,11883
-llama_deploy/core/iter_utils.py,sha256=68ac9dbf09f58315ffcfec6bd01ed50b88a954b00458da9bc28a0535b79ba29e,7042
+llama_deploy/core/iter_utils.py,sha256=c73bb1a38d68793cdde895674aa2caef3e91373b9853006ba993fb0d490ba4f4,7385
 llama_deploy/core/path_util.py,sha256=14d50c0c337c8450ed46cafc88436027056b365a48370a69cdb76c88d7c26fd1,798
 llama_deploy/core/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
 llama_deploy/core/schema/__init__.py,sha256=d466fb068415a890de01be1696237cbd62e5fb0d83a6b2976e80c4d29381cd29,910
@@ -15,9 +16,9 @@ llama_deploy/core/schema/projects.py,sha256=726f91e90ff8699c90861d9740819c44c3f0
 llama_deploy/core/schema/public.py,sha256=022129c8fc09192f5e503b0500ccf54d106f5712b9cf8ce84b3b1c37e186f930,147
 llama_deploy/core/server/manage_api/__init__.py,sha256=e477ccab59cfd084edbad46f209972a282e623eb314d0847a754a46a16361db5,457
 llama_deploy/core/server/manage_api/_abstract_deployments_service.py,sha256=4b55c9628a9374a68bb78b7f6ae362b7cf4d1c5fc13d35e5cac191130fc13731,5181
-llama_deploy/core/server/manage_api/_create_deployments_router.py,sha256=2db7d59a43e8690cf6d3d925940212228498f0a7c5728cfcf4253830d6fa1b52,7554
+llama_deploy/core/server/manage_api/_create_deployments_router.py,sha256=784e8e4285a5e209eddb8ea92e75404e22f96fdc8fae9c6d792b585f16465f77,7647
 llama_deploy/core/server/manage_api/_exceptions.py,sha256=ee71cd9c2354a665e6905cd9cc752d2d65f71f0b936d33fec3c1c5229c38accf,246
 llama_deploy/core/ui_build.py,sha256=290dafa951918e5593b9035570fa4c66791d7e5ea785bd372ad11e99e8283857,1514
-llama_deploy_core-0.3.22.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
-llama_deploy_core-0.3.22.dist-info/METADATA,sha256=f6c8a95f64b556d7efbf4a6da0ba4079f9eb513275653e613dd8270843cdc480,760
-llama_deploy_core-0.3.22.dist-info/RECORD,,
+llama_deploy_core-0.3.24.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llama_deploy_core-0.3.24.dist-info/METADATA,sha256=343c128d288d6710f57b5c006fdfbda346e583582c5c727da9e14d2140cd1732,819
+llama_deploy_core-0.3.24.dist-info/RECORD,,