llama-deploy-appserver 0.3.23 → 0.3.24 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -122,7 +122,7 @@ def _setup_openapi(name: str, app: FastAPI, server: WorkflowServer) -> None:
 
     schema["paths"] = new_paths
 
-    def custom_openapi():
+    def custom_openapi() -> dict[str, object]:
         return schema
 
     app.openapi = custom_openapi  # ty: ignore[invalid-assignment] - doesn't like us overwriting the method
@@ -139,16 +139,22 @@ app = FastAPI(
 Instrumentator().instrument(app).expose(app, include_in_schema=False)
 
 
-# Configure CORS middleware if the environment variable is set
-if not os.environ.get("DISABLE_CORS", False):
+def _configure_cors(app: FastAPI) -> None:
+    """Attach CORS middleware in a way that keeps type-checkers happy."""
+    # Use a cast here because ty's view of Starlette's middleware factory
+    # protocol is stricter than FastAPI's runtime expectations.
     app.add_middleware(
-        CORSMiddleware,
+        cast(Any, CORSMiddleware),
         allow_origins=["*"],  # Allows all origins
         allow_credentials=True,
         allow_methods=["GET", "POST"],
         allow_headers=["Content-Type", "Authorization"],
     )
 
+
+if not os.environ.get("DISABLE_CORS", False):
+    _configure_cors(app)
+
 app.include_router(health_router)
 add_log_middleware(app)
 
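The CORS setup is now wrapped in a helper, and the middleware class is cast so the `ty` checker accepts `add_middleware`. A minimal standalone sketch of the same pattern, assuming only FastAPI and Starlette:

```python
from typing import Any, cast

from fastapi import FastAPI
from starlette.middleware.cors import CORSMiddleware

app = FastAPI()

# Casting the middleware class to Any sidesteps checkers that model
# add_middleware's first argument more strictly than Starlette behaves at
# runtime; the call itself is unchanged.
app.add_middleware(
    cast(Any, CORSMiddleware),
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["GET", "POST"],
    allow_headers=["Content-Type", "Authorization"],
)
```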
@@ -280,16 +286,13 @@ def start_server_in_target_venv(
     if log_format:
         env["LOG_FORMAT"] = log_format
 
-    ret = run_process(
+    run_process(
         args,
         cwd=path,
         env=env,
         line_transform=_exclude_venv_warning,
     )
 
-    if ret != 0:
-        raise SystemExit(ret)
-
 
 def start_preflight_in_target_venv(
     cwd: Path | None = None,
@@ -297,7 +300,7 @@ def start_preflight_in_target_venv(
 ) -> None:
     """
     Run preflight validation inside the target project's virtual environment using uv.
-    Mirrors the venv targetting and invocation strategy used by start_server_in_target_venv.
+    Mirrors the venv targeting and invocation strategy used by start_server_in_target_venv.
     """
     configure_settings(
         app_root=cwd,
@@ -317,14 +320,13 @@ def start_preflight_in_target_venv(
     if deployment_file:
         args.extend(["--deployment-file", str(deployment_file)])
 
-    ret = run_process(
+    run_process(
         args,
         cwd=path,
         env=os.environ.copy(),
         line_transform=_exclude_venv_warning,
     )
-    if ret is not None and ret != 0:
-        raise SystemExit(ret)
+    # Note: run_process doesn't return exit code; process runs to completion or raises
 
 
 class PreflightValidationError(Exception):
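Both `run_process` call sites now drop the exit-code check, consistent with the comment added above: failures surface as exceptions rather than return codes. A hypothetical sketch of that contract (the real `process_utils.run_process` may differ):

```python
import subprocess
import sys


def run_process_sketch(args: list[str], cwd: str | None = None) -> None:
    # check=True converts a nonzero exit status into CalledProcessError, so
    # callers no longer branch on a returned exit code.
    subprocess.run(args, cwd=cwd, check=True)


run_process_sketch([sys.executable, "--version"])
```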
@@ -27,7 +27,7 @@ from llama_deploy.core.git.git_util import (
 
 def bootstrap_app_from_repo(
     target_dir: str = "/opt/app",
-):
+) -> None:
     bootstrap_settings = BootstrapSettings()
     # Needs the github url+auth, and the deployment file path
     # clones the repo to a standard directory
@@ -22,18 +22,18 @@ def _get_or_create_correlation_id(request: Request) -> str:
     return request.headers.get("X-Request-ID", create_correlation_id())
 
 
-def add_log_middleware(app: FastAPI):
+def add_log_middleware(app: FastAPI) -> None:
     @app.middleware("http")
     async def add_log_id(
         request: Request, call_next: Callable[[Request], Awaitable[Response]]
-    ):
+    ) -> Response:
         set_correlation_id(_get_or_create_correlation_id(request))
         return await call_next(request)
 
     @app.middleware("http")
     async def access_log_middleware(
         request: Request, call_next: Callable[[Request], Awaitable[Response]]
-    ):
+    ) -> Response:
         if _is_proxy_request(request):
             return await call_next(request)
         start = time.perf_counter()
@@ -18,7 +18,7 @@ from starlette.responses import HTMLResponse
 from starlette.routing import Route
 from workflows import Context, Workflow
 from workflows.handler import WorkflowHandler
-from workflows.server import SqliteWorkflowStore, WorkflowServer
+from workflows.server import AbstractWorkflowStore, SqliteWorkflowStore, WorkflowServer
 from workflows.server.memory_workflow_store import MemoryWorkflowStore
 
 logger = logging.getLogger()
@@ -40,7 +40,7 @@ class Deployment:
             local: Whether the deployment is local. If true, sources won't be synced
         """
 
-        self._default_service: str | None = workflows.get(DEFAULT_SERVICE_ID)
+        self._default_service: Workflow | None = workflows.get(DEFAULT_SERVICE_ID)
         self._service_tasks: list[asyncio.Task] = []
         # Ready to load services
         self._workflow_services: dict[str, Workflow] = workflows
@@ -50,13 +50,9 @@ class Deployment:
 
     @property
     def default_service(self) -> Workflow | None:
+        """Return the default workflow, if any."""
        return self._default_service
 
-    @property
-    def name(self) -> str:
-        """Returns the name of this deployment."""
-        return self._name
-
     @property
     def service_names(self) -> list[str]:
         """Returns the list of service names in this deployment."""
@@ -66,7 +62,7 @@ class Deployment:
         self, service_id: str, session_id: str | None = None, **run_kwargs: dict
     ) -> Any:
         workflow = self._workflow_services[service_id]
-        if session_id:
+        if session_id is not None:
             context = self._contexts[session_id]
             return await workflow.run(context=context, **run_kwargs)
 
@@ -79,7 +75,7 @@ class Deployment:
         self, service_id: str, session_id: str | None = None, **run_kwargs: dict
     ) -> Tuple[str, str]:
         workflow = self._workflow_services[service_id]
-        if session_id:
+        if session_id is not None:
             context = self._contexts[session_id]
             handler = workflow.run(context=context, **run_kwargs)
         else:
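Both run paths switch from a truthiness test to `is not None`, so a falsy-but-present session id (such as an empty string) still resolves its stored context. A small demonstration of the difference:

```python
# Illustrative only: hypothetical contexts keyed by session id.
def pick_context(session_id: str | None, contexts: dict[str, str]) -> str | None:
    if session_id is not None:  # catches "" as well as real ids
        return contexts.get(session_id)
    return None


contexts = {"": "ctx-empty", "abc": "ctx-abc"}
assert pick_context("", contexts) == "ctx-empty"  # `if session_id:` would skip this
assert pick_context(None, contexts) is None
```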
@@ -90,12 +86,13 @@ class Deployment:
         handler_id = generate_id()
         self._handlers[handler_id] = handler
         self._handler_inputs[handler_id] = json.dumps(run_kwargs)
+        assert session_id is not None
         return handler_id, session_id
 
     def create_workflow_server(
         self, deployment_config: DeploymentConfig, settings: ApiserverSettings
     ) -> WorkflowServer:
-        persistence = MemoryWorkflowStore()
+        persistence: AbstractWorkflowStore = MemoryWorkflowStore()
         if settings.persistence == "local":
             logger.info("Using local sqlite persistence for workflows")
             persistence = SqliteWorkflowStore(
@@ -137,8 +134,8 @@ class Deployment:
             "/debugger/index.html?api=" + quote_plus("/deployments/" + config.name)
         )
 
-    @app.get("/debugger/index.html", include_in_schema=False)
-    def serve_debugger(api: str | None = None):
+    @app.get("/debugger/index.html", include_in_schema=False, response_model=None)
+    def serve_debugger(api: str | None = None) -> RedirectResponse | HTMLResponse:
         if not api:
             return RedirectResponse(
                 "/debugger/index.html?api="
@@ -2,7 +2,7 @@ import asyncio
 import signal
 from asyncio import Event
 from contextlib import suppress
-from typing import Awaitable, TypeVar
+from typing import Any, Coroutine, TypeVar
 
 shutdown_event = Event()
 
@@ -21,7 +21,7 @@ T = TypeVar("T")
 
 
 async def wait_or_abort(
-    awaitable: Awaitable[T], shutdown_event: asyncio.Event = shutdown_event
+    awaitable: Coroutine[Any, Any, T], shutdown_event: asyncio.Event = shutdown_event
 ) -> T:
     """Await an operation, aborting early if shutdown is requested.
 
@@ -32,7 +32,7 @@ async def wait_or_abort(
     if event.is_set():
         raise OperationAborted()
 
-    op_task = asyncio.create_task(awaitable)
+    op_task: asyncio.Task[T] = asyncio.create_task(awaitable)
     stop_task = asyncio.create_task(event.wait())
     try:
         done, _ = await asyncio.wait(
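Narrowing the parameter from `Awaitable[T]` to `Coroutine[Any, Any, T]` matches what `asyncio.create_task` is typed to accept, which is also why `op_task` can now carry an explicit `asyncio.Task[T]` annotation. A self-contained sketch:

```python
import asyncio
from typing import Any, Coroutine, TypeVar

T = TypeVar("T")


async def run_as_task(coro: Coroutine[Any, Any, T]) -> T:
    # create_task wants a coroutine, not an arbitrary awaitable; the narrower
    # annotation makes this well-typed.
    task: asyncio.Task[T] = asyncio.create_task(coro)
    return await task


async def main() -> None:
    print(await run_as_task(asyncio.sleep(0, result="done")))


asyncio.run(main())
```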
@@ -112,7 +112,7 @@ def should_use_color() -> bool:
 
 @dataclass
 class SpawnProcessResult:
-    process: subprocess.Popen
+    process: subprocess.Popen[str] | subprocess.Popen[bytes]
     sources: list[Tuple[int | TextIO, TextIO]]
     cleanup: Callable[[], None]
 
@@ -124,6 +124,7 @@ def _spawn_process(
     env: dict[str, str] | None,
     use_pty: bool,
 ) -> SpawnProcessResult:
+    process: subprocess.Popen[str] | subprocess.Popen[bytes]
     if use_pty:
         import pty
 
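The `Popen[str] | Popen[bytes]` union reflects that `subprocess.Popen` is generic over its stream type: `text=True` produces `Popen[str]`, the default produces `Popen[bytes]`. Declaring the union before the branch lets both assignments share one variable. Illustrative sketch:

```python
import subprocess

use_text = True  # stand-in for the use_pty / non-pty branch decision

process: subprocess.Popen[str] | subprocess.Popen[bytes]
if use_text:
    # text=True gives str-typed streams: Popen[str]
    process = subprocess.Popen(["echo", "hi"], stdout=subprocess.PIPE, text=True)
else:
    # default byte streams: Popen[bytes]
    process = subprocess.Popen(["echo", "hi"], stdout=subprocess.PIPE)
process.wait()
```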
@@ -164,7 +165,7 @@ def _spawn_process(
         shell=use_shell,
     )
 
-    def cleanup() -> None:
+    def cleanup_non_pty() -> None:
         return None
 
     assert process.stdout is not None and process.stderr is not None
@@ -172,7 +173,7 @@ def _spawn_process(
         (cast(int | TextIO, process.stdout), cast(TextIO, sys.stdout)),
         (cast(int | TextIO, process.stderr), cast(TextIO, sys.stderr)),
     ]
-    return SpawnProcessResult(process, sources, cleanup)
+    return SpawnProcessResult(process, sources, cleanup_non_pty)
 
 
 def _stream_source(
@@ -212,7 +213,9 @@ def _stream_source(
 def _log_command(cmd: list[str], transform: Callable[[str], str | None] | None) -> None:
     cmd_str = "> " + " ".join(cmd)
     if transform:
-        cmd_str = transform(cmd_str)
+        transformed = transform(cmd_str)
+        if transformed is not None:
+            cmd_str = transformed
     sys.stderr.write(cmd_str + "\n")
 
 
@@ -1,7 +1,7 @@
 import asyncio
 import logging
+from collections.abc import AsyncGenerator, Sequence
 from contextlib import suppress
-from typing import List
 
 import httpx
 import websockets
@@ -23,6 +23,7 @@ from llama_deploy.appserver.interrupts import (
 from llama_deploy.appserver.settings import ApiserverSettings
 from llama_deploy.core.client.ssl_util import get_httpx_verify_param
 from llama_deploy.core.deployment_config import DeploymentConfig
+from websockets.typing import Subprotocol
 
 logger = logging.getLogger(__name__)
 
@@ -53,11 +54,12 @@ async def _ws_proxy(ws: WebSocket, upstream_url: str) -> None:
 
     try:
         # Parse subprotocols if present
-        subprotocols: List[str] | None = None
+        subprotocols: Sequence[Subprotocol] | None = None
         requested = ws.headers.get("sec-websocket-protocol")
         if requested:
             # Parse comma-separated subprotocols (as plain strings)
-            subprotocols = [p.strip() for p in requested.split(",")]
+            parsed = [p.strip() for p in requested.split(",")]
+            subprotocols = [Subprotocol(p) for p in parsed if p]
 
         # Open upstream WebSocket connection, offering the same subprotocols
         async with websockets.connect(
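`websockets` models subprotocols as a `NewType` over `str`, so the raw header values are wrapped (and empty entries from stray commas dropped) before being offered upstream. Minimal sketch:

```python
from websockets.typing import Subprotocol

requested = "graphql-ws, , chat"
parsed = [p.strip() for p in requested.split(",")]
subprotocols = [Subprotocol(p) for p in parsed if p]
print(subprotocols)  # ['graphql-ws', 'chat'] (NewType, so plain strs at runtime)
```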
@@ -210,7 +212,7 @@ def create_ui_proxy_router(name: str, port: int) -> APIRouter:
         }
 
         # Stream downloads and ensure cleanup in the generator's finally block
-        async def upstream_body():
+        async def upstream_body() -> AsyncGenerator[bytes, None]:
             try:
                 async for chunk in upstream.aiter_raw():
                     yield chunk
@@ -240,9 +242,10 @@ def create_ui_proxy_router(name: str, port: int) -> APIRouter:
 def mount_static_files(
     app: FastAPI, config: DeploymentConfig, settings: ApiserverSettings
 ) -> None:
-    path = settings.app_root / config.build_output_path()
-    if not path:
+    build_output = config.build_output_path()
+    if build_output is None:
         return
+    path = settings.app_root / build_output
 
     if not path.exists():
         return
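The old guard could never fire: `pathlib.Path` defines no `__bool__`, so every `Path` is truthy, and joining `app_root` with a `None` build output would raise `TypeError` before the check anyway. Testing the optional value before joining fixes both. Demonstration:

```python
from pathlib import Path

# Paths have no __bool__, so even an "empty" path is truthy.
assert bool(Path(""))

try:
    Path("/opt/app") / None  # type: ignore[operator]
except TypeError as exc:
    print(f"joining with None raises: {exc}")
```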
@@ -106,7 +106,8 @@ def parse_environment_variables(
     for env_file in config.env_files or []:
         env_file_path = source_root / env_file
         values = dotenv_values(env_file_path)
-        env_vars.update(**values)
+        str_values = {k: v for k, v in values.items() if isinstance(v, str)}
+        env_vars.update(str_values)
     return env_vars
 
 
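`dotenv_values` is typed as returning `dict[str, str | None]`: a key declared without a value parses as `None`. Filtering to `str` keeps `env_vars` a clean `dict[str, str]`. Sketch, assuming python-dotenv is installed:

```python
import io

from dotenv import dotenv_values

values = dotenv_values(stream=io.StringIO("SET_KEY=value\nBARE_KEY\n"))
print(values)  # {'SET_KEY': 'value', 'BARE_KEY': None}

str_values = {k: v for k, v in values.items() if isinstance(v, str)}
print(str_values)  # {'SET_KEY': 'value'}
```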
@@ -206,7 +207,9 @@ def _install_and_add_appserver_if_missing(
         )
         return
 
-    def run_uv(cmd: str, args: list[str] = [], extra_env: dict[str, str] | None = None):
+    def run_uv(
+        cmd: str, args: list[str] = [], extra_env: dict[str, str] | None = None
+    ) -> None:
         env = os.environ.copy()
         if extra_env:
             env.update(extra_env)
@@ -1,19 +1,24 @@
 import asyncio
 import logging
 import os
-from typing import Any, List
+import sys
+from typing import Any, List, cast
 
 from llama_cloud.client import AsyncLlamaCloud, httpx
 from llama_cloud_services.beta.agent_data import AsyncAgentDataClient
 from llama_deploy.appserver.settings import ApiserverSettings
 from llama_deploy.core.client.ssl_util import get_httpx_verify_param
 from llama_deploy.core.deployment_config import DeploymentConfig
-from typing_extensions import override
 from workflows.server import AbstractWorkflowStore, HandlerQuery, PersistentHandler
 
 from .keyed_lock import AsyncKeyedLock
 from .lru_cache import LRUCache
 
+if sys.version_info <= (3, 11):
+    from typing_extensions import override
+else:
+    from typing import override
+
 logger = logging.getLogger(__name__)
 
 
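`typing.override` landed in Python 3.12, hence the version-gated import and the new `typing-extensions` requirement for older interpreters. One caveat worth noting: `sys.version_info <= (3, 11)` is False on 3.11.x point releases, since `(3, 11, 5)` compares greater than `(3, 11)`; the conventional guard tests against the floor of the new version instead. Sketch:

```python
import sys

if sys.version_info >= (3, 12):
    from typing import override
else:
    from typing_extensions import override


class Base:
    def ping(self) -> str:
        return "base"


class Child(Base):
    @override  # a checker flags this if Base.ping is renamed or removed
    def ping(self) -> str:
        return "child"


print(Child().ping())
```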
@@ -77,8 +82,8 @@ class AgentDataStore(AbstractWorkflowStore):
         )
 
     @override
-    async def delete(self, handler: HandlerQuery) -> int:
-        filters = self._build_filters(handler)
+    async def delete(self, query: HandlerQuery) -> int:
+        filters = self._build_filters(query)
         results = await self.client.search(filter=filters, page_size=1000)
         await asyncio.gather(
             *[self.client.delete_item(item_id=x.id) for x in results.items if x.id]
@@ -89,18 +94,21 @@ class AgentDataStore(AbstractWorkflowStore):
         cached_id = self.cache.get(handler.handler_id)
         if cached_id is not None:
             return cached_id
+        search_filter = {"handler_id": {"eq": handler.handler_id}}
         results = await self.client.search(
-            filter={"handler_id": {"eq": handler.handler_id}},
+            filter=cast(Any, search_filter),
             page_size=1,
         )
         if not results.items:
             return None
         id = results.items[0].id
+        if id is None:
+            return None
         self.cache.set(handler.handler_id, id)
         return id
 
     def _build_filters(self, query: HandlerQuery) -> dict[str, Any]:
-        filters = {}
+        filters: dict[str, Any] = {}
         if query.handler_id_in is not None:
             filters["handler_id"] = {
                 "includes": query.handler_id_in,
@@ -1,16 +1,17 @@
 import asyncio
 from collections import Counter
+from collections.abc import AsyncIterator
 from contextlib import asynccontextmanager
 
 
 class AsyncKeyedLock:
-    def __init__(self):
+    def __init__(self) -> None:
         self._locks: dict[str, asyncio.Lock] = {}
-        self._refcnt = Counter()
+        self._refcnt: Counter[str] = Counter()
         self._registry_lock = asyncio.Lock()  # protects _locks/_refcnt
 
     @asynccontextmanager
-    async def acquire(self, key: str):
+    async def acquire(self, key: str) -> AsyncIterator[None]:
         async with self._registry_lock:
             lock = self._locks.get(key)
             if lock is None:
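Usage sketch for `AsyncKeyedLock`: tasks contending on the same key serialize, while other keys proceed independently. The import path comes from the wheel's RECORD; the demo itself is illustrative:

```python
import asyncio

from llama_deploy.appserver.workflow_store.keyed_lock import AsyncKeyedLock

locks = AsyncKeyedLock()


async def critical(key: str, n: int) -> None:
    async with locks.acquire(key):  # now annotated as AsyncIterator[None]
        await asyncio.sleep(0)  # the per-key lock is held across awaits
        print(f"{key}:{n}")


async def main() -> None:
    await asyncio.gather(critical("a", 1), critical("a", 2), critical("b", 1))


asyncio.run(main())
```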
@@ -1,4 +1,5 @@
 from collections import OrderedDict
+from collections.abc import Iterator
 from typing import Generic, TypeVar, overload
 
 K = TypeVar("K")
@@ -21,7 +22,7 @@ class LRUCache(Generic[K, V]):
             return default
         return self[key]
 
-    def set(self, key: K, value: V):
+    def set(self, key: K, value: V) -> None:
         if key in self._store:
             # remove old so we can push to end
             self._store.pop(key)
@@ -41,11 +42,11 @@ class LRUCache(Generic[K, V]):
         self._store[key] = value
         return value
 
-    def __setitem__(self, key: K, value: V):
+    def __setitem__(self, key: K, value: V) -> None:
         self.set(key, value)
 
     def __len__(self) -> int:
         return len(self._store)
 
-    def __iter__(self):
+    def __iter__(self) -> Iterator[K]:
         return iter(self._store)
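With these annotations `LRUCache` participates fully in generic checking: iteration yields `K`, and the setters return `None`. Usage sketch; the import path comes from the wheel's RECORD, and the constructor argument is an assumption:

```python
from llama_deploy.appserver.workflow_store.lru_cache import LRUCache

cache: LRUCache[str, int] = LRUCache(maxsize=2)  # maxsize is hypothetical
cache.set("a", 1)
cache["b"] = 2  # __setitem__ -> None
for key in cache:  # __iter__ -> Iterator[str]
    print(key, cache.get(key))
```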
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: llama-deploy-appserver
-Version: 0.3.23
+Version: 0.3.24
 Summary: Application server components for LlamaDeploy
 Author: Massimiliano Pippi, Adrian Lyjak
 Author-email: Massimiliano Pippi <mpippi@gmail.com>, Adrian Lyjak <adrianlyjak@gmail.com>
@@ -9,7 +9,7 @@ Requires-Dist: llama-index-workflows[server]>=2.9.1
 Requires-Dist: pydantic-settings>=2.10.1
 Requires-Dist: fastapi>=0.100.0
 Requires-Dist: websockets>=12.0
-Requires-Dist: llama-deploy-core>=0.3.23,<0.4.0
+Requires-Dist: llama-deploy-core>=0.3.24,<0.4.0
 Requires-Dist: httpx>=0.24.0,<1.0.0
 Requires-Dist: prometheus-fastapi-instrumentator>=7.1.0
 Requires-Dist: packaging>=25.0
@@ -19,7 +19,8 @@ Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: llama-cloud-services>=0.6.60
 Requires-Dist: watchfiles>=1.1.0
 Requires-Dist: uvicorn>=0.35.0
-Requires-Python: >=3.11, <4
+Requires-Dist: typing-extensions>=4.15.0 ; python_full_version < '3.12'
+Requires-Python: >=3.10, <4
 Description-Content-Type: text/markdown
 
 # llama-deploy-appserver
@@ -0,0 +1,24 @@
+llama_deploy/appserver/__init__.py,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
+llama_deploy/appserver/app.py,sha256=654ba089ca7382a683c87f34ff3f78745cc6c7d658cda43fd03ea798538ab359,12772
+llama_deploy/appserver/bootstrap.py,sha256=34724cf3056b653b71d1bedab64dbde221d7c47443a2a41032e767d50168a24b,2581
+llama_deploy/appserver/configure_logging.py,sha256=2431fda77c47d0aef783992b0f01978a3a643768a2e82f10f3c263538f23d800,6361
+llama_deploy/appserver/correlation_id.py,sha256=8ac5bc6160c707b93a9fb818b64dd369a4ef7a53f9f91a6b3d90c4cf446f7327,572
+llama_deploy/appserver/deployment.py,sha256=f1ccdb2df6e333b40cede8e46a194f4fcf35673b0d66b307d6f3260c3478738a,6531
+llama_deploy/appserver/deployment_config_parser.py,sha256=e2b6c483203d96ab795c4e55df15c694c20458d5a03fab89c2b71e481291a2d3,510
+llama_deploy/appserver/interrupts.py,sha256=c101c76790280ee03deb44e798331c1e335593dd78dc781301f198469b31036a,1654
+llama_deploy/appserver/process_utils.py,sha256=c1fc6a624ba9675dc82531f787baa5f63b6b8aabfe3c47ea1d12e4a569a23820,6700
+llama_deploy/appserver/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
+llama_deploy/appserver/routers/__init__.py,sha256=ee2d14ebf4b067c844947ed1cc98186456e8bfa4919282722eaaf8cca345a138,214
+llama_deploy/appserver/routers/deployments.py,sha256=e7bafd72c1b4b809e5ad57442594a997c85ecab998b8430da65899faa910db1c,7572
+llama_deploy/appserver/routers/status.py,sha256=2af74bc40e52dc5944af2df98c6a021fea7b0cfcda88b56ac124dc383120758c,282
+llama_deploy/appserver/routers/ui_proxy.py,sha256=38f00793a5644c937b1f09af862a637dcaa1b844c979575ffa53065705390b21,8952
+llama_deploy/appserver/settings.py,sha256=aa4512d2f1f28b8ee7d3fedc8c61f77bce9f807f85857c0f6282054320c9da23,5124
+llama_deploy/appserver/stats.py,sha256=1f3989f6705a6de3e4d61ee8cdd189fbe04a2c53ec5e720b2e5168acc331427f,691
+llama_deploy/appserver/types.py,sha256=4edc991aafb6b8497f068d12387455df292da3ff8440223637641ab1632553ec,2133
+llama_deploy/appserver/workflow_loader.py,sha256=021d07a5e4a0f42129b7cdd042af8da7664a45adc8ee9cb555ccb413f53473da,15514
+llama_deploy/appserver/workflow_store/agent_data_store.py,sha256=ecf11a1454b13b7d618e720c8ba3be41833f322fdddfacc8356dfe34858d3a30,4421
+llama_deploy/appserver/workflow_store/keyed_lock.py,sha256=72bcfafbce56d5b36d53aff764b573c2dca2b3f5bc59f2d8baa80be0e4db6e34,1037
+llama_deploy/appserver/workflow_store/lru_cache.py,sha256=10b7d69e4be7d929d9dac009b59635b20fbed4603fb004bc35cbdc3ce538af8b,1454
+llama_deploy_appserver-0.3.24.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llama_deploy_appserver-0.3.24.dist-info/METADATA,sha256=80ef8eb718c57bd747754b6ea5fdb32da351afac1c5e1ba4e9129f7257df86ce,1154
+llama_deploy_appserver-0.3.24.dist-info/RECORD,,
@@ -1,24 +0,0 @@
-llama_deploy/appserver/__init__.py,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
-llama_deploy/appserver/app.py,sha256=64637dec1504d3a05f68ef2e91472754a8089a227f51948897121c5449bad1d4,12562
-llama_deploy/appserver/bootstrap.py,sha256=d0c7b4ad5bd64aa5070d993b1439006694a639010ae757b1d897680318173743,2573
-llama_deploy/appserver/configure_logging.py,sha256=194dd1ebed3c1d9065d9174f7828d557a577eaac8fb0443b3102430b1f578c19,6329
-llama_deploy/appserver/correlation_id.py,sha256=8ac5bc6160c707b93a9fb818b64dd369a4ef7a53f9f91a6b3d90c4cf446f7327,572
-llama_deploy/appserver/deployment.py,sha256=4cec5be1872005a275be74d96ed7a80fe417195bd2c3a3845a105b485c9de794,6430
-llama_deploy/appserver/deployment_config_parser.py,sha256=e2b6c483203d96ab795c4e55df15c694c20458d5a03fab89c2b71e481291a2d3,510
-llama_deploy/appserver/interrupts.py,sha256=14f262a0cedc00bb3aecd3d6c14c41ba0e88e7d2a6df02cd35b5bea1940822a2,1622
-llama_deploy/appserver/process_utils.py,sha256=2f501d31df2ab77ad249139801f885b956cc5750fb6d048079fc9f5ff12d403e,6518
-llama_deploy/appserver/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
-llama_deploy/appserver/routers/__init__.py,sha256=ee2d14ebf4b067c844947ed1cc98186456e8bfa4919282722eaaf8cca345a138,214
-llama_deploy/appserver/routers/deployments.py,sha256=e7bafd72c1b4b809e5ad57442594a997c85ecab998b8430da65899faa910db1c,7572
-llama_deploy/appserver/routers/status.py,sha256=2af74bc40e52dc5944af2df98c6a021fea7b0cfcda88b56ac124dc383120758c,282
-llama_deploy/appserver/routers/ui_proxy.py,sha256=78b334097dc8a14916aa403ffe1a23d0108fe26aba0f9c44d872349755b2da73,8735
-llama_deploy/appserver/settings.py,sha256=aa4512d2f1f28b8ee7d3fedc8c61f77bce9f807f85857c0f6282054320c9da23,5124
-llama_deploy/appserver/stats.py,sha256=1f3989f6705a6de3e4d61ee8cdd189fbe04a2c53ec5e720b2e5168acc331427f,691
-llama_deploy/appserver/types.py,sha256=4edc991aafb6b8497f068d12387455df292da3ff8440223637641ab1632553ec,2133
-llama_deploy/appserver/workflow_loader.py,sha256=03ef9d2d5eed6b0ec3e3520a6566163637b6ba99b9fa77ecee8ce0243a211d13,15413
-llama_deploy/appserver/workflow_store/agent_data_store.py,sha256=c58d84ac658679cb9c9e4c7bc2f4096af8bb1aa8f6e3c24ff5a84be1125f4ab3,4221
-llama_deploy/appserver/workflow_store/keyed_lock.py,sha256=bb1504d9de09d51a8f60721cc77b14d4051ac5a897ace6f9d9cba494f068465e,950
-llama_deploy/appserver/workflow_store/lru_cache.py,sha256=bb05c573cf92532a3e673e5b3d5765c4165bf6f8d985744941580c06ceb3eb2d,1386
-llama_deploy_appserver-0.3.23.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
-llama_deploy_appserver-0.3.23.dist-info/METADATA,sha256=b49f84e397248967022dee1767cbd58b354a06fa7fbdd667d0bbd9f61ae74962,1082
-llama_deploy_appserver-0.3.23.dist-info/RECORD,,