llama-deploy-appserver 0.3.24.tar.gz → 0.3.26.tar.gz

This diff compares the contents of package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (24)
  1. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/PKG-INFO +3 -3
  2. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/pyproject.toml +3 -3
  3. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/app.py +81 -0
  4. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/workflow_loader.py +39 -27
  5. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/README.md +0 -0
  6. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/__init__.py +0 -0
  7. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/bootstrap.py +0 -0
  8. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/configure_logging.py +0 -0
  9. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/correlation_id.py +0 -0
  10. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/deployment.py +0 -0
  11. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/deployment_config_parser.py +0 -0
  12. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/interrupts.py +0 -0
  13. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/process_utils.py +0 -0
  14. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/py.typed +0 -0
  15. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/routers/__init__.py +0 -0
  16. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/routers/deployments.py +0 -0
  17. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/routers/status.py +0 -0
  18. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/routers/ui_proxy.py +0 -0
  19. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/settings.py +0 -0
  20. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/stats.py +0 -0
  21. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/types.py +0 -0
  22. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/workflow_store/agent_data_store.py +0 -0
  23. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/workflow_store/keyed_lock.py +0 -0
  24. {llama_deploy_appserver-0.3.24 → llama_deploy_appserver-0.3.26}/src/llama_deploy/appserver/workflow_store/lru_cache.py +0 -0

PKG-INFO
@@ -1,15 +1,15 @@
 Metadata-Version: 2.3
 Name: llama-deploy-appserver
-Version: 0.3.24
+Version: 0.3.26
 Summary: Application server components for LlamaDeploy
 Author: Massimiliano Pippi, Adrian Lyjak
 Author-email: Massimiliano Pippi <mpippi@gmail.com>, Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-index-workflows[server]>=2.9.1
+Requires-Dist: llama-index-workflows[server]>=2.11.3
 Requires-Dist: pydantic-settings>=2.10.1
 Requires-Dist: fastapi>=0.100.0
 Requires-Dist: websockets>=12.0
-Requires-Dist: llama-deploy-core>=0.3.24,<0.4.0
+Requires-Dist: llama-deploy-core>=0.3.26,<0.4.0
 Requires-Dist: httpx>=0.24.0,<1.0.0
 Requires-Dist: prometheus-fastapi-instrumentator>=7.1.0
 Requires-Dist: packaging>=25.0

pyproject.toml
@@ -14,7 +14,7 @@ dev = [
 
 [project]
 name = "llama-deploy-appserver"
-version = "0.3.24"
+version = "0.3.26"
 description = "Application server components for LlamaDeploy"
 readme = "README.md"
 license = {text = "MIT"}
@@ -24,11 +24,11 @@ authors = [
 ]
 requires-python = ">=3.10, <4"
 dependencies = [
-    "llama-index-workflows[server]>=2.9.1",
+    "llama-index-workflows[server]>=2.11.3",
     "pydantic-settings>=2.10.1",
     "fastapi>=0.100.0",
     "websockets>=12.0",
-    "llama-deploy-core>=0.3.24,<0.4.0",
+    "llama-deploy-core>=0.3.26,<0.4.0",
     "httpx>=0.24.0,<1.0.0",
     "prometheus-fastapi-instrumentator>=7.1.0",
     "packaging>=25.0",

src/llama_deploy/appserver/app.py
@@ -1,4 +1,5 @@
 import argparse
+import json
 import logging
 import os
 import threading
@@ -329,6 +330,44 @@ def start_preflight_in_target_venv(
     # Note: run_process doesn't return exit code; process runs to completion or raises
 
 
+def start_export_json_graph_in_target_venv(
+    cwd: Path | None = None,
+    deployment_file: Path | None = None,
+    output: Path | None = None,
+) -> None:
+    """
+    Run workflow graph export inside the target project's virtual environment using uv.
+    Mirrors the venv targeting and invocation strategy used by start_preflight_in_target_venv.
+    """
+
+    configure_settings(
+        app_root=cwd,
+        deployment_file_path=deployment_file or Path(DEFAULT_DEPLOYMENT_FILE_PATH),
+    )
+    base_dir = cwd or Path.cwd()
+    path = settings.resolved_config_parent.relative_to(base_dir)
+    args = [
+        "uv",
+        "run",
+        "--no-progress",
+        "python",
+        "-m",
+        "llama_deploy.appserver.app",
+        "--export-json-graph",
+    ]
+    if deployment_file:
+        args.extend(["--deployment-file", str(deployment_file)])
+    if output is not None:
+        args.extend(["--export-output", str(output)])
+
+    run_process(
+        args,
+        cwd=path,
+        env=os.environ.copy(),
+        line_transform=_exclude_venv_warning,
+    )
+
+
 class PreflightValidationError(Exception):
     """Raised when workflow validations fail during preflight.
 
@@ -376,6 +415,40 @@ def preflight_validate(
         raise PreflightValidationError(errors)
 
 
+def export_json_graph(
+    cwd: Path | None = None,
+    deployment_file: Path | None = None,
+    output: Path | None = None,
+) -> None:
+    """
+    Export a JSON representation of the registered workflows' graph.
+
+    This follows the same initialization path as preflight validation and writes
+    a workflows.json-style structure compatible with the CLI expectations.
+    """
+    from workflows.representation_utils import extract_workflow_structure
+
+    configure_settings(
+        app_root=cwd,
+        deployment_file_path=deployment_file or Path(DEFAULT_DEPLOYMENT_FILE_PATH),
+    )
+    cfg = get_deployment_config()
+    load_environment_variables(cfg, settings.resolved_config_parent)
+
+    workflows = load_workflows(cfg)
+
+    graph: dict[str, dict[str, Any]] = {}
+    for name, workflow in workflows.items():
+        wf_repr_dict = (
+            extract_workflow_structure(workflow).to_response_model().model_dump()
+        )
+        graph[name] = wf_repr_dict
+
+    output_path = output or (Path.cwd() / "workflows.json")
+    with output_path.open("w", encoding="utf-8") as f:
+        json.dump(graph, f, indent=2)
+
+
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--proxy-ui", action="store_true")
@@ -383,10 +456,18 @@ if __name__ == "__main__":
     parser.add_argument("--deployment-file", type=Path)
     parser.add_argument("--open-browser", action="store_true")
    parser.add_argument("--preflight", action="store_true")
+    parser.add_argument("--export-json-graph", action="store_true")
+    parser.add_argument("--export-output", type=Path)
 
     args = parser.parse_args()
     if args.preflight:
         preflight_validate(cwd=Path.cwd(), deployment_file=args.deployment_file)
+    elif args.export_json_graph:
+        export_json_graph(
+            cwd=Path.cwd(),
+            deployment_file=args.deployment_file,
+            output=args.export_output,
+        )
     else:
         start_server(
             proxy_ui=args.proxy_ui,
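
Taken together, the app.py changes add a JSON graph-export path: export_json_graph loads the deployment config, serializes each registered workflow via extract_workflow_structure(...).to_response_model().model_dump(), and writes the mapping to workflows.json (or the path given with --export-output), while start_export_json_graph_in_target_venv re-invokes this module through uv run with the new --export-json-graph flag. A hedged sketch of driving the new CLI surface from the outside and reading the result back; the deployment.yaml path is a placeholder, not something defined by this diff:

# Illustrative driver for the new flags; assumes uv and a deployment project exist.
import json
import subprocess
from pathlib import Path

output = Path("workflows.json")
subprocess.run(
    [
        "uv", "run", "--no-progress", "python", "-m", "llama_deploy.appserver.app",
        "--export-json-graph",
        "--deployment-file", "deployment.yaml",  # placeholder path
        "--export-output", str(output),
    ],
    check=True,
)

# The export is a mapping of workflow name -> serialized graph representation.
graph = json.loads(output.read_text(encoding="utf-8"))
for name in graph:
    print(name)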

src/llama_deploy/appserver/workflow_loader.py
@@ -12,11 +12,9 @@ from pathlib import Path
 from textwrap import dedent
 
 from dotenv import dotenv_values
-from llama_deploy.appserver.deployment_config_parser import (
-    DeploymentConfig,
-)
 from llama_deploy.appserver.process_utils import run_process, spawn_process
 from llama_deploy.appserver.settings import ApiserverSettings, settings
+from llama_deploy.core.deployment_config import DeploymentConfig
 from llama_deploy.core.ui_build import ui_build_output_path
 from packaging.version import InvalidVersion, Version
 from workflows import Workflow
@@ -191,6 +189,34 @@ def _is_missing_or_outdated(path: Path) -> Version | None:
     return None
 
 
+def run_uv(
+    source_root: Path,
+    path: Path,
+    cmd: str,
+    args: list[str] = [],
+    extra_env: dict[str, str] | None = None,
+) -> None:
+    env = os.environ.copy()
+    if extra_env:
+        env.update(extra_env)
+    run_process(
+        ["uv", cmd] + args,
+        cwd=source_root / path,
+        prefix=f"[uv {cmd}]",
+        color_code="36",
+        use_tty=False,
+        line_transform=_exclude_venv_warning,
+        env=env,
+    )
+
+
+def ensure_venv(source_root: Path, path: Path, force: bool = False) -> Path:
+    venv_path = source_root / path / ".venv"
+    if force or not venv_path.exists():
+        run_uv(source_root, path, "venv", [str(venv_path)])
+    return venv_path
+
+
 def _install_and_add_appserver_if_missing(
     path: Path,
     source_root: Path,
@@ -207,31 +233,11 @@ def _install_and_add_appserver_if_missing(
         )
         return
 
-    def run_uv(
-        cmd: str, args: list[str] = [], extra_env: dict[str, str] | None = None
-    ) -> None:
-        env = os.environ.copy()
-        if extra_env:
-            env.update(extra_env)
-        run_process(
-            ["uv", cmd] + args,
-            cwd=source_root / path,
-            prefix=f"[uv {cmd}]",
-            color_code="36",
-            use_tty=False,
-            line_transform=_exclude_venv_warning,
-            env=env,
-        )
-
-    def ensure_venv(path: Path, force: bool = False) -> Path:
-        venv_path = source_root / path / ".venv"
-        if force or not venv_path.exists():
-            run_uv("venv", [str(venv_path)])
-        return venv_path
-
     editable = are_we_editable_mode()
-    venv_path = ensure_venv(path, force=editable)
+    venv_path = ensure_venv(source_root, path, force=editable)
     run_uv(
+        source_root,
+        path,
         "sync",
         ["--no-dev", "--inexact"],
         extra_env={"UV_PROJECT_ENVIRONMENT": str(venv_path)},
@@ -239,6 +245,8 @@ def _install_and_add_appserver_if_missing(
 
     if sdists:
         run_uv(
+            source_root,
+            path,
             "pip",
             ["install"]
             + [str(s.absolute()) for s in sdists]
@@ -261,6 +269,8 @@ def _install_and_add_appserver_if_missing(
         target = f"file://{str(rel)}"
 
         run_uv(
+            source_root,
+            path,
             "pip",
             [
                 "install",
@@ -276,9 +286,11 @@ def _install_and_add_appserver_if_missing(
     version = _is_missing_or_outdated(path)
     if version is not None:
         if save_version:
-            run_uv("add", [f"llama-deploy-appserver>={version}"])
+            run_uv(source_root, path, "add", [f"llama-deploy-appserver>={version}"])
         else:
             run_uv(
+                source_root,
+                path,
                 "pip",
                 [
                     "install",