llamactl 0.3.2__tar.gz → 0.3.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {llamactl-0.3.2 → llamactl-0.3.4}/PKG-INFO +3 -3
  2. {llamactl-0.3.2 → llamactl-0.3.4}/pyproject.toml +3 -3
  3. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/commands/init.py +97 -89
  4. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/commands/serve.py +12 -6
  5. {llamactl-0.3.2 → llamactl-0.3.4}/README.md +0 -0
  6. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/__init__.py +0 -0
  7. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/app.py +0 -0
  8. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/auth/client.py +0 -0
  9. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/client.py +0 -0
  10. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/commands/aliased_group.py +0 -0
  11. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/commands/auth.py +0 -0
  12. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/commands/deployment.py +0 -0
  13. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/commands/env.py +0 -0
  14. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/config/_config.py +0 -0
  15. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/config/_migrations.py +0 -0
  16. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/config/auth_service.py +0 -0
  17. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/config/env_service.py +0 -0
  18. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/config/migrations/0001_init.sql +0 -0
  19. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/config/migrations/0002_add_auth_fields.sql +0 -0
  20. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/config/migrations/__init__.py +0 -0
  21. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/config/schema.py +0 -0
  22. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/debug.py +0 -0
  23. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/env.py +0 -0
  24. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/interactive_prompts/session_utils.py +0 -0
  25. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/interactive_prompts/utils.py +0 -0
  26. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/options.py +0 -0
  27. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/py.typed +0 -0
  28. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/styles.py +0 -0
  29. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/textual/deployment_form.py +0 -0
  30. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/textual/deployment_help.py +0 -0
  31. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/textual/deployment_monitor.py +0 -0
  32. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/textual/git_validation.py +0 -0
  33. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/textual/github_callback_server.py +0 -0
  34. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/textual/llama_loader.py +0 -0
  35. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/textual/secrets_form.py +0 -0
  36. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/textual/styles.tcss +0 -0
  37. {llamactl-0.3.2 → llamactl-0.3.4}/src/llama_deploy/cli/utils/env_inject.py +0 -0
@@ -1,12 +1,12 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: llamactl
3
- Version: 0.3.2
3
+ Version: 0.3.4
4
4
  Summary: A command-line interface for managing LlamaDeploy projects and deployments
5
5
  Author: Adrian Lyjak
6
6
  Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
7
7
  License: MIT
8
- Requires-Dist: llama-deploy-core[client]>=0.3.2,<0.4.0
9
- Requires-Dist: llama-deploy-appserver>=0.3.2,<0.4.0
8
+ Requires-Dist: llama-deploy-core[client]>=0.3.4,<0.4.0
9
+ Requires-Dist: llama-deploy-appserver>=0.3.4,<0.4.0
10
10
  Requires-Dist: httpx>=0.24.0,<1.0.0
11
11
  Requires-Dist: rich>=13.0.0
12
12
  Requires-Dist: questionary>=2.0.0
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "llamactl"
3
- version = "0.3.2"
3
+ version = "0.3.4"
4
4
  description = "A command-line interface for managing LlamaDeploy projects and deployments"
5
5
  readme = "README.md"
6
6
  license = { text = "MIT" }
@@ -9,8 +9,8 @@ authors = [
9
9
  ]
10
10
  requires-python = ">=3.11, <4"
11
11
  dependencies = [
12
- "llama-deploy-core[client]>=0.3.2,<0.4.0",
13
- "llama-deploy-appserver>=0.3.2,<0.4.0",
12
+ "llama-deploy-core[client]>=0.3.4,<0.4.0",
13
+ "llama-deploy-appserver>=0.3.4,<0.4.0",
14
14
  "httpx>=0.24.0,<1.0.0",
15
15
  "rich>=13.0.0",
16
16
  "questionary>=2.0.0",
@@ -15,94 +15,6 @@ from llama_deploy.cli.app import app
15
15
  from llama_deploy.cli.options import global_options
16
16
  from llama_deploy.cli.styles import HEADER_COLOR_HEX
17
17
  from rich import print as rprint
18
- from vibe_llama.scaffold import create_scaffold
19
- from vibe_llama.scaffold.scaffold import ProjectName
20
- from vibe_llama.sdk import VibeLlamaStarter
21
-
22
-
23
- @dataclass
24
- class TemplateOption:
25
- id: str
26
- name: str
27
- description: str
28
- source: VibeLlamaTemplate | GithubTemplateRepo
29
- llama_cloud: bool
30
-
31
-
32
- @dataclass
33
- class VibeLlamaTemplate:
34
- name: ProjectName
35
-
36
-
37
- @dataclass
38
- class GithubTemplateRepo:
39
- url: str
40
-
41
-
42
- ui_options = [
43
- TemplateOption(
44
- id="basic-ui",
45
- name="Basic UI",
46
- description="A basic starter workflow with a React Vite UI",
47
- source=GithubTemplateRepo(
48
- url="https://github.com/run-llama/template-workflow-basic-ui"
49
- ),
50
- llama_cloud=False,
51
- ),
52
- TemplateOption(
53
- id="extraction-review",
54
- name="Extraction Agent with Review UI",
55
- description="Extract data from documents using a custom schema and Llama Cloud. Includes a UI to review and correct the results",
56
- source=GithubTemplateRepo(
57
- url="https://github.com/run-llama/template-workflow-data-extraction"
58
- ),
59
- llama_cloud=True,
60
- ),
61
- ]
62
- headless_options = [
63
- TemplateOption(
64
- id="basic",
65
- name="Basic Workflow",
66
- description="A base example that showcases usage patterns for workflows",
67
- source=VibeLlamaTemplate(name="basic"),
68
- llama_cloud=False,
69
- ),
70
- TemplateOption(
71
- id="document_parsing",
72
- name="Document Parser",
73
- description="A workflow that, using LlamaParse, parses unstructured documents and returns their raw text content",
74
- source=VibeLlamaTemplate(name="document_parsing"),
75
- llama_cloud=True,
76
- ),
77
- TemplateOption(
78
- id="human_in_the_loop",
79
- name="Human in the Loop",
80
- description="A workflow showcasing how to use human in the loop with LlamaIndex workflows",
81
- source=VibeLlamaTemplate(name="human_in_the_loop"),
82
- llama_cloud=False,
83
- ),
84
- TemplateOption(
85
- id="invoice_extraction",
86
- name="Invoice Extraction",
87
- description="A workflow that, given an invoice, extracts several key details using LlamaExtract",
88
- source=VibeLlamaTemplate(name="invoice_extraction"),
89
- llama_cloud=True,
90
- ),
91
- TemplateOption(
92
- id="rag",
93
- name="RAG",
94
- description="A workflow that embeds, indexes and queries your documents on the fly, providing you with a simple RAG pipeline",
95
- source=VibeLlamaTemplate(name="rag"),
96
- llama_cloud=False,
97
- ),
98
- TemplateOption(
99
- id="web_scraping",
100
- name="Web Scraping",
101
- description="A workflow that, given several urls, scrapes and summarizes their content using Google's Gemini API",
102
- source=VibeLlamaTemplate(name="web_scraping"),
103
- llama_cloud=False,
104
- ),
105
- ]
106
18
 
107
19
 
108
20
  @app.command()
@@ -113,7 +25,6 @@ headless_options = [
113
25
  )
114
26
  @click.option(
115
27
  "--template",
116
- type=click.Choice([o.id for o in ui_options]),
117
28
  help="The template to use for the new app",
118
29
  )
119
30
  @click.option(
@@ -143,6 +54,102 @@ def init(
143
54
 
144
55
 
145
56
  def _create(template: str | None, dir: Path | None, force: bool) -> None:
57
+ # defer loading to improve cli startup time
58
+ from vibe_llama.scaffold import create_scaffold
59
+ from vibe_llama.scaffold.scaffold import ProjectName
60
+ from vibe_llama.sdk import VibeLlamaStarter
61
+
62
+ @dataclass
63
+ class TemplateOption:
64
+ id: str
65
+ name: str
66
+ description: str
67
+ source: VibeLlamaTemplate | GithubTemplateRepo
68
+ llama_cloud: bool
69
+
70
+ @dataclass
71
+ class VibeLlamaTemplate:
72
+ name: ProjectName
73
+
74
+ @dataclass
75
+ class GithubTemplateRepo:
76
+ url: str
77
+
78
+ ui_options = [
79
+ TemplateOption(
80
+ id="basic-ui",
81
+ name="Basic UI",
82
+ description="A basic starter workflow with a React Vite UI",
83
+ source=GithubTemplateRepo(
84
+ url="https://github.com/run-llama/template-workflow-basic-ui"
85
+ ),
86
+ llama_cloud=False,
87
+ ),
88
+ TemplateOption(
89
+ id="document-qa",
90
+ name="Document Question & Answer",
91
+ description="Upload documents and run question answering through a React UI",
92
+ source=GithubTemplateRepo(
93
+ url="https://github.com/run-llama/template-workflow-document-qa"
94
+ ),
95
+ llama_cloud=True,
96
+ ),
97
+ TemplateOption(
98
+ id="extraction-review",
99
+ name="Extraction Agent with Review UI",
100
+ description="Extract data from documents using a custom schema and Llama Cloud. Includes a UI to review and correct the results",
101
+ source=GithubTemplateRepo(
102
+ url="https://github.com/run-llama/template-workflow-data-extraction"
103
+ ),
104
+ llama_cloud=True,
105
+ ),
106
+ ]
107
+
108
+ headless_options = [
109
+ TemplateOption(
110
+ id="basic",
111
+ name="Basic Workflow",
112
+ description="A base example that showcases usage patterns for workflows",
113
+ source=VibeLlamaTemplate(name="basic"),
114
+ llama_cloud=False,
115
+ ),
116
+ TemplateOption(
117
+ id="document_parsing",
118
+ name="Document Parser",
119
+ description="A workflow that, using LlamaParse, parses unstructured documents and returns their raw text content",
120
+ source=VibeLlamaTemplate(name="document_parsing"),
121
+ llama_cloud=True,
122
+ ),
123
+ TemplateOption(
124
+ id="human_in_the_loop",
125
+ name="Human in the Loop",
126
+ description="A workflow showcasing how to use human in the loop with LlamaIndex workflows",
127
+ source=VibeLlamaTemplate(name="human_in_the_loop"),
128
+ llama_cloud=False,
129
+ ),
130
+ TemplateOption(
131
+ id="invoice_extraction",
132
+ name="Invoice Extraction",
133
+ description="A workflow that, given an invoice, extracts several key details using LlamaExtract",
134
+ source=VibeLlamaTemplate(name="invoice_extraction"),
135
+ llama_cloud=True,
136
+ ),
137
+ TemplateOption(
138
+ id="rag",
139
+ name="RAG",
140
+ description="A workflow that embeds, indexes and queries your documents on the fly, providing you with a simple RAG pipeline",
141
+ source=VibeLlamaTemplate(name="rag"),
142
+ llama_cloud=False,
143
+ ),
144
+ TemplateOption(
145
+ id="web_scraping",
146
+ name="Web Scraping",
147
+ description="A workflow that, given several urls, scrapes and summarizes their content using Google's Gemini API",
148
+ source=VibeLlamaTemplate(name="web_scraping"),
149
+ llama_cloud=False,
150
+ ),
151
+ ]
152
+
146
153
  if template is None:
147
154
  rprint(
148
155
  "[bold]Select a template to start from.[/bold] Either with javascript frontend UI, or just a python workflow that can be used as an API."
@@ -217,6 +224,7 @@ def _create(template: str | None, dir: Path | None, force: bool) -> None:
217
224
 
218
225
  try:
219
226
  # Dump in a bunch of docs for AI agents
227
+
220
228
  vibe_llama_starter = VibeLlamaStarter(
221
229
  agents=["OpenAI Codex CLI"], # AGENTS.md, supported by Cursor,
222
230
  services=["LlamaIndex", "llama-index-workflows"]
@@ -5,12 +5,6 @@ from typing import Literal
5
5
 
6
6
  import click
7
7
  import questionary
8
- from llama_deploy.appserver.app import (
9
- prepare_server,
10
- start_server_in_target_venv,
11
- )
12
- from llama_deploy.appserver.deployment_config_parser import get_deployment_config
13
- from llama_deploy.appserver.workflow_loader import parse_environment_variables
14
8
  from llama_deploy.cli.commands.auth import validate_authenticated_profile
15
9
  from llama_deploy.cli.config.env_service import service
16
10
  from llama_deploy.cli.config.schema import Auth
@@ -99,6 +93,15 @@ def serve(
99
93
  deployment_file, interactive, require_cloud=persistence == "cloud"
100
94
  )
101
95
 
96
+ # Defer heavy appserver imports until the `serve` command is actually invoked
97
+ from llama_deploy.appserver.app import (
98
+ prepare_server,
99
+ start_server_in_target_venv,
100
+ )
101
+ from llama_deploy.appserver.deployment_config_parser import (
102
+ get_deployment_config,
103
+ )
104
+
102
105
  prepare_server(
103
106
  deployment_file=deployment_file,
104
107
  install=not no_install,
@@ -192,6 +195,9 @@ def _maybe_inject_llama_cloud_credentials(
192
195
  if not config.llama_cloud and not require_cloud:
193
196
  return
194
197
 
198
+ # Import lazily to avoid loading appserver dependencies on general CLI startup
199
+ from llama_deploy.appserver.workflow_loader import parse_environment_variables
200
+
195
201
  vars = parse_environment_variables(
196
202
  config, deployment_file.parent if deployment_file.is_file() else deployment_file
197
203
  )
File without changes