llamactl-0.3.0a12-py3-none-any.whl → llamactl-0.3.0a14-py3-none-any.whl

This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
@@ -2,6 +2,9 @@ import logging
 from typing import Callable, ParamSpec, TypeVar

 import click
+from llama_deploy.cli.interactive_prompts.session_utils import is_interactive_session
+
+from .debug import setup_file_logging

 P = ParamSpec("P")
 R = TypeVar("R")
@@ -9,7 +12,6 @@ R = TypeVar("R")

 def global_options(f: Callable[P, R]) -> Callable[P, R]:
     """Common decorator to add global options to command groups"""
-    from .debug import setup_file_logging

     def debug_callback(ctx: click.Context, param: click.Parameter, value: str) -> str:
         if value:
@@ -27,3 +29,15 @@ def global_options(f: Callable[P, R]) -> Callable[P, R]:
         is_eager=True,
         hidden=True,
     )(f)
+
+
+def interactive_option(f: Callable[P, R]) -> Callable[P, R]:
+    """Add an interactive option to the command"""
+
+    default = is_interactive_session()
+    return click.option(
+        "--interactive/--no-interactive",
+        help="Run in interactive mode. If not provided, will default to the current session's interactive state.",
+        is_flag=True,
+        default=default,
+    )(f)
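
The new interactive_option decorator defaults its --interactive/--no-interactive flag to the result of is_interactive_session(), so commands follow the terminal's interactivity unless the flag is passed explicitly. The sketch below shows how such a decorator is typically applied to a click command; the command name and the llama_deploy.cli.options import path are assumptions inferred from the RECORD listing at the end of this diff, not something the diff itself demonstrates.

    # Hypothetical usage sketch (not part of the package diff).
    import click

    from llama_deploy.cli.options import interactive_option  # assumed import path


    @click.command()
    @interactive_option
    def deploy(interactive: bool) -> None:
        # The injected `interactive` parameter defaults to the session's interactivity.
        if interactive:
            click.echo("prompting for any missing values...")
        else:
            click.echo("running non-interactively")


    if __name__ == "__main__":
        deploy()
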
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+from datetime import datetime
+
+import httpx
+from pydantic import BaseModel, TypeAdapter
+
+
+class PlatformClient:
+    def __init__(self, auth_token: str, platform_url: str):
+        self.auth_token = auth_token
+        self.platform_url = platform_url
+        self.client = httpx.AsyncClient(base_url=platform_url)
+
+    async def list_projects(self) -> list[Project]:
+        response = await self.client.get("/api/v1/projects")
+        response.raise_for_status()
+        return ProjectList.validate_python(response.json())
+
+    async def list_organizations(self) -> list[Organization]:
+        response = await self.client.get("/api/v1/organizations")
+        response.raise_for_status()
+        return OrganizationList.validate_python(response.json())
+
+    async def validate_auth_token(self) -> bool:
+        response = await self.client.get("/api/v1/organizations/default")
+        try:
+            response.raise_for_status()
+            return True
+        except httpx.HTTPStatusError:
+            if response.status_code == 401:
+                return False
+            raise
+
+
+class Organization(BaseModel):
+    id: str
+    name: str
+    created_at: datetime
+    updated_at: datetime
+
+
+class Project(BaseModel):
+    id: str
+    name: str
+    organization_id: str
+    created_at: datetime
+    updated_at: datetime
+
+
+OrganizationList = TypeAdapter(list[Organization])
+ProjectList = TypeAdapter(list[Project])
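
The new module above is an async client around httpx.AsyncClient, with Pydantic models and TypeAdapter validators for the list endpoints. A minimal usage sketch follows; the llama_deploy.cli.platform_client import path is taken from the RECORD listing at the end of this diff, the token and URL are placeholders, and the diff does not show how the stored auth_token is attached to requests.

    # Hypothetical usage sketch (not part of the package diff).
    import asyncio

    from llama_deploy.cli.platform_client import PlatformClient  # path per RECORD


    async def main() -> None:
        client = PlatformClient(auth_token="<token>", platform_url="https://platform.example.invalid")
        # validate_auth_token() returns False on a 401 and re-raises other HTTP errors.
        if not await client.validate_auth_token():
            raise SystemExit("auth token rejected")
        for org in await client.list_organizations():
            print(org.id, org.name)
        for project in await client.list_projects():
            print(project.organization_id, project.name)


    if __name__ == "__main__":
        asyncio.run(main())
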
@@ -405,7 +405,9 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
         self.save_error = ""  # Clear any previous errors
         self.current_state = "validation"

-    def on_validation_result_message(self, message: ValidationResultMessage) -> None:
+    async def on_validation_result_message(
+        self, message: ValidationResultMessage
+    ) -> None:
         """Handle validation success from git validation widget"""
         logging.info("validation result message", message)
         # Update form data with validated PAT if provided
@@ -417,8 +419,8 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
             updated_form.has_existing_pat = False
         self.form_data = updated_form

-        # Proceed with save
-        self._perform_save()
+        # Proceed with save (async)
+        await self._perform_save()

     def on_validation_cancel_message(self, message: ValidationCancelMessage) -> None:
         """Handle validation cancellation from git validation widget"""
@@ -435,18 +437,17 @@ class DeploymentEditApp(App[DeploymentResponse | None]):
         """Return from help to form, keeping form state intact."""
         self.current_state = "form"

-    def _perform_save(self) -> None:
+    async def _perform_save(self) -> None:
         """Actually save the deployment after validation"""
         logging.info("saving form data", self.form_data)
         result = self.form_data
         client = get_client()
         try:
-            if result.is_editing:
-                update_deployment = client.update_deployment(
-                    result.id, result.to_update()
-                )
-            else:
-                update_deployment = client.create_deployment(result.to_create())
+            update_deployment = (
+                await client.update_deployment(result.id, result.to_update())
+                if result.is_editing
+                else await client.create_deployment(result.to_create())
+            )
             # Save and navigate to embedded monitor screen
             self.saved_deployment = update_deployment
             # Ensure form_data carries the new ID for any subsequent operations
@@ -7,11 +7,11 @@ import hashlib
 import threading
 import time
 from pathlib import Path
-from typing import Iterator

-from llama_deploy.cli.client import get_project_client as get_client
-from llama_deploy.core.client.manage_client import Closer
-from llama_deploy.core.schema.base import LogEvent
+from llama_deploy.cli.client import (
+    project_client_context,
+)
+from llama_deploy.core.schema import LogEvent
 from llama_deploy.core.schema.deployments import DeploymentResponse
 from rich.text import Text
 from textual import events
@@ -78,7 +78,6 @@ class DeploymentMonitorWidget(Widget):
     error_message = reactive("", recompose=False)
     wrap_enabled = reactive(False, recompose=False)
     autoscroll_enabled = reactive(True, recompose=False)
-    stream_closer: Closer | None = None

     def __init__(self, deployment_id: str) -> None:
         super().__init__()
@@ -87,12 +86,12 @@ class DeploymentMonitorWidget(Widget):
         # Persist content written to the RichLog across recomposes
         self._log_buffer: list[Text] = []

-    def on_mount(self) -> None:
+    async def on_mount(self) -> None:
         # Kick off initial fetch and start logs stream in background
-        self.run_worker(self._fetch_deployment(), exclusive=True)
-        self.run_worker(self._stream_logs, exclusive=False, thread=True)
+        self.run_worker(self._fetch_deployment())
+        self.run_worker(self._stream_logs())
         # Start periodic polling of deployment status
-        self.run_worker(self._poll_deployment_status(), exclusive=False)
+        self.run_worker(self._poll_deployment_status())

     def compose(self) -> ComposeResult:
         yield Static("Deployment Status", classes="primary-message")
@@ -141,81 +140,117 @@ class DeploymentMonitorWidget(Widget):

     async def _fetch_deployment(self) -> None:
         try:
-            client = get_client()
-            self.deployment = client.get_deployment(
-                self.deployment_id, include_events=True
-            )
+            async with project_client_context() as client:
+                self.deployment = await client.get_deployment(
+                    self.deployment_id, include_events=True
+                )
             # Clear any previous error on success
             self.error_message = ""
         except Exception as e:  # pragma: no cover - network errors
             self.error_message = f"Failed to fetch deployment: {e}"

-    def _stream_logs(self) -> None:
-        """Consume the blocking log iterator in a single worker thread.
+    async def _stream_logs(self) -> None:
+        """Consume the async log iterator, batch updates, and reconnect with backoff."""

-        Cooperative cancellation uses `self._stop_stream` to exit cleanly.
-        """
-        client = get_client()
-
-        def _sleep_with_cancel(total_seconds: float) -> None:
+        async def _sleep_with_cancel(total_seconds: float) -> None:
             step = 0.2
             remaining = total_seconds
             while remaining > 0 and not self._stop_stream.is_set():
-                time.sleep(min(step, remaining))
+                await asyncio.sleep(min(step, remaining))
                 remaining -= step

+        # Batching configuration: small latency to reduce UI churn while staying responsive
+        batch_max_latency_seconds = 0.1
+        batch_max_items = 200
+
         base_backoff_seconds = 0.2
         backoff_seconds = base_backoff_seconds
         max_backoff_seconds = 30.0

         while not self._stop_stream.is_set():
-            try:
-                connect_started_at = time.monotonic()
-                closer, stream = client.stream_deployment_logs(
-                    self.deployment_id,
-                    include_init_containers=True,
-                )
-                # On any (re)connect, clear existing content
-                self.app.call_from_thread(self._reset_log_view_for_reconnect)
-
-                buffered_stream = _buffer_log_lines(stream)
-
-                def close_stream():
+            connect_started_at = time.monotonic()
+            # On any (re)connect, clear existing content
+            self._reset_log_view_for_reconnect()
+
+            queue: asyncio.Queue[LogEvent] = asyncio.Queue(maxsize=10000)
+            producer_done = asyncio.Event()
+
+            async def _producer() -> None:
+                try:
+                    async with project_client_context() as client:
+                        async for event in client.stream_deployment_logs(
+                            self.deployment_id,
+                            include_init_containers=True,
+                            tail_lines=10000,
+                        ):
+                            if self._stop_stream.is_set():
+                                break
+                            try:
+                                await queue.put(event)
+                            except Exception:
+                                # If queue put fails due to cancellation/shutdown, stop
+                                break
+                except Exception as e:
+                    # Surface error via error message and rely on reconnect loop
+                    if not self._stop_stream.is_set():
+                        self._set_error_message(
+                            f"Log stream failed: {e}. Reconnecting..."
+                        )
+                finally:
+                    producer_done.set()
+
+            async def _consumer() -> None:
+                batch: list[LogEvent] = []
+                next_deadline = time.monotonic() + batch_max_latency_seconds
+                while not self._stop_stream.is_set():
+                    # Stop once producer finished and queue drained
+                    if producer_done.is_set() and queue.empty():
+                        if batch:
+                            self._handle_log_events(batch)
+                            batch = []
+                        break
+                    timeout = max(0.0, next_deadline - time.monotonic())
                     try:
-                        closer()
+                        item = await asyncio.wait_for(queue.get(), timeout=timeout)
+                        batch.append(item)
+                        if len(batch) >= batch_max_items:
+                            self._handle_log_events(batch)
+                            batch = []
+                            next_deadline = time.monotonic() + batch_max_latency_seconds
+                    except asyncio.TimeoutError:
+                        if batch:
+                            self._handle_log_events(batch)
+                            batch = []
+                        next_deadline = time.monotonic() + batch_max_latency_seconds
                     except Exception:
-                        pass
-
-                self.stream_closer = close_stream
-                # Stream connected; consume until end
-                for events in buffered_stream:
-                    if self._stop_stream.is_set():
+                        # On any unexpected error, flush and exit, reconnect will handle
+                        if batch:
+                            self._handle_log_events(batch)
                         break
-                    # Marshal UI updates back to the main thread via the App
-                    self.app.call_from_thread(self._handle_log_events, events)
-                if self._stop_stream.is_set():
-                    break
-                # Stream ended without explicit error; attempt reconnect
-                self.app.call_from_thread(
-                    self._set_error_message, "Log stream disconnected. Reconnecting..."
-                )
-            except Exception as e:
-                if self._stop_stream.is_set():
-                    break
-                # Surface the error to the UI and attempt reconnect with backoff
-                self.app.call_from_thread(
-                    self._set_error_message, f"Log stream failed: {e}. Reconnecting..."
-                )

-            # Duration-aware backoff: subtract how long the last connection lived
+            producer_task = asyncio.create_task(_producer())
+            try:
+                await _consumer()
+            finally:
+                # Ensure producer is not left running
+                try:
+                    producer_task.cancel()
+                except Exception:
+                    pass
+
+            if self._stop_stream.is_set():
+                break
+
+            # If we reached here, the stream ended or failed; attempt reconnect with backoff
+            self._set_error_message("Log stream disconnected. Reconnecting...")
+
+            # Duration-aware backoff (smaller when the previous connection lived longer)
             connection_lifetime = 0.0
             try:
                 connection_lifetime = max(0.0, time.monotonic() - connect_started_at)
             except Exception:
                 connection_lifetime = 0.0

-            # If the connection lived longer than the current backoff window,
-            # reset to base so the next reconnect is immediate.
             if connection_lifetime >= backoff_seconds:
                 backoff_seconds = base_backoff_seconds
             else:
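
The rewritten _stream_logs above replaces the thread-based _buffer_log_lines helper (removed later in this diff) with an asyncio.Queue filled by a producer task and drained by a consumer that batches events by both size and latency before calling _handle_log_events. A self-contained sketch of that consumer loop is below, with illustrative names and defaults rather than the package's API.

    # Standalone sketch of a size-and-latency bounded batching loop (illustrative only).
    import asyncio
    import time
    from typing import Callable


    async def consume_in_batches(
        queue: asyncio.Queue,
        producer_done: asyncio.Event,
        handle_batch: Callable[[list], None],
        max_latency: float = 0.1,
        max_items: int = 200,
    ) -> None:
        batch: list = []
        deadline = time.monotonic() + max_latency
        while True:
            # Stop once the producer has finished and everything queued is drained.
            if producer_done.is_set() and queue.empty():
                if batch:
                    handle_batch(batch)
                return
            timeout = max(0.0, deadline - time.monotonic())
            try:
                batch.append(await asyncio.wait_for(queue.get(), timeout=timeout))
                if len(batch) >= max_items:
                    handle_batch(batch)
                    batch, deadline = [], time.monotonic() + max_latency
            except asyncio.TimeoutError:
                if batch:
                    handle_batch(batch)
                batch, deadline = [], time.monotonic() + max_latency

Bounding batches on both dimensions caps UI refreshes to roughly one flush per latency window under heavy log volume, while a quiet stream still flushes whatever has arrived within that window.
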
@@ -223,7 +258,7 @@ class DeploymentMonitorWidget(Widget):

             delay = max(0.0, backoff_seconds - connection_lifetime)
             if delay > 0:
-                _sleep_with_cancel(delay)
+                await _sleep_with_cancel(delay)

     def _reset_log_view_for_reconnect(self) -> None:
         """Clear UI and buffers so new stream replaces previous content."""
@@ -326,9 +361,6 @@ class DeploymentMonitorWidget(Widget):
     def on_unmount(self) -> None:
         # Attempt to stop the streaming loop
         self._stop_stream.set()
-        if self.stream_closer is not None:
-            self.stream_closer()
-            self.stream_closer = None

     # Reactive watchers to update widgets in place instead of recomposing
     def watch_error_message(self, message: str) -> None:
@@ -383,12 +415,12 @@ class DeploymentMonitorWidget(Widget):

     async def _poll_deployment_status(self) -> None:
         """Periodically refresh deployment status to reflect updates in the UI."""
-        client = get_client()
         while not self._stop_stream.is_set():
             try:
-                self.deployment = client.get_deployment(
-                    self.deployment_id, include_events=True
-                )
+                async with project_client_context() as client:
+                    self.deployment = await client.get_deployment(
+                        self.deployment_id, include_events=True
+                    )
                 # Clear any previous error on success
                 if self.error_message:
                     self.error_message = ""
@@ -434,42 +466,3 @@ def monitor_deployment_screen(deployment_id: str) -> None:
     """Launch the standalone deployment monitor screen."""
     app = DeploymentMonitorApp(deployment_id)
     app.run()
-
-
-def _buffer_log_lines(iter: Iterator[LogEvent]) -> Iterator[list[LogEvent]]:
-    """Batch log events into small lists using a background reader.
-
-    This reduces UI churn while still reacting quickly. On shutdown we
-    absorb stream read errors that are expected when the connection is
-    closed from another thread.
-    """
-    buffer: list[LogEvent] = []
-    bg_error: Exception | None = None
-    done = threading.Event()
-
-    def pump() -> None:
-        nonlocal bg_error
-        try:
-            for event in iter:
-                buffer.append(event)
-        except Exception as e:
-            bg_error = e
-        finally:
-            done.set()
-
-    t = threading.Thread(target=pump, daemon=True)
-    t.start()
-    try:
-        while not done.is_set():
-            if buffer:
-                # Yield a snapshot and clear in-place to avoid reallocating list
-                yield list(buffer)
-                buffer.clear()
-            time.sleep(0.5)
-        if bg_error is not None:
-            raise bg_error
-    finally:
-        try:
-            t.join(timeout=0.1)
-        except Exception:
-            pass
@@ -282,14 +282,14 @@ class GitValidationWidget(Widget):
         self.error_message = ""
         try:
             client = get_client()
-            response = client.validate_repository(
+            self.validation_response = await client.validate_repository(
                 repo_url=self.repo_url, deployment_id=self.deployment_id, pat=pat
             )
-            self.validation_response = response

-            if response.accessible:
+            resp = self.validation_response
+            if resp and resp.accessible:
                 # Success - post result message with appropriate messaging
-                if response.pat_is_obsolete:
+                if resp.pat_is_obsolete:
                     # Show success message about PAT obsolescence before proceeding
                     self.current_state = "success"
                     self.error_message = "Repository accessible via GitHub App. Your Personal Access Token is now obsolete and will be removed."
@@ -308,22 +308,24 @@ class GitValidationWidget(Widget):
         self.error_message = ""
         try:
             client = get_client()
-            response = client.validate_repository(
+            self.validation_response = await client.validate_repository(
                 repo_url=self.repo_url, deployment_id=self.deployment_id
             )
-            self.validation_response = response

-            if response.accessible:
+            resp = self.validation_response
+            if resp and resp.accessible:
                 # Success - post result message with appropriate messaging
                 self.current_state = "success"
                 self.post_message(
                     ValidationResultMessage(
-                        self.repo_url, "" if response.pat_is_obsolete else None
+                        self.repo_url, "" if resp.pat_is_obsolete else None
                     )
                 )
             else:
                 # Failed - stay in github_auth and show error
-                self.error_message = f"Still not accessible: {response.message}"
+                self.error_message = (
+                    f"Still not accessible: {resp.message if resp else ''}"
+                )

         except Exception as e:
             # Failed - stay in github_auth and show error
@@ -30,6 +30,10 @@ Container {
     height: auto;
 }

+.two-column-form-grid .full-width {
+    column-span: 2;
+}
+
 /* =============================================== */
 /* FORM ELEMENTS */
 /* =============================================== */
@@ -96,7 +100,7 @@ Input.disabled {
     background: $error-muted;
     border-left: heavy $error;
     margin: 0 0 1 0;
-    padding: 0 0 0 1
+    padding: 0 0 0 1;
 }

 .primary-message {
@@ -104,7 +108,7 @@ Input.disabled {
     background: $primary-muted;
     border-left: heavy $primary;
     margin: 0 0 1 0;
-    padding: 0 0 0 1
+    padding: 0 0 0 1;
 }

 .secondary-message {
@@ -112,15 +116,23 @@ Input.disabled {
     background: $secondary-muted;
     border-left: heavy $secondary;
     margin: 0 0 1 0;
-    padding: 0 0 0 1
+    padding: 0 0 0 1;
 }

 .success-message {
     color: $text-success;
     background: $success-muted;
     border-left: heavy $success;
-    padding: 1;
-    margin: 1 0;
+    padding: 0 0 0 1;
+    margin: 0 0 0 0;
+}
+
+.warning-message {
+    color: $text-warning;
+    background: $warning-muted;
+    border-left: heavy $warning;
+    padding: 0 0 0 1;
+    margin: 0 0 0 0;
 }

 .hidden {
@@ -142,6 +154,10 @@ Input.disabled {
     width: 1fr;
 }

+.align-right {
+    align: right middle;
+}
+
 /* =============================================== */
 /* BUTTONS & ACTIONS */
 /* =============================================== */
@@ -1,12 +1,12 @@
 Metadata-Version: 2.3
 Name: llamactl
-Version: 0.3.0a12
+Version: 0.3.0a14
 Summary: A command-line interface for managing LlamaDeploy projects and deployments
 Author: Adrian Lyjak
 Author-email: Adrian Lyjak <adrianlyjak@gmail.com>
 License: MIT
-Requires-Dist: llama-deploy-core[client]>=0.3.0a12,<0.4.0
-Requires-Dist: llama-deploy-appserver>=0.3.0a12,<0.4.0
+Requires-Dist: llama-deploy-core[client]>=0.3.0a14,<0.4.0
+Requires-Dist: llama-deploy-appserver>=0.3.0a14,<0.4.0
 Requires-Dist: httpx>=0.24.0
 Requires-Dist: rich>=13.0.0
 Requires-Dist: questionary>=2.0.0
@@ -0,0 +1,32 @@
+llama_deploy/cli/__init__.py,sha256=df028686233c4d5a3e244bb50c1c7b84cf2399ae03abe45eb4d01e53caa1be38,476
+llama_deploy/cli/app.py,sha256=9170e4f506c482522bd745eb1cdb700a198cfcfd7204c168c94e5ee2b6b43ffa,2199
+llama_deploy/cli/client.py,sha256=f0f72c90cddfbc9198e154883f3b8f05fb47dbe7ec1f5755106dbb8009d2bb54,1459
+llama_deploy/cli/commands/aliased_group.py,sha256=bc41007c97b7b93981217dbd4d4591df2b6c9412a2d9ed045b0ec5655ed285f2,1066
+llama_deploy/cli/commands/auth.py,sha256=de584d11c1acf5a4e7aee8c8f30184335053ed206d38dbc8509b2d1d0677a092,12640
+llama_deploy/cli/commands/deployment.py,sha256=c99feb73a887063cad86e2cc555f21ebac6d47576749d79c551f26b8567af638,9879
+llama_deploy/cli/commands/env.py,sha256=e0b96b9f4e7921b4370ad5f8bc0a2bfb19b705e73004f72c37c9bed28a208e0d,6702
+llama_deploy/cli/commands/init.py,sha256=51b2de1e35ff34bc15c9dfec72fbad08aaf528c334df168896d36458a4e9401c,6307
+llama_deploy/cli/commands/serve.py,sha256=4d47850397ba172944df56a934a51bedb52403cbd3f9b000b1ced90a31c75049,2721
+llama_deploy/cli/config/_config.py,sha256=31376c3f3ecadaf52545e4aeb4a660dde2c22d7e6c33b232ec93eebeb926e3fb,14953
+llama_deploy/cli/config/auth_service.py,sha256=38a2de9bbcf5780675130d90e4f9116da190fd31a33e1c178a7f3847b943c229,2667
+llama_deploy/cli/config/env_service.py,sha256=6e50ffe2e33dcff4193c87b3df8daac71542d406314a7bcaf48187715a53780e,2445
+llama_deploy/cli/config/schema.py,sha256=086b6161b238c2037068a2b510f5d4bbda917494df764818ff9692e9735a8953,608
+llama_deploy/cli/debug.py,sha256=e85a72d473bbe1645eb31772f7349bde703d45704166f767385895c440afc762,496
+llama_deploy/cli/env.py,sha256=6ebc24579815b3787829c81fd5bb9f31698a06e62c0128a788559f962b33a7af,1016
+llama_deploy/cli/interactive_prompts/session_utils.py,sha256=b996f2eddf70d6c49636c4797d246d212fce0950fe7e9a3f59cf6a1bf7ae26f5,1142
+llama_deploy/cli/interactive_prompts/utils.py,sha256=594cc2a242cc3405d66d0e26a60647496cc5fcb4ce7d0500a4cfec4888c9a0fa,516
+llama_deploy/cli/options.py,sha256=62ee7286c3305ddb4b597783d19e854284d79bf9384800045f15b934dc245c1d,1298
+llama_deploy/cli/platform_client.py,sha256=69de23dc79a8f5922afc9e3bac1b633a531340ebbefeb7838e3a88419faa754c,1451
+llama_deploy/cli/py.typed,sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855,0
+llama_deploy/cli/textual/deployment_form.py,sha256=1cf186b765d10a1bdd7394f22ddd7598d75dba8c9a8a7a8be74e6151da2f31dc,20941
+llama_deploy/cli/textual/deployment_help.py,sha256=d43e9ff29db71a842cf8b491545763d581ede3132b8af518c73af85a40950046,2464
+llama_deploy/cli/textual/deployment_monitor.py,sha256=7bcf3f0213401c2432fdb5a9d9acf468a4afe83b2d86d7f2852319768e6f2534,17231
+llama_deploy/cli/textual/git_validation.py,sha256=94c95b61d0cbc490566a406b4886c9c12e1d1793dc14038a5be37119223c9568,13419
+llama_deploy/cli/textual/github_callback_server.py,sha256=dc74c510f8a98ef6ffaab0f6d11c7ea86ee77ca5adbc7725a2a29112bae24191,7556
+llama_deploy/cli/textual/llama_loader.py,sha256=33cb32a46dd40bcf889c553e44f2672c410e26bd1d4b17aa6cca6d0a5d59c2c4,1468
+llama_deploy/cli/textual/secrets_form.py,sha256=a43fbd81aad034d0d60906bfd917c107f9ace414648b0f63ac0b29eeba4050db,7061
+llama_deploy/cli/textual/styles.tcss,sha256=b1a54dc5fb0e0aa12cbf48807e9e6a94b9926838b8058dae1336a134f02e92b0,3327
+llamactl-0.3.0a14.dist-info/WHEEL,sha256=66530aef82d5020ef5af27ae0123c71abb9261377c5bc519376c671346b12918,79
+llamactl-0.3.0a14.dist-info/entry_points.txt,sha256=b67e1eb64305058751a651a80f2d2268b5f7046732268421e796f64d4697f83c,52
+llamactl-0.3.0a14.dist-info/METADATA,sha256=9b2916ded8d6a8edfa8ee764b30b115eba00d9bbcb95b5c98e37def2e9bee3f1,3177
+llamactl-0.3.0a14.dist-info/RECORD,,