llamactl 0.3.0a19__py3-none-any.whl → 0.3.0a21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_deploy/cli/auth/client.py +362 -0
- llama_deploy/cli/client.py +14 -5
- llama_deploy/cli/commands/auth.py +300 -33
- llama_deploy/cli/commands/deployment.py +32 -38
- llama_deploy/cli/commands/env.py +19 -14
- llama_deploy/cli/commands/init.py +137 -34
- llama_deploy/cli/commands/serve.py +29 -12
- llama_deploy/cli/config/_config.py +178 -202
- llama_deploy/cli/config/_migrations.py +65 -0
- llama_deploy/cli/config/auth_service.py +64 -2
- llama_deploy/cli/config/env_service.py +15 -14
- llama_deploy/cli/config/migrations/0001_init.sql +35 -0
- llama_deploy/cli/config/migrations/0002_add_auth_fields.sql +24 -0
- llama_deploy/cli/config/migrations/__init__.py +7 -0
- llama_deploy/cli/config/schema.py +30 -0
- llama_deploy/cli/env.py +2 -1
- llama_deploy/cli/styles.py +10 -0
- llama_deploy/cli/textual/deployment_form.py +63 -7
- llama_deploy/cli/textual/deployment_monitor.py +71 -108
- llama_deploy/cli/textual/github_callback_server.py +4 -4
- llama_deploy/cli/textual/secrets_form.py +4 -0
- llama_deploy/cli/textual/styles.tcss +7 -5
- {llamactl-0.3.0a19.dist-info → llamactl-0.3.0a21.dist-info}/METADATA +5 -3
- llamactl-0.3.0a21.dist-info/RECORD +37 -0
- llama_deploy/cli/platform_client.py +0 -52
- llamactl-0.3.0a19.dist-info/RECORD +0 -32
- {llamactl-0.3.0a19.dist-info → llamactl-0.3.0a21.dist-info}/WHEEL +0 -0
- {llamactl-0.3.0a19.dist-info → llamactl-0.3.0a21.dist-info}/entry_points.txt +0 -0
llama_deploy/cli/config/env_service.py CHANGED

@@ -1,4 +1,5 @@
 from dataclasses import replace
+from typing import Callable
 
 from llama_deploy.cli.config.schema import Environment
 
@@ -7,46 +8,46 @@ from .auth_service import AuthService
 
 
 class EnvService:
-    def __init__(self, config_manager: ConfigManager):
+    def __init__(self, config_manager: Callable[[], ConfigManager]):
         self.config_manager = config_manager
 
     def list_environments(self) -> list[Environment]:
-        return self.config_manager.list_environments()
+        return self.config_manager().list_environments()
 
     def get_current_environment(self) -> Environment:
-        return self.config_manager.get_current_environment()
+        return self.config_manager().get_current_environment()
 
     def switch_environment(self, api_url: str) -> Environment:
-        env = self.config_manager.get_environment(api_url)
+        env = self.config_manager().get_environment(api_url)
         if not env:
             raise ValueError(
                 f"Environment '{api_url}' not found. Add it with 'llamactl auth env add <API_URL>'"
             )
-        self.config_manager.set_settings_current_environment(api_url)
-        self.config_manager.set_settings_current_profile(None)
+        self.config_manager().set_settings_current_environment(api_url)
+        self.config_manager().set_settings_current_profile(None)
         return env
 
     def create_or_update_environment(self, env: Environment) -> None:
-        self.config_manager.create_or_update_environment(
+        self.config_manager().create_or_update_environment(
             env.api_url, env.requires_auth, env.min_llamactl_version
         )
-        self.config_manager.set_settings_current_environment(env.api_url)
-        self.config_manager.set_settings_current_profile(None)
+        self.config_manager().set_settings_current_environment(env.api_url)
+        self.config_manager().set_settings_current_profile(None)
 
     def delete_environment(self, api_url: str) -> bool:
-        return self.config_manager.delete_environment(api_url)
+        return self.config_manager().delete_environment(api_url)
 
     def current_auth_service(self) -> AuthService:
-        return AuthService(self.config_manager, self.get_current_environment())
+        return AuthService(self.config_manager(), self.get_current_environment())
 
     def auto_update_env(self, env: Environment) -> Environment:
-        svc = AuthService(self.config_manager, env)
+        svc = AuthService(self.config_manager(), env)
         version = svc.fetch_server_version()
         update = replace(env)
         update.requires_auth = version.requires_auth
         update.min_llamactl_version = version.min_llamactl_version
         if update != env:
-            self.config_manager.create_or_update_environment(
+            self.config_manager().create_or_update_environment(
                 update.api_url, update.requires_auth, update.min_llamactl_version
             )
         return update
@@ -56,7 +57,7 @@ class EnvService:
         base_env = Environment(
             api_url=clean, requires_auth=False, min_llamactl_version=None
         )
-        svc = AuthService(self.config_manager, base_env)
+        svc = AuthService(self.config_manager(), base_env)
         version = svc.fetch_server_version()
         base_env.requires_auth = version.requires_auth
         base_env.min_llamactl_version = version.min_llamactl_version
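The constructor change above swaps a concrete `ConfigManager` for a zero-argument factory (`Callable[[], ConfigManager]`), so each operation obtains a manager on demand instead of holding one from construction time. A minimal sketch of the pattern; `get_config_manager` and its caching are illustrative assumptions, not taken from the package:

    from functools import lru_cache

    @lru_cache(maxsize=1)
    def get_config_manager() -> ConfigManager:
        # Construct (and migrate) the config database on first use only.
        return ConfigManager()

    # EnvService stores the factory and calls it once per operation:
    svc = EnvService(get_config_manager)
    svc.list_environments()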
llama_deploy/cli/config/migrations/0001_init.sql ADDED

@@ -0,0 +1,35 @@
+PRAGMA user_version=1;
+
+-- Initial schema for llamactl config database
+
+CREATE TABLE IF NOT EXISTS profiles (
+    name TEXT NOT NULL,
+    api_url TEXT NOT NULL,
+    project_id TEXT NOT NULL,
+    api_key TEXT,
+    PRIMARY KEY (name, api_url)
+);
+
+CREATE TABLE IF NOT EXISTS settings (
+    key TEXT PRIMARY KEY,
+    value TEXT NOT NULL
+);
+
+CREATE TABLE IF NOT EXISTS environments (
+    api_url TEXT PRIMARY KEY,
+    requires_auth INTEGER NOT NULL,
+    min_llamactl_version TEXT
+);
+
+-- Seed defaults (idempotent)
+-- 1) Ensure current environment setting exists (do not overwrite if already set)
+INSERT OR IGNORE INTO settings (key, value)
+VALUES ('current_environment_api_url', 'https://api.cloud.llamaindex.ai');
+
+-- 2) Backfill environments from any existing profiles (avoid duplicates)
+INSERT OR IGNORE INTO environments (api_url, requires_auth)
+SELECT DISTINCT api_url, 0 FROM profiles;
+
+-- 3) Ensure the default cloud environment exists with auth required
+INSERT OR IGNORE INTO environments (api_url, requires_auth, min_llamactl_version)
+VALUES ('https://api.cloud.llamaindex.ai', 1, NULL);
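The migration scripts are keyed by `PRAGMA user_version`: each script stamps the version it brings the database to, so an already-applied script can be skipped. The real runner lives in the new `_migrations.py` (+65 lines, not shown in this excerpt); a minimal sketch of how such a `user_version`-gated runner can work:

    import sqlite3
    from pathlib import Path

    def apply_migrations(conn: sqlite3.Connection, migrations_dir: Path) -> None:
        # Version already applied to this database.
        current = conn.execute("PRAGMA user_version").fetchone()[0]
        for script in sorted(migrations_dir.glob("*.sql")):
            version = int(script.name.split("_", 1)[0])  # e.g. 0001_init.sql -> 1
            if version > current:
                # The script itself advances user_version via its leading PRAGMA.
                conn.executescript(script.read_text())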
llama_deploy/cli/config/migrations/0002_add_auth_fields.sql ADDED

@@ -0,0 +1,24 @@
+PRAGMA user_version=2;
+
+-- Add new fields to profiles: api_key_id and device_oidc (stored as JSON string)
+ALTER TABLE profiles ADD COLUMN api_key_id TEXT;
+ALTER TABLE profiles ADD COLUMN device_oidc TEXT;
+
+-- Add synthetic identifier for profiles
+ALTER TABLE profiles ADD COLUMN id TEXT;
+
+-- Populate existing rows with random UUIDv4 values
+UPDATE profiles
+SET id = lower(
+    hex(randomblob(4)) || '-' ||
+    hex(randomblob(2)) || '-' ||
+    '4' || substr(hex(randomblob(2)), 2) || '-' ||
+    substr('89ab', 1 + (abs(random()) % 4), 1) || substr(hex(randomblob(2)), 2) || '-' ||
+    hex(randomblob(6))
+)
+WHERE id IS NULL;
+
+-- Ensure id values are unique
+CREATE UNIQUE INDEX IF NOT EXISTS idx_profiles_id ON profiles(id);
+
+
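The `UPDATE ... SET id = lower(...)` expression assembles an RFC 4122 version-4 UUID from SQLite's random primitives: 8 hex digits, then 4, then a literal `4` plus 3, then a variant nibble drawn from `89ab` plus 3, then 12. A quick self-contained shape check (the in-memory connection and regex are illustrative, not part of the package):

    import re
    import sqlite3

    UUID_SQL = (
        "SELECT lower(hex(randomblob(4)) || '-' || hex(randomblob(2)) || '-' || "
        "'4' || substr(hex(randomblob(2)), 2) || '-' || "
        "substr('89ab', 1 + (abs(random()) % 4), 1) || substr(hex(randomblob(2)), 2) || '-' || "
        "hex(randomblob(6)))"
    )
    value = sqlite3.connect(":memory:").execute(UUID_SQL).fetchone()[0]
    # Version nibble fixed to 4, variant nibble one of 8/9/a/b.
    assert re.fullmatch(
        r"[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}", value
    )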
llama_deploy/cli/config/schema.py CHANGED

@@ -1,14 +1,44 @@
+from __future__ import annotations
+
 from dataclasses import dataclass
 
+from pydantic import BaseModel
+
 
 @dataclass
 class Auth:
     """Auth Profile configuration"""
 
+    id: str
     name: str
     api_url: str
     project_id: str
     api_key: str | None = None
+    # reference to the API key if we created it from device oauth, to be cleaned up
+    # once de-authenticated
+    api_key_id: str | None = None
+    device_oidc: DeviceOIDC | None = None
+
+
+class DeviceOIDC(BaseModel):
+    """Device OIDC configuration"""
+
+    # A name for this device, derived from the host. Used in API key name.
+    device_name: str
+    # A unique user ID to identify the user in the API. Prevents duplicate logins.
+    user_id: str
+    # email of the user
+    email: str
+    # OIDC client ID
+    client_id: str
+    # OIDC discovery URL
+    discovery_url: str
+    # usually 5m long JWT. For calling APIs.
+    device_access_token: str
+    # usually opaque, used to get new access tokens
+    device_refresh_token: str | None = None
+    # usually 1h long JWT. Contains user info (email, name, etc.)
+    device_id_token: str | None = None
 
 
 @dataclass
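Per the 0002 migration above, `device_oidc` is persisted as a JSON string in a TEXT column, which is presumably why `DeviceOIDC` is a pydantic `BaseModel` rather than a dataclass. A plausible round-trip sketch; the helper names are illustrative, and the real persistence code lives in `_config.py`, which this excerpt does not show:

    def dump_device_oidc(value: DeviceOIDC | None) -> str | None:
        # Serialize for the device_oidc TEXT column.
        return value.model_dump_json() if value is not None else None

    def load_device_oidc(raw: str | None) -> DeviceOIDC | None:
        # Parse and validate what was stored.
        return DeviceOIDC.model_validate_json(raw) if raw else None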
llama_deploy/cli/env.py CHANGED

@@ -4,6 +4,7 @@ from io import StringIO
 from typing import Dict
 
 from dotenv import dotenv_values
+from llama_deploy.cli.styles import WARNING
 from rich import print as rprint
 
 
@@ -26,6 +27,6 @@ def load_env_secrets_from_string(env_content: str) -> Dict[str, str]:
         return {k: str(v) for k, v in secrets.items() if v is not None}
     except Exception as e:
         rprint(
-            f"[yellow]Warning: Could not parse environment variables from string: {e}[/]"
+            f"[{WARNING}]Warning: Could not parse environment variables from string: {e}[/]"
         )
         return {}
llama_deploy/cli/styles.py ADDED

@@ -0,0 +1,10 @@
+# A place to centralize design tokens to simplify tweaking the appearance of the CLI
+# See https://rich.readthedocs.io/en/stable/appendix/colors.html
+
+
+HEADER_COLOR = "cornflower_blue"
+HEADER_COLOR_HEX = "#5f87ff"
+PRIMARY_COL = "default"
+MUTED_COL = "grey46"
+WARNING = "yellow"
+ACTIVE_INDICATOR = "magenta"
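These tokens are ordinary Rich style names, so call sites interpolate them into console markup, as the `env.py` change above does. A small usage sketch:

    from rich import print as rprint

    from llama_deploy.cli.styles import MUTED_COL, WARNING

    rprint(f"[{WARNING}]Warning: something needs attention[/]")
    rprint(f"[{MUTED_COL}]secondary detail[/]")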
llama_deploy/cli/textual/deployment_form.py CHANGED

@@ -30,8 +30,10 @@ from llama_deploy.core.deployment_config import (
 from llama_deploy.core.git.git_util import (
     get_current_branch,
     get_git_root,
+    get_unpushed_commits_count,
     is_git_repo,
     list_remotes,
+    working_tree_has_changes,
 )
 from llama_deploy.core.schema.deployments import (
     DeploymentCreate,
@@ -69,6 +71,10 @@ class DeploymentForm:
     removed_secrets: set[str] = field(default_factory=set)
     # if the deployment is being edited
     is_editing: bool = False
+    # warnings shown to the user
+    warnings: list[str] = field(default_factory=list)
+    # env info
+    env_info_messages: str | None = None
 
     @classmethod
     def from_deployment(cls, deployment: DeploymentResponse) -> "DeploymentForm":
@@ -154,6 +160,13 @@ class DeploymentFormWidget(Widget):
             id="error-message",
             classes="error-message " + ("visible" if self.error_message else "hidden"),
         )
+        # Top-of-form warnings banner
+        yield Static(
+            "Note: " + " ".join(f"{w}" for w in self.form_data.warnings),
+            id="warning-list",
+            classes="warning-message mb-1 hidden "
+            + ("visible" if self.form_data.warnings else ""),
+        )
 
         # Main deployment fields
         with Widget(classes="two-column-form-grid"):
@@ -217,6 +230,7 @@ class DeploymentFormWidget(Widget):
         yield SecretsWidget(
             initial_secrets=self.form_data.secrets,
             prior_secrets=self.form_data.initial_secrets,
+            info_message=self.form_data.env_info_messages,
         )
 
         with HorizontalGroup(classes="button-row"):
@@ -501,7 +515,29 @@ def _initialize_deployment_data() -> DeploymentForm:
     secrets: dict[str, str] = {}
     name: str | None = None
     config_file_path: str | None = None
-
+    warnings: list[str] = []
+    has_git = is_git_repo()
+    has_no_workflows = False
+    try:
+        config = read_deployment_config(Path("."), Path("."))
+        if config.name != DEFAULT_DEPLOYMENT_NAME:
+            name = config.name
+        has_no_workflows = config.has_no_workflows()
+    except Exception:
+        warnings.append("Could not parse local deployment config. It may be invalid.")
+    if not has_git and has_no_workflows:
+        warnings = [
+            "Run from within a git repository to automatically generate a deployment config."
+        ]
+    elif has_no_workflows:
+        warnings = [
+            "The current project has no workflows configured. It may be invalid."
+        ]
+    elif not has_git:
+        warnings.append(
+            "Current directory is not a git repository. If you are trying to deploy this directory, you will need to create a git repository and push it before creating a deployment."
+        )
+    else:
         seen = set[str]()
         remotes = list_remotes()
         candidate_origins = []
@@ -520,14 +556,32 @@ def _initialize_deployment_data() -> DeploymentForm:
         if root != Path.cwd():
             config_file_path = str(Path.cwd().relative_to(root))
 
+        if not preferred_origin:
+            warnings.append(
+                "No git remote was found. You will need to push your changes to a remote repository before creating a deployment from this repository."
+            )
+        else:
+            # Working tree changes
+            if working_tree_has_changes() and preferred_origin:
+                warnings.append(
+                    "Working tree has uncommitted or untracked changes. You may want to push them before creating a deployment from this branch."
+                )
+            else:
+                # Unpushed commits (ahead of upstream)
+                ahead = get_unpushed_commits_count()
+                if ahead is None:
+                    warnings.append(
+                        "Current branch has no upstream configured. You will need to push them or choose a different branch."
+                    )
+                elif ahead > 0:
+                    warnings.append(
+                        f"There are {ahead} local commits not pushed to upstream. They won't be included in the deployment unless you push them first."
+                    )
+    env_info_message = None
     if Path(".env").exists():
         secrets = load_env_secrets_from_string(Path(".env").read_text())
-
-
-        if config.name != DEFAULT_DEPLOYMENT_NAME:
-            name = config.name
-    except Exception:
-        pass
+        if len(secrets) > 0:
+            env_info_message = "Secrets were automatically seeded from your .env file. Remove or change any that should not be set. They must be manually configured after creation."
 
     form = DeploymentForm(
         name=name or "",
@@ -535,6 +589,8 @@ def _initialize_deployment_data() -> DeploymentForm:
         git_ref=git_ref or "main",
         secrets=secrets,
         deployment_file_path=config_file_path or "",
+        warnings=warnings,
+        env_info_messages=env_info_message,
     )
     return form
 
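The pre-flight warnings above rely on two git helpers newly imported from `llama_deploy.core.git.git_util`, whose implementations are not part of this excerpt. Plausible subprocess-based sketches consistent with how they are called; the exact signatures are assumptions:

    import subprocess

    def working_tree_has_changes() -> bool:
        # Any porcelain output means uncommitted or untracked changes.
        out = subprocess.run(
            ["git", "status", "--porcelain"], capture_output=True, text=True, check=True
        ).stdout
        return bool(out.strip())

    def get_unpushed_commits_count() -> int | None:
        # Commits ahead of the upstream branch; None when no upstream is configured.
        proc = subprocess.run(
            ["git", "rev-list", "--count", "@{u}..HEAD"], capture_output=True, text=True
        )
        if proc.returncode != 0:
            return None
        return int(proc.stdout.strip())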
llama_deploy/cli/textual/deployment_monitor.py CHANGED

@@ -6,13 +6,14 @@ import asyncio
 import hashlib
 import logging
 import threading
-import time
 import webbrowser
+from collections.abc import AsyncGenerator
 from pathlib import Path
 
 from llama_deploy.cli.client import (
     project_client_context,
 )
+from llama_deploy.core.iter_utils import merge_generators
 from llama_deploy.core.schema import LogEvent
 from llama_deploy.core.schema.deployments import DeploymentResponse
 from rich.text import Text
@@ -20,9 +21,11 @@ from textual import events
 from textual.app import App, ComposeResult
 from textual.containers import Container, HorizontalGroup, Widget
 from textual.content import Content
+from textual.css.query import NoMatches
 from textual.message import Message
 from textual.reactive import reactive
 from textual.widgets import Button, RichLog, Static
+from typing_extensions import Literal
 
 logger = logging.getLogger(__name__)
 
@@ -87,7 +90,12 @@ class DeploymentMonitorWidget(Widget):
         content-align: left middle;
     }
 
-
+    .log-view-container {
+        width: 1fr;
+        height: 1fr;
+        padding: 0;
+        margin: 0;
+    }
     """
 
     deployment_id: str
@@ -102,11 +110,14 @@ class DeploymentMonitorWidget(Widget):
         self._stop_stream = threading.Event()
         # Persist content written to the RichLog across recomposes
         self._log_buffer: list[Text] = []
+        self._log_stream_started = False
 
     async def on_mount(self) -> None:
         # Kick off initial fetch and start logs stream in background
         self.run_worker(self._fetch_deployment())
-
+        # Force an initial layout, then start the log stream after that layout completes
+        self.refresh(layout=True)
+        self.call_after_refresh(lambda: self.run_worker(self._stream_logs()))
         # Start periodic polling of deployment status
         self.run_worker(self._poll_deployment_status())
 
@@ -131,16 +142,8 @@ class DeploymentMonitorWidget(Widget):
         )
         yield Static("", classes="status-right", id="last_event_status")
         yield Static("", classes="last-event", id="last_event_details")
-        yield Static("")  # just a spacer
-
         yield Static("Logs", classes="secondary-message log-header")
-        yield RichLog(
-            id="log_view",
-            classes="log-view mb-1",
-            auto_scroll=self.autoscroll_enabled,
-            wrap=self.wrap_enabled,
-            highlight=True,
-        )
+        yield HorizontalGroup(classes="log-view-container", id="log_view_container")
 
         with HorizontalGroup(classes="button-row"):
             wrap_label = "Wrap: On" if self.wrap_enabled else "Wrap: Off"
@@ -180,113 +183,45 @@ class DeploymentMonitorWidget(Widget):
     async def _stream_logs(self) -> None:
         """Consume the async log iterator, batch updates, and reconnect with backoff."""
 
-        async def
-
-
-            while
-                await asyncio.sleep(
-
-
-        # Batching configuration: small latency to reduce UI churn while staying responsive
-        batch_max_latency_seconds = 0.1
-        batch_max_items = 200
-
-        base_backoff_seconds = 0.2
-        backoff_seconds = base_backoff_seconds
-        max_backoff_seconds = 30.0
+        async def _flush_signal(
+            frequency_seconds: float,
+        ) -> AsyncGenerator[Literal["__FLUSH__"], None]:
+            while not self._stop_stream.is_set():
+                await asyncio.sleep(frequency_seconds)
+                yield "__FLUSH__"
 
+        failures = 0
         while not self._stop_stream.is_set():
-
-
-
-
-            queue: asyncio.Queue[LogEvent] = asyncio.Queue(maxsize=10000)
-            producer_done = asyncio.Event()
-
-            async def _producer() -> None:
+            async with project_client_context() as client:
+                await asyncio.sleep(min(failures, 10))
+                batch: list[LogEvent] = []
                 try:
-
-
+                    logger.info(f"Streaming logs for deployment {self.deployment_id}")
+                    async for event in merge_generators(
+                        client.stream_deployment_logs(
                             self.deployment_id,
                             include_init_containers=True,
                             tail_lines=10000,
-                        )
-
-
-
-
-
-
+                        ),
+                        _flush_signal(0.2),
+                    ):
+                        if event == "__FLUSH__" and batch:
+                            self._handle_log_events(batch)
+                            batch = []
+                        elif isinstance(event, LogEvent):
+                            batch.append(event)
+                            if len(batch) >= 1000:
+                                self._handle_log_events(batch)
+                                batch = []
                 except Exception as e:
-                    # Surface error via error message and rely on reconnect loop
                     if not self._stop_stream.is_set():
                         self._set_error_message(
                             f"Log stream failed: {e}. Reconnecting..."
                        )
+                        failures += 1
                 finally:
-
-
-            async def _consumer() -> None:
-                batch: list[LogEvent] = []
-                next_deadline = time.monotonic() + batch_max_latency_seconds
-                while not self._stop_stream.is_set():
-                    # Stop once producer finished and queue drained
-                    if producer_done.is_set() and queue.empty():
-                        if batch:
-                            self._handle_log_events(batch)
-                            batch = []
-                        break
-                    timeout = max(0.0, next_deadline - time.monotonic())
-                    try:
-                        item = await asyncio.wait_for(queue.get(), timeout=timeout)
-                        batch.append(item)
-                        if len(batch) >= batch_max_items:
-                            self._handle_log_events(batch)
-                            batch = []
-                            next_deadline = time.monotonic() + batch_max_latency_seconds
-                    except asyncio.TimeoutError:
-                        if batch:
-                            self._handle_log_events(batch)
-                            batch = []
-                        next_deadline = time.monotonic() + batch_max_latency_seconds
-                    except Exception:
-                        # On any unexpected error, flush and exit, reconnect will handle
-                        if batch:
-                            self._handle_log_events(batch)
-                        break
-
-            producer_task = asyncio.create_task(_producer())
-            try:
-                await _consumer()
-            finally:
-                # Ensure producer is not left running
-                try:
-                    producer_task.cancel()
-                except Exception:
-                    pass
-
-            if self._stop_stream.is_set():
-                break
-
-            # If we reached here, the stream ended or failed; attempt reconnect with backoff
-            self._set_error_message("Log stream disconnected. Reconnecting...")
-
-            # Duration-aware backoff (smaller when the previous connection lived longer)
-            connection_lifetime = 0.0
-            try:
-                connection_lifetime = max(0.0, time.monotonic() - connect_started_at)
-            except Exception:
-                connection_lifetime = 0.0
-
-            if connection_lifetime >= backoff_seconds:
-                backoff_seconds = base_backoff_seconds
-            else:
-                backoff_seconds = min(backoff_seconds * 2.0, max_backoff_seconds)
-
-            delay = max(0.0, backoff_seconds - connection_lifetime)
-            if delay > 0:
-                await _sleep_with_cancel(delay)
+                    if batch:
+                        self._handle_log_events(batch)
 
     def _reset_log_view_for_reconnect(self) -> None:
         """Clear UI and buffers so new stream replaces previous content."""
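The rewritten `_stream_logs` replaces the producer/consumer queue with `merge_generators` from `llama_deploy.core.iter_utils` (added in this release but not shown in this excerpt), interleaving log events with a periodic `"__FLUSH__"` sentinel so batches flush every 0.2 s or every 1000 events. A sketch of what such a merge could look like; the signature is an assumption based on how it is called here:

    import asyncio
    from collections.abc import AsyncGenerator
    from typing import Any

    async def merge_generators(*gens: AsyncGenerator[Any, None]) -> AsyncGenerator[Any, None]:
        # Yield items from all generators as they arrive, in arrival order.
        queue: asyncio.Queue[Any] = asyncio.Queue()
        done = object()

        async def pump(gen: AsyncGenerator[Any, None]) -> None:
            try:
                async for item in gen:
                    await queue.put(item)
            finally:
                await queue.put(done)

        tasks = [asyncio.create_task(pump(g)) for g in gens]
        finished = 0
        try:
            while finished < len(gens):
                item = await queue.get()
                if item is done:
                    finished += 1
                else:
                    yield item
        finally:
            for t in tasks:
                t.cancel()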
@@ -313,10 +248,26 @@ class DeploymentMonitorWidget(Widget):
         if not texts:
             return
 
-
+        try:
+            # due to bugs in the RichLog widget, defer mounting, otherwise it won't get a "ResizeEvent" (on_resize), and be waiting indefinitely
+            # before it renders (unless you manually resize the terminal window)
+            log_widget = self.query_one("#log_view", RichLog)
+        except NoMatches:
+            log_container = self.query_one("#log_view_container", HorizontalGroup)
+            log_widget = RichLog(
+                id="log_view",
+                classes="log-view mb-1",
+                auto_scroll=self.autoscroll_enabled,
+                wrap=self.wrap_enabled,
+                highlight=True,
+            )
+            log_container.mount(log_widget)
         for text in texts:
             log_widget.write(text)
             self._log_buffer.append(text)
+        log_widget.refresh()
+
+        # One-time kick to ensure initial draw
         # Clear any previous error once we successfully receive logs
         if self.error_message:
             self.error_message = ""
@@ -469,6 +420,18 @@ class MonitorCloseMessage(Message):
     pass
 
 
+class LogBatchMessage(Message):
+    def __init__(self, events: list[LogEvent]) -> None:
+        super().__init__()
+        self.events = events
+
+
+class ErrorTextMessage(Message):
+    def __init__(self, text: str) -> None:
+        super().__init__()
+        self.text = text
+
+
 class DeploymentMonitorApp(App[None]):
     """Standalone app wrapper around the monitor widget.
 
llama_deploy/cli/textual/github_callback_server.py CHANGED

@@ -191,17 +191,17 @@ async def main():
     server = GitHubCallbackServer(port=41010)
 
     # Start server and open browser
-
-
+    logger.debug(f"Starting GitHub callback server on http://localhost:{server.port}")
+    logger.debug("Opening browser to show success page...")
 
     # Open browser to success page to see the styling
     webbrowser.open(f"http://localhost:{server.port}")
 
     try:
         # Wait for callback (or just keep server running)
-
+        logger.debug("Server running... Press Ctrl+C to stop")
         callback_data = await server.start_and_wait(timeout=3600)  # 1 hour timeout
-
+        logger.debug(f"Received callback data: {callback_data}")
     finally:
         await server.stop()
 
llama_deploy/cli/textual/secrets_form.py CHANGED

@@ -67,6 +67,7 @@ class SecretsWidget(Widget):
         self,
         initial_secrets: dict[str, str] | None = None,
         prior_secrets: set[str] | None = None,
+        info_message: str | None = None,
     ):
         super().__init__()
         self.secrets = initial_secrets or {}
@@ -74,6 +75,7 @@ class SecretsWidget(Widget):
         self.visible_secrets = set()
         # Persist textarea content across recomposes triggered by other actions
         self._new_secrets_text = ""
+        self.info_message = info_message
 
     def compose(self) -> ComposeResult:
         """Compose the secrets section - called automatically when secrets change"""
@@ -85,6 +87,8 @@ class SecretsWidget(Widget):
         )
         secret_names = known_secret_names + prior_only_secret_names
         hidden = len(secret_names) == 0
+        if self.info_message:
+            yield Static(self.info_message, classes="secondary-message mb-1")
         with Static(
             classes="secrets-grid" + (" hidden" if hidden else ""),
             id="secrets-grid",