hypercli-cli 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- c3cli/__init__.py +1 -0
- c3cli/billing.py +60 -0
- c3cli/cli.py +183 -0
- c3cli/comfyui.py +823 -0
- c3cli/instances.py +193 -0
- c3cli/jobs.py +239 -0
- c3cli/llm.py +263 -0
- c3cli/output.py +78 -0
- c3cli/renders.py +192 -0
- c3cli/tui/__init__.py +4 -0
- c3cli/tui/job_monitor.py +335 -0
- c3cli/user.py +19 -0
- hypercli_cli-0.4.0.dist-info/METADATA +124 -0
- hypercli_cli-0.4.0.dist-info/RECORD +16 -0
- hypercli_cli-0.4.0.dist-info/WHEEL +4 -0
- hypercli_cli-0.4.0.dist-info/entry_points.txt +2 -0
c3cli/output.py
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
"""Output formatting"""
|
|
2
|
+
import json as json_lib
|
|
3
|
+
from typing import Any
|
|
4
|
+
from rich.console import Console
|
|
5
|
+
from rich.table import Table
|
|
6
|
+
from rich import print_json
|
|
7
|
+
|
|
8
|
+
console = Console()
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def output(data: Any, fmt: str = "table", columns: list[str] | None = None):
    """Output data in the requested format.

    Args:
        data: A dict, a dataclass-like object (anything with ``__dict__``),
            or a list of either.
        fmt: ``"json"`` for machine-readable output, ``"table"`` for a rich
            table; anything else falls back to plain printing.
        columns: Optional column subset/order for table output of lists.
    """
    if fmt == "json":
        if hasattr(data, "__dict__"):
            print_json(data=data.__dict__)
        elif isinstance(data, list):
            # Dataclass instances are not JSON-serializable directly; convert
            # each element that carries a __dict__ (callers pass lists of
            # SDK dataclasses, e.g. renders).
            print_json(
                data=[i.__dict__ if hasattr(i, "__dict__") else i for i in data]
            )
        else:
            print_json(data=data)
    elif fmt == "table":
        if isinstance(data, list):
            table_list(data, columns)
        elif hasattr(data, "__dict__"):
            table_dict(data.__dict__)
        elif isinstance(data, dict):
            table_dict(data)
        else:
            console.print(data)
    else:
        # Unknown format name: print as-is rather than erroring out.
        console.print(data)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def table_list(items: list, columns: list[str] | None = None):
    """Render a list of dicts (or dataclass instances) as a rich table.

    Args:
        items: Rows to display; dataclass instances are converted via
            ``__dict__``. An empty list prints a dimmed "No results" note.
        columns: Optional column names; defaults to the keys of the first row.
    """
    if not items:
        console.print("[dim]No results[/dim]")
        return

    # Convert dataclass to dict if needed
    if hasattr(items[0], "__dict__"):
        items = [i.__dict__ for i in items]

    cols = columns or list(items[0].keys())
    table = Table(show_header=True, header_style="bold cyan")

    for col in cols:
        table.add_column(col)

    for item in items:
        # Missing keys render as empty cells rather than raising KeyError.
        table.add_row(*(str(item.get(col, "")) for col in cols))

    console.print(table)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def table_dict(data: dict):
    """Render a mapping as a two-column key/value table."""
    kv = Table(show_header=False, box=None)
    kv.add_column("Key", style="bold cyan")
    kv.add_column("Value")

    for key, value in data.items():
        # Nested structures are pretty-printed as indented JSON.
        rendered = (
            json_lib.dumps(value, indent=2)
            if isinstance(value, (dict, list))
            else value
        )
        kv.add_row(str(key), str(rendered))

    console.print(kv)
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def error(msg: str):
    """Print *msg* to the console as a red error line."""
    console.print(f"[bold red]Error:[/bold red] {msg}")
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def success(msg: str):
    """Print *msg* to the console prefixed with a green check mark."""
    console.print(f"[bold green]✓[/bold green] {msg}")
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def spinner(msg: str = "Loading..."):
    """Context manager for showing a spinner during slow operations"""
    styled = f"[bold cyan]{msg}[/bold cyan]"
    return console.status(styled, spinner="dots")
|
c3cli/renders.py
ADDED
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
"""c3 renders commands"""
|
|
2
|
+
import time
|
|
3
|
+
import typer
|
|
4
|
+
from typing import Optional
|
|
5
|
+
from c3 import C3
|
|
6
|
+
from .output import output, console, success, spinner
|
|
7
|
+
|
|
8
|
+
app = typer.Typer(help="Manage renders")
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def get_client() -> C3:
    """Construct an API client from ambient configuration."""
    return C3()
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@app.command("list")
def list_renders(
    state: Optional[str] = typer.Option(None, "--state", "-s", help="Filter by state"),
    template: Optional[str] = typer.Option(None, "--template", "-t", help="Filter by template"),
    type: Optional[str] = typer.Option(None, "--type", help="Filter by render type"),
    fmt: str = typer.Option("table", "--output", "-o", help="Output format: table|json"),
):
    """List all renders"""
    client = get_client()
    with spinner("Fetching renders..."):
        renders = client.renders.list(state=state, template=template, type=type)

    # JSON mode: dump and return early.
    if fmt == "json":
        output(renders, "json")
        return

    if not renders:
        console.print("[dim]No renders found[/dim]")
        return
    output(renders, "table", ["render_id", "state", "template", "render_type", "created_at"])
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
@app.command("get")
def get_render(
    render_id: str = typer.Argument(..., help="Render ID"),
    fmt: str = typer.Option("table", "--output", "-o", help="Output format: table|json"),
):
    """Get render details"""
    client = get_client()
    with spinner("Fetching render..."):
        details = client.renders.get(render_id)
    output(details, fmt)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@app.command("create")
def create_render(
    template: str = typer.Argument(..., help="Template name"),
    prompt: str = typer.Option(..., "--prompt", "-p", help="Text prompt"),
    gpu: str = typer.Option("L40S", "--gpu", "-g", help="GPU type"),
    region: Optional[str] = typer.Option(None, "--region", "-r", help="Region"),
    render_type: str = typer.Option("comfyui", "--type", "-t", help="Render type"),
    wait: bool = typer.Option(False, "--wait", "-w", help="Wait for completion"),
    notify_url: Optional[str] = typer.Option(None, "--notify", help="Webhook URL for completion"),
    fmt: str = typer.Option("table", "--output", "-o", help="Output format: table|json"),
):
    """Create a new render"""
    client = get_client()

    # Region is only sent when explicitly requested.
    params = {"template": template, "prompt": prompt, "gpu_type": gpu}
    if region:
        params["region"] = region

    with spinner("Creating render..."):
        render = client.renders.create(params=params, render_type=render_type, notify_url=notify_url)

    if fmt == "json" and not wait:
        output(render, "json")
    else:
        # Human-readable confirmation (also shown while waiting in JSON mode).
        console.print(f"[bold green]✓[/bold green] Render created: [cyan]{render.render_id}[/cyan]")
        console.print(f" State: {render.state}")

    if wait:
        _wait_for_render(client, render.render_id, fmt)
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
@app.command("status")
def status(
    render_id: str = typer.Argument(..., help="Render ID"),
    watch: bool = typer.Option(False, "--watch", "-w", help="Watch status live"),
    fmt: str = typer.Option("table", "--output", "-o", help="Output format: table|json"),
):
    """Get render status"""
    client = get_client()

    # Live mode hands off to the polling display and never prints directly.
    if watch:
        _watch_status(client, render_id)
        return

    with spinner("Fetching status..."):
        current = client.renders.status(render_id)
    output(current, fmt)
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
@app.command("cancel")
def cancel(
    render_id: str = typer.Argument(..., help="Render ID"),
):
    """Cancel a render"""
    client = get_client()
    with spinner("Cancelling render..."):
        client.renders.cancel(render_id)
    success(f"Render {render_id} cancelled")
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def _wait_for_render(c3: C3, render_id: str, fmt: str, poll_interval: float = 2.0):
    """Poll a render until it reaches a terminal state, then print the result.

    Args:
        c3: API client.
        render_id: Render to poll.
        fmt: ``"json"`` to emit the final render as JSON, anything else for
            human-readable text.
        poll_interval: Seconds to sleep between status polls.
    """
    console.print("Waiting for completion...")

    while True:
        status = c3.renders.status(render_id)
        # Progress may be absent early in the render's life.
        progress_str = f" ({status.progress:.0%})" if status.progress is not None else ""
        console.print(f" State: {status.state}{progress_str}")

        if status.state in ("completed", "failed", "cancelled"):
            break
        time.sleep(poll_interval)

    # Get final render details
    render = c3.renders.get(render_id)

    if fmt == "json":
        output(render, "json")
        return

    console.print()
    if render.state == "completed":
        # Plain string: the previous f-string had no placeholders.
        success("Render completed!")
        if render.result_url:
            console.print(f" Result: [link={render.result_url}]{render.result_url}[/link]")
    else:
        console.print(f"[bold red]Render {render.state}[/bold red]")
        if render.error:
            console.print(f" Error: {render.error}")
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def _watch_status(c3: C3, render_id: str, poll_interval: float = 2.0):
    """Watch render status live.

    Polls the render's status every *poll_interval* seconds and redraws a
    rich Live panel until the render reaches a terminal state, the user
    presses Ctrl+C, or an API error occurs.
    """
    # Imported lazily so the Live machinery is only loaded when watching.
    from rich.live import Live
    from rich.panel import Panel
    from rich.table import Table

    def render_status_panel(status, render=None):
        # Key/value summary of the latest poll result.
        table = Table(show_header=False, box=None)
        table.add_column("Key", style="cyan")
        table.add_column("Value")

        table.add_row("ID", status.render_id)
        table.add_row("State", _state_style(status.state))
        if status.progress is not None:
            table.add_row("Progress", f"{status.progress:.0%}")
        # Result URL / error are only available on the full render object,
        # which is fetched once the render is terminal.
        if render and render.result_url:
            table.add_row("Result", render.result_url)
        if render and render.error:
            table.add_row("Error", f"[red]{render.error}[/red]")

        return Panel(table, title="[bold]Render Status[/bold]")

    with Live(console=console, refresh_per_second=2) as live:
        while True:
            try:
                status = c3.renders.status(render_id)
                render = None

                # Terminal state: fetch full details once, show them, stop.
                if status.state in ("completed", "failed", "cancelled"):
                    render = c3.renders.get(render_id)
                    live.update(render_status_panel(status, render))
                    break

                live.update(render_status_panel(status))
                time.sleep(poll_interval)
            except KeyboardInterrupt:
                break
            except Exception as e:
                # Surface the error and stop rather than spinning forever.
                console.print(f"[red]Error: {e}[/red]")
                break
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
def _state_style(state: str) -> str:
|
|
184
|
+
"""Style state for display"""
|
|
185
|
+
styles = {
|
|
186
|
+
"pending": "[yellow]pending[/yellow]",
|
|
187
|
+
"running": "[blue]running[/blue]",
|
|
188
|
+
"completed": "[green]completed[/green]",
|
|
189
|
+
"failed": "[red]failed[/red]",
|
|
190
|
+
"cancelled": "[dim]cancelled[/dim]",
|
|
191
|
+
}
|
|
192
|
+
return styles.get(state, state)
|
c3cli/tui/__init__.py
ADDED
c3cli/tui/job_monitor.py
ADDED
|
@@ -0,0 +1,335 @@
|
|
|
1
|
+
"""Job monitor TUI - async log streaming with metrics display"""
|
|
2
|
+
import asyncio
|
|
3
|
+
import time
|
|
4
|
+
from collections import deque
|
|
5
|
+
from typing import Optional
|
|
6
|
+
from queue import Queue, Empty
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
|
|
9
|
+
from rich.console import Console
|
|
10
|
+
from rich.live import Live
|
|
11
|
+
from rich.panel import Panel
|
|
12
|
+
from rich.layout import Layout
|
|
13
|
+
from rich import box
|
|
14
|
+
|
|
15
|
+
from c3 import C3, LogStream, fetch_logs
|
|
16
|
+
|
|
17
|
+
console = Console()
|
|
18
|
+
|
|
19
|
+
# Buffer limits
|
|
20
|
+
MAX_LOG_LINES = 1000
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class JobStatus:
    """Status updates for job execution (e.g., ComfyUI workflow progress)"""
    # Current pipeline stage name; rendered as the status-panel headline.
    stage: str = "initializing"
    # Free-form detail line for the current stage.
    message: str = ""
    # Percentage in 0-100; a value of 0 hides the progress bar.
    progress: float = 0
    # Recent status strings, shown dimmed beneath the headline.
    history: list[str] = field(default_factory=list)
    # Error text when the job failed; colors the panel red.
    error: str | None = None
    # True once the job finished; the monitor may exit on this.
    complete: bool = False
    # Optional result payload from the job (schema defined by the producer).
    result: dict | None = None
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def format_time(seconds: int) -> str:
    """Format a duration as '1h 2m 3s', dropping leading zero units."""
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    if hours:
        return f"{hours}h {minutes}m {secs}s"
    if minutes:
        return f"{minutes}m {secs}s"
    return f"{secs}s"
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def bar(pct: float, width: int = 30, color: str = "blue") -> str:
    """Render a fixed-width progress bar using rich markup.

    Args:
        pct: Percentage; values outside [0, 100] are clamped so the bar can
            never overflow its width or produce a negative repeat count.
        width: Total bar width in characters.
        color: Rich color name for the filled portion.
    """
    filled = int(max(0.0, min(pct, 100.0)) / 100 * width)
    return f"[{color}]{'█' * filled}[/][dim]{'░' * (width - filled)}[/]"
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
def build_header(job, elapsed: int, metrics) -> Panel:
    """Combined job info + metrics header.

    Args:
        job: SDK job object (reads state, gpu_type, gpu_count, region,
            price_per_hour, hostname, runtime, job_id).
        elapsed: Seconds since the job started; used for the runtime countdown.
        metrics: Optional metrics object with ``system`` and ``gpus`` fields.
    """
    parts = []

    # Job info line
    colors = {"queued": "yellow", "assigned": "blue", "running": "green",
              "succeeded": "bright_green", "failed": "red", "terminated": "red"}
    state_color = colors.get(job.state, "white")

    info = f"[bold {state_color}]{job.state.upper()}[/] {job.gpu_type} x{job.gpu_count} {job.region} ${job.price_per_hour:.2f}/hr"
    if job.hostname:
        info += f" [dim]{job.hostname}[/]"
    if job.runtime and elapsed > 0:
        # Countdown for jobs with a runtime limit: red under 5 min left,
        # yellow under 15 min.
        left = max(0, job.runtime - elapsed)
        pct = min(elapsed / job.runtime * 100, 100)
        c = "red" if left < 300 else "yellow" if left < 900 else "green"
        info += f" {bar(pct, 10, c)} [{c}]{format_time(left)}[/]"
    parts.append(info)

    # System metrics (CPU + RAM) - show first
    if metrics and metrics.system:
        parts.append("")
        s = metrics.system
        cpu_pct = min(s.cpu_percent, 100)  # Clamp at 100% for display
        cpu_c = "green" if cpu_pct < 70 else "yellow" if cpu_pct < 90 else "red"
        mem_pct = (s.memory_used / s.memory_limit * 100) if s.memory_limit else 0
        mem_c = "red" if mem_pct >= 90 else "yellow" if mem_pct >= 70 else "green"

        line = f"[bold]CPU[/] {bar(cpu_pct, 20, cpu_c)} {cpu_pct:4.0f}% "
        # memory_used/memory_limit appear to be MiB (divided by 1024, shown
        # as GB) — TODO confirm against the SDK metrics schema.
        line += f"RAM {bar(mem_pct, 15, mem_c)} {s.memory_used/1024:.1f}/{s.memory_limit/1024:.1f}GB"
        parts.append(line)

    # GPU metrics
    if metrics and metrics.gpus:
        if not metrics.system:
            parts.append("")
        for g in metrics.gpus:
            # Utilization colored dim/yellow/green at <20 / <50 / >=50.
            uc = "green" if g.utilization >= 50 else "yellow" if g.utilization >= 20 else "dim"
            mp = (g.memory_used / g.memory_total * 100) if g.memory_total else 0
            mc = "red" if mp >= 90 else "yellow" if mp >= 70 else "green"
            tc = "red" if g.temperature >= 85 else "yellow" if g.temperature >= 70 else "green"
            pc = "green" if g.power_draw < 100 else "yellow" if g.power_draw < 250 else "bright_red" if g.power_draw < 350 else "red"

            line = f"[bold]GPU {g.index}[/] {bar(g.utilization, 20, uc)} {g.utilization:4.0f}% "
            line += f"VRAM {bar(mp, 15, mc)} {g.memory_used/1024:.1f}/{g.memory_total/1024:.1f}GB "
            line += f"[{tc}]{g.temperature}°C[/] [{pc}]{g.power_draw:.0f}W[/]"
            parts.append(line)

    # Panel title shows a truncated job id.
    return Panel("\n".join(parts), title=f"[bold]{job.job_id[:24]}[/]", border_style="blue", box=box.ROUNDED)
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def build_status_panel(status: JobStatus, height: int = 10) -> Panel:
    """Render the workflow-progress panel for a JobStatus snapshot."""
    lines = []

    # Headline: stage name, with a progress bar once progress is reported.
    if status.progress > 0:
        progress_bar = bar(status.progress, 30, "cyan")
        lines.append(f"[bold]{status.stage}[/] {progress_bar} {status.progress:.0f}%")
    else:
        lines.append(f"[bold cyan]{status.stage}[/]")

    if status.message:
        lines.append(f" {status.message}")

    if status.error:
        lines.append(f"\n[bold red]Error:[/] {status.error}")

    if status.history:
        lines.append("")
        # Only the most recent entries that fit below the headline.
        for msg in status.history[-(height - 4):]:
            lines.append(f"[dim] {msg}[/]")

    if status.complete:
        title, border = "[bold green]Complete[/]", "green"
    elif status.error:
        title, border = "[bold red]Failed[/]", "red"
    else:
        title, border = "[bold]Status[/]", "magenta"

    return Panel("\n".join(lines), title=title, border_style=border, box=box.ROUNDED)
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def build_layout(job, elapsed, metrics, logs, ws_status, job_status, content_height) -> Layout:
    """Build the full layout.

    Args:
        job: SDK job object passed through to ``build_header``.
        elapsed: Seconds since the job started.
        metrics: Optional metrics object; determines the header's height.
        logs: Iterable of log lines (a deque in practice).
        ws_status: Rich-markup string describing the log-stream connection.
        job_status: Optional JobStatus; when present a status pane is shown.
        content_height: Rows available below the header.
    """
    layout = Layout()
    header = build_header(job, elapsed, metrics)
    # Header needs 3 base lines plus one row per GPU and one for system stats.
    header_height = 3 + (len(metrics.gpus) if metrics and metrics.gpus else 0) + (1 if metrics and metrics.system else 0)

    if job_status:
        # Two-pane mode: logs on the left (ratio 2), status on the right (1).
        log_lines = int(content_height * 0.7)
        log_content = "\n".join(list(logs)[-log_lines:]) if logs else "[dim]Waiting for logs...[/]"
        log_panel = Panel(log_content, title=f"[bold]Logs[/] ({ws_status})", border_style="yellow", box=box.ROUNDED, height=content_height + 2)

        status_panel = build_status_panel(job_status, content_height)
        content_layout = Layout()
        content_layout.split_row(
            Layout(log_panel, name="logs", ratio=2),
            Layout(status_panel, name="status", ratio=1),
        )
        layout.split_column(
            Layout(header, name="header", size=header_height + 2),
            content_layout,
        )
    else:
        # Single-pane mode: logs fill everything below the header.
        log_content = "\n".join(list(logs)[-content_height:]) if logs else "[dim]Waiting for logs...[/]"
        log_panel = Panel(log_content, title=f"[bold]Logs[/] ({ws_status})", border_style="yellow", box=box.ROUNDED, height=content_height + 2)

        layout.split_column(
            Layout(header, name="header", size=header_height + 2),
            Layout(log_panel, name="logs"),
        )

    return layout
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
async def _run_job_monitor_async(
    job_id: str,
    status_q: Queue = None,
    stop_on_status_complete: bool = False,
):
    """Async job monitor - uses SDK LogStream for logs.

    Runs three background tasks (log streaming, metrics polling, job-state
    polling) and a foreground render loop that redraws the TUI until the job
    reaches a terminal state or the user interrupts.

    Args:
        job_id: The job to monitor.
        status_q: Optional queue of JobStatus updates; when provided the TUI
            shows a second status pane fed from this queue.
        stop_on_status_complete: Exit the loop once a drained JobStatus has
            ``complete`` set.
    """
    c3 = C3()
    # Bounded buffer: old log lines are silently dropped past MAX_LOG_LINES.
    logs: deque[str] = deque(maxlen=MAX_LOG_LINES)
    log_stream: Optional[LogStream] = None
    log_task: Optional[asyncio.Task] = None
    metrics = None
    job_status: Optional[JobStatus] = None
    ws_status = "[dim]● waiting[/]"
    stop_event = asyncio.Event()

    console.print(f"[dim]Ctrl+C to exit[/]\n")

    # Wait for job
    job = None
    with console.status("[cyan]Connecting..."):
        while not job:
            try:
                job = c3.jobs.get(job_id)
            except Exception:
                # Job may not be visible yet; retry every second.
                await asyncio.sleep(1)

    async def stream_logs_task(stream: LogStream):
        """Background task that streams logs continuously with batching"""
        nonlocal ws_status
        log_batch = []
        last_flush = asyncio.get_event_loop().time()
        FLUSH_INTERVAL = 0.05  # Flush every 50ms

        try:
            async for line in stream:
                if stop_event.is_set():
                    break
                log_batch.append(line)

                # Flush batch periodically
                now = asyncio.get_event_loop().time()
                if now - last_flush >= FLUSH_INTERVAL:
                    logs.extend(log_batch)
                    log_batch.clear()
                    last_flush = now

            # Flush remaining
            if log_batch:
                logs.extend(log_batch)
        except Exception:
            # Best-effort: stream errors just end streaming.
            pass
        finally:
            ws_status = "[dim]● ended[/]"

    async def fetch_metrics_task():
        """Background task that fetches metrics periodically"""
        nonlocal metrics
        while not stop_event.is_set():
            try:
                # Metrics only exist while the job is on a host.
                if job and job.state in ("assigned", "running"):
                    metrics = await asyncio.to_thread(c3.jobs.metrics, job_id)
            except Exception:
                pass
            await asyncio.sleep(2)

    async def fetch_job_task():
        """Background task that fetches job state periodically"""
        nonlocal job, log_stream, log_task, ws_status
        while not stop_event.is_set():
            try:
                job = await asyncio.to_thread(c3.jobs.get, job_id)

                # Start log stream when job is ready
                if job.state in ("assigned", "running") and log_stream is None and job.job_key:
                    # Fetch initial logs ONCE
                    if job.state == "running":
                        initial = await asyncio.to_thread(fetch_logs, c3, job_id, MAX_LOG_LINES)
                        for line in initial:
                            logs.append(line)

                    # Connect websocket and start streaming task
                    log_stream = LogStream(
                        c3, job_id,
                        job_key=job.job_key,
                        fetch_initial=False,
                        max_buffer=MAX_LOG_LINES,
                    )
                    await log_stream.connect()
                    log_task = asyncio.create_task(stream_logs_task(log_stream))
                    ws_status = "[green]● live[/]"
            except Exception:
                pass
            await asyncio.sleep(1)

    try:
        # Start background tasks
        metrics_task = asyncio.create_task(fetch_metrics_task())
        job_task = asyncio.create_task(fetch_job_task())

        # screen=True puts the terminal in alternate-screen mode.
        with Live(console=console, refresh_per_second=10, screen=True) as live:
            while True:
                try:
                    elapsed = int(time.time() - job.started_at) if job and job.started_at else 0

                    # Drain status queue if provided
                    if status_q:
                        while True:
                            try:
                                # Keep only the newest status snapshot.
                                job_status = status_q.get_nowait()
                            except Empty:
                                break

                    # Calculate layout
                    term_height = console.size.height
                    header_height = 3 + (len(metrics.gpus) if metrics and metrics.gpus else 0) + (1 if metrics and metrics.system else 0)
                    content_height = max(10, term_height - header_height - 4)

                    # Update display
                    if job:
                        layout = build_layout(job, elapsed, metrics, logs, ws_status, job_status, content_height)
                        live.update(layout)

                    # Check completion
                    if stop_on_status_complete and job_status and job_status.complete:
                        # Brief pause so the final frame is visible.
                        await asyncio.sleep(3)
                        break

                    # Job terminated
                    if job and job.state in ("succeeded", "failed", "canceled", "terminated"):
                        # Fetch final logs ONCE
                        final = await asyncio.to_thread(fetch_logs, c3, job_id, MAX_LOG_LINES)
                        logs.clear()
                        for line in final:
                            logs.append(line)
                        ws_status = "[dim]● ended[/]"

                        layout = build_layout(job, elapsed, metrics, logs, ws_status, job_status, content_height)
                        live.update(layout)
                        await asyncio.sleep(1)
                        break

                except KeyboardInterrupt:
                    raise
                except Exception as e:
                    # Show render-loop errors but keep the monitor alive.
                    console.print(f"[red]{e}[/]")

                await asyncio.sleep(0.05)  # 20fps render loop

        console.print(f"\n[bold]Job {job.state}[/]")

    except KeyboardInterrupt:
        console.print("\n[dim]Stopped[/]")
    finally:
        # Signal all background tasks to stop, then tear them down.
        stop_event.set()
        if log_task:
            log_task.cancel()
        if log_stream:
            await log_stream.close()
        metrics_task.cancel()
        job_task.cancel()
|
|
321
|
+
|
|
322
|
+
|
|
323
|
+
def run_job_monitor(
    job_id: str,
    status_q: Queue = None,
    stop_on_status_complete: bool = False,
):
    """Run the job monitor TUI (sync wrapper).

    Args:
        job_id: The job ID to monitor
        status_q: Optional queue receiving JobStatus updates for status pane
        stop_on_status_complete: If True, exit when JobStatus.complete is True
    """
    coro = _run_job_monitor_async(job_id, status_q, stop_on_status_complete)
    asyncio.run(coro)
|
c3cli/user.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""c3 user commands"""
|
|
2
|
+
import typer
|
|
3
|
+
from c3 import C3
|
|
4
|
+
from .output import output, spinner
|
|
5
|
+
|
|
6
|
+
app = typer.Typer(help="User account commands")
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@app.callback(invoke_without_command=True)
def user_info(
    ctx: typer.Context,
    fmt: str = typer.Option("table", "--output", "-o", help="Output format: table|json"),
):
    """Get current user info"""
    # Only act when `user` is invoked bare; subcommands handle themselves.
    if ctx.invoked_subcommand is not None:
        return
    client = C3()
    with spinner("Fetching user info..."):
        account = client.user.get()
    output(account, fmt)
|