hud-python 0.3.5__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of hud-python might be problematic.
- hud/__init__.py +22 -89
- hud/agents/__init__.py +15 -0
- hud/agents/art.py +101 -0
- hud/agents/base.py +599 -0
- hud/{mcp → agents}/claude.py +373 -321
- hud/{mcp → agents}/langchain.py +250 -250
- hud/agents/misc/__init__.py +7 -0
- hud/{agent → agents}/misc/response_agent.py +80 -80
- hud/{mcp → agents}/openai.py +352 -334
- hud/agents/openai_chat_generic.py +154 -0
- hud/{mcp → agents}/tests/__init__.py +1 -1
- hud/agents/tests/test_base.py +742 -0
- hud/agents/tests/test_claude.py +324 -0
- hud/{mcp → agents}/tests/test_client.py +363 -324
- hud/{mcp → agents}/tests/test_openai.py +237 -238
- hud/cli/__init__.py +617 -0
- hud/cli/__main__.py +8 -0
- hud/cli/analyze.py +371 -0
- hud/cli/analyze_metadata.py +230 -0
- hud/cli/build.py +427 -0
- hud/cli/clone.py +185 -0
- hud/cli/cursor.py +92 -0
- hud/cli/debug.py +392 -0
- hud/cli/docker_utils.py +83 -0
- hud/cli/init.py +281 -0
- hud/cli/interactive.py +353 -0
- hud/cli/mcp_server.py +756 -0
- hud/cli/pull.py +336 -0
- hud/cli/push.py +370 -0
- hud/cli/remote_runner.py +311 -0
- hud/cli/runner.py +160 -0
- hud/cli/tests/__init__.py +3 -0
- hud/cli/tests/test_analyze.py +284 -0
- hud/cli/tests/test_cli_init.py +265 -0
- hud/cli/tests/test_cli_main.py +27 -0
- hud/cli/tests/test_clone.py +142 -0
- hud/cli/tests/test_cursor.py +253 -0
- hud/cli/tests/test_debug.py +453 -0
- hud/cli/tests/test_mcp_server.py +139 -0
- hud/cli/tests/test_utils.py +388 -0
- hud/cli/utils.py +263 -0
- hud/clients/README.md +143 -0
- hud/clients/__init__.py +16 -0
- hud/clients/base.py +379 -0
- hud/clients/fastmcp.py +222 -0
- hud/clients/mcp_use.py +278 -0
- hud/clients/tests/__init__.py +1 -0
- hud/clients/tests/test_client_integration.py +111 -0
- hud/clients/tests/test_fastmcp.py +342 -0
- hud/clients/tests/test_protocol.py +188 -0
- hud/clients/utils/__init__.py +1 -0
- hud/clients/utils/retry_transport.py +160 -0
- hud/datasets.py +322 -192
- hud/misc/__init__.py +1 -0
- hud/{agent → misc}/claude_plays_pokemon.py +292 -283
- hud/otel/__init__.py +35 -0
- hud/otel/collector.py +142 -0
- hud/otel/config.py +164 -0
- hud/otel/context.py +536 -0
- hud/otel/exporters.py +366 -0
- hud/otel/instrumentation.py +97 -0
- hud/otel/processors.py +118 -0
- hud/otel/tests/__init__.py +1 -0
- hud/otel/tests/test_processors.py +197 -0
- hud/server/__init__.py +5 -5
- hud/server/context.py +114 -0
- hud/server/helper/__init__.py +5 -0
- hud/server/low_level.py +132 -0
- hud/server/server.py +166 -0
- hud/server/tests/__init__.py +3 -0
- hud/settings.py +73 -79
- hud/shared/__init__.py +5 -0
- hud/{exceptions.py → shared/exceptions.py} +180 -180
- hud/{server → shared}/requests.py +264 -264
- hud/shared/tests/test_exceptions.py +157 -0
- hud/{server → shared}/tests/test_requests.py +275 -275
- hud/telemetry/__init__.py +25 -30
- hud/telemetry/instrument.py +379 -0
- hud/telemetry/job.py +309 -141
- hud/telemetry/replay.py +74 -0
- hud/telemetry/trace.py +83 -0
- hud/tools/__init__.py +33 -34
- hud/tools/base.py +365 -65
- hud/tools/bash.py +161 -137
- hud/tools/computer/__init__.py +15 -13
- hud/tools/computer/anthropic.py +437 -420
- hud/tools/computer/hud.py +376 -334
- hud/tools/computer/openai.py +295 -292
- hud/tools/computer/settings.py +82 -0
- hud/tools/edit.py +314 -290
- hud/tools/executors/__init__.py +30 -30
- hud/tools/executors/base.py +539 -532
- hud/tools/executors/pyautogui.py +621 -619
- hud/tools/executors/tests/__init__.py +1 -1
- hud/tools/executors/tests/test_base_executor.py +338 -338
- hud/tools/executors/tests/test_pyautogui_executor.py +165 -165
- hud/tools/executors/xdo.py +511 -503
- hud/tools/{playwright_tool.py → playwright.py} +412 -379
- hud/tools/tests/__init__.py +3 -3
- hud/tools/tests/test_base.py +282 -0
- hud/tools/tests/test_bash.py +158 -152
- hud/tools/tests/test_bash_extended.py +197 -0
- hud/tools/tests/test_computer.py +425 -52
- hud/tools/tests/test_computer_actions.py +34 -34
- hud/tools/tests/test_edit.py +259 -240
- hud/tools/tests/test_init.py +27 -27
- hud/tools/tests/test_playwright_tool.py +183 -183
- hud/tools/tests/test_tools.py +145 -157
- hud/tools/tests/test_utils.py +156 -156
- hud/tools/types.py +72 -0
- hud/tools/utils.py +50 -50
- hud/types.py +136 -89
- hud/utils/__init__.py +10 -16
- hud/utils/async_utils.py +65 -0
- hud/utils/design.py +168 -0
- hud/utils/mcp.py +55 -0
- hud/utils/progress.py +149 -149
- hud/utils/telemetry.py +66 -66
- hud/utils/tests/test_async_utils.py +173 -0
- hud/utils/tests/test_init.py +17 -21
- hud/utils/tests/test_progress.py +261 -225
- hud/utils/tests/test_telemetry.py +82 -37
- hud/utils/tests/test_version.py +8 -8
- hud/version.py +7 -7
- hud_python-0.4.1.dist-info/METADATA +476 -0
- hud_python-0.4.1.dist-info/RECORD +132 -0
- hud_python-0.4.1.dist-info/entry_points.txt +3 -0
- {hud_python-0.3.5.dist-info → hud_python-0.4.1.dist-info}/licenses/LICENSE +21 -21
- hud/adapters/__init__.py +0 -8
- hud/adapters/claude/__init__.py +0 -5
- hud/adapters/claude/adapter.py +0 -180
- hud/adapters/claude/tests/__init__.py +0 -1
- hud/adapters/claude/tests/test_adapter.py +0 -519
- hud/adapters/common/__init__.py +0 -6
- hud/adapters/common/adapter.py +0 -178
- hud/adapters/common/tests/test_adapter.py +0 -289
- hud/adapters/common/types.py +0 -446
- hud/adapters/operator/__init__.py +0 -5
- hud/adapters/operator/adapter.py +0 -108
- hud/adapters/operator/tests/__init__.py +0 -1
- hud/adapters/operator/tests/test_adapter.py +0 -370
- hud/agent/__init__.py +0 -19
- hud/agent/base.py +0 -126
- hud/agent/claude.py +0 -271
- hud/agent/langchain.py +0 -215
- hud/agent/misc/__init__.py +0 -3
- hud/agent/operator.py +0 -268
- hud/agent/tests/__init__.py +0 -1
- hud/agent/tests/test_base.py +0 -202
- hud/env/__init__.py +0 -11
- hud/env/client.py +0 -35
- hud/env/docker_client.py +0 -349
- hud/env/environment.py +0 -446
- hud/env/local_docker_client.py +0 -358
- hud/env/remote_client.py +0 -212
- hud/env/remote_docker_client.py +0 -292
- hud/gym.py +0 -130
- hud/job.py +0 -773
- hud/mcp/__init__.py +0 -17
- hud/mcp/base.py +0 -631
- hud/mcp/client.py +0 -312
- hud/mcp/tests/test_base.py +0 -512
- hud/mcp/tests/test_claude.py +0 -294
- hud/task.py +0 -149
- hud/taskset.py +0 -237
- hud/telemetry/_trace.py +0 -347
- hud/telemetry/context.py +0 -230
- hud/telemetry/exporter.py +0 -575
- hud/telemetry/instrumentation/__init__.py +0 -3
- hud/telemetry/instrumentation/mcp.py +0 -259
- hud/telemetry/instrumentation/registry.py +0 -59
- hud/telemetry/mcp_models.py +0 -270
- hud/telemetry/tests/__init__.py +0 -1
- hud/telemetry/tests/test_context.py +0 -210
- hud/telemetry/tests/test_trace.py +0 -312
- hud/tools/helper/README.md +0 -56
- hud/tools/helper/__init__.py +0 -9
- hud/tools/helper/mcp_server.py +0 -78
- hud/tools/helper/server_initialization.py +0 -115
- hud/tools/helper/utils.py +0 -58
- hud/trajectory.py +0 -94
- hud/utils/agent.py +0 -37
- hud/utils/common.py +0 -256
- hud/utils/config.py +0 -120
- hud/utils/deprecation.py +0 -115
- hud/utils/misc.py +0 -53
- hud/utils/tests/test_common.py +0 -277
- hud/utils/tests/test_config.py +0 -129
- hud_python-0.3.5.dist-info/METADATA +0 -284
- hud_python-0.3.5.dist-info/RECORD +0 -120
- /hud/{adapters/common → shared}/tests/__init__.py +0 -0
- {hud_python-0.3.5.dist-info → hud_python-0.4.1.dist-info}/WHEEL +0 -0
hud/utils/mcp.py
ADDED
@@ -0,0 +1,55 @@
+from __future__ import annotations
+
+from typing import Any
+
+from pydantic import BaseModel, Field
+
+from hud.settings import settings
+
+
+class MCPConfigPatch(BaseModel):
+    """Patch for MCP config."""
+
+    headers: dict[str, Any] | None = Field(default_factory=dict, alias="headers")
+    meta: dict[str, Any] | None = Field(default_factory=dict, alias="meta")
+
+
+def patch_mcp_config(mcp_config: dict[str, dict[str, Any]], patch: MCPConfigPatch) -> None:
+    """Patch MCP config with additional values."""
+    hud_mcp_url = settings.hud_mcp_url
+
+    for server_cfg in mcp_config.values():
+        url = server_cfg.get("url", "")
+
+        # 1) HTTP header lane (only for hud MCP servers)
+        if hud_mcp_url in url and patch.headers:
+            for key, value in patch.headers.items():
+                headers = server_cfg.setdefault("headers", {})
+                headers.setdefault(key, value)
+
+        # 2) Metadata lane (for all servers)
+        if patch.meta:
+            for key, value in patch.meta.items():
+                meta = server_cfg.setdefault("meta", {})
+                meta.setdefault(key, value)
+
+
+def setup_hud_telemetry(mcp_config: dict[str, dict[str, Any]], auto_trace: bool = True) -> None:
+    """Setup telemetry for hud servers."""
+    if not mcp_config:
+        raise ValueError("Please run initialize() before setting up client-side telemetry")
+
+    from hud.otel import get_current_task_run_id
+    from hud.telemetry import trace
+
+    run_id = get_current_task_run_id()
+    if not run_id and auto_trace:
+        auto_trace_cm = trace("My Trace")
+        run_id = auto_trace_cm.__enter__()
+
+    # Patch HUD servers with run-id (works whether auto or user trace)
+    if run_id:
+        patch_mcp_config(
+            mcp_config,
+            MCPConfigPatch(headers={"Run-Id": run_id}, meta={"run_id": run_id}),
+        )
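For orientation, a minimal usage sketch of the new helpers. This is not taken from the package: the server names, URLs, and run id are invented, and whether the Run-Id header lane fires depends on settings.hud_mcp_url matching a server URL.

from hud.utils.mcp import MCPConfigPatch, patch_mcp_config

# Hypothetical MCP config: server names and URLs are placeholders.
mcp_config = {
    "hud": {"url": "https://mcp.hud.example/mcp"},
    "local": {"url": "http://localhost:8765/mcp"},
}

patch_mcp_config(
    mcp_config,
    MCPConfigPatch(headers={"Run-Id": "run-123"}, meta={"run_id": "run-123"}),
)

# The metadata lane applies to every server; setdefault() never overwrites keys
# that are already present.
assert mcp_config["local"]["meta"]["run_id"] == "run-123"
# The header lane is applied only to servers whose URL contains settings.hud_mcp_url.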
hud/utils/progress.py
CHANGED
@@ -1,149 +1,149 @@
All 149 lines are marked as removed and re-added; the removed and re-added content are identical, so the file is shown once below.

from __future__ import annotations

import time
from collections import defaultdict


class StepProgressTracker:
    """
    Tracks progress across potentially parallel async tasks based on steps completed.
    Provides estimates assuming tasks run up to max_steps_per_task.
    """

    def __init__(self, total_tasks: int, max_steps_per_task: int) -> None:
        """
        Initialize the StepProgressTracker.

        Args:
            total_tasks: The total number of tasks to track.
            max_steps_per_task: The maximum number of steps per task.

        Raises:
            ValueError: If total_tasks or max_steps_per_task is not positive.
        """
        if total_tasks <= 0:
            raise ValueError("total_tasks must be positive")
        if max_steps_per_task <= 0:
            raise ValueError("max_steps_per_task must be positive")

        self.total_tasks = total_tasks
        self.max_steps_per_task = max_steps_per_task
        self.total_potential_steps = total_tasks * max_steps_per_task

        # Use asyncio.Lock for potentially concurrent updates/reads if needed,
        # but start without for simplicity in single-threaded asyncio.
        # self._lock = asyncio.Lock()
        self._task_steps: dict[str, int] = defaultdict(int)
        self._finished_tasks: dict[str, bool] = defaultdict(bool)
        self._tasks_started = 0
        self._tasks_finished = 0

        self.start_time: float | None = None
        self.current_total_steps = 0

    def start_task(self, task_id: str) -> None:
        # async with self._lock: # If using lock
        if self.start_time is None:
            self.start_time = time.monotonic()
        self._task_steps[task_id] = 0
        self._finished_tasks[task_id] = False
        self._tasks_started += 1

    def increment_step(self, task_id: str) -> None:
        # async with self._lock:
        if (
            not self._finished_tasks[task_id]
            and self._task_steps[task_id] < self.max_steps_per_task
        ):
            self._task_steps[task_id] += 1
            # Update overall progress immediately
            self._update_total_steps()

    def finish_task(self, task_id: str) -> None:
        # async with self._lock:
        if not self._finished_tasks[task_id]:
            # For calculation, consider a finished task as having completed max steps
            self._task_steps[task_id] = self.max_steps_per_task
            self._finished_tasks[task_id] = True
            self._tasks_finished += 1
            # Update overall progress
            self._update_total_steps()

    def _update_total_steps(self) -> None:
        # This could be expensive if called extremely frequently.
        # Called after increment or finish.
        # async with self._lock:
        self.current_total_steps = sum(self._task_steps.values())

    def get_progress(self) -> tuple[int, int, float]:
        """Returns (current_steps, total_potential_steps, percentage)."""
        # async with self._lock:
        # Recalculate here for safety, though _update_total_steps should keep it current
        # current_steps = sum(self._task_steps.values())
        current_steps = self.current_total_steps

        percentage = 0.0
        if self.total_potential_steps > 0:
            percentage = (current_steps / self.total_potential_steps) * 100
        return current_steps, self.total_potential_steps, percentage

    def get_stats(self) -> tuple[float, float | None]:
        """Returns (rate_steps_per_minute, eta_seconds_upper_bound)."""
        # async with self._lock:
        if self.start_time is None or self._tasks_started == 0:
            return 0.0, None  # No rate or ETA yet

        elapsed_time = time.monotonic() - self.start_time
        current_steps = self.current_total_steps

        rate_sec = 0.0
        if elapsed_time > 0:
            rate_sec = current_steps / elapsed_time

        rate_min = rate_sec * 60  # Convert rate to steps per minute

        eta = None
        # ETA calculation still uses rate_sec (steps/second) for time estimation in seconds
        if rate_sec > 0:
            remaining_steps = self.total_potential_steps - current_steps
            eta = remaining_steps / rate_sec if remaining_steps > 0 else 0.0

        return rate_min, eta  # Return rate in steps/min

    def is_finished(self) -> bool:
        # async with self._lock:
        return self._tasks_finished >= self.total_tasks

    def display(self, bar_length: int = 40) -> str:
        """Generates a progress string similar to tqdm."""
        current_steps, total_steps, percentage = self.get_progress()
        rate_min, eta = self.get_stats()  # Rate is now per minute

        # Ensure valid values for display
        current_steps = min(current_steps, total_steps)
        percentage = max(0.0, min(100.0, percentage))

        filled_length = int(bar_length * current_steps // total_steps) if total_steps else 0
        bar = "█" * filled_length + "-" * (bar_length - filled_length)

        # Format time
        elapsed_str = "0:00"
        eta_str = "??:??"
        if self.start_time:
            elapsed_seconds = int(time.monotonic() - self.start_time)
            elapsed_str = f"{elapsed_seconds // 60}:{elapsed_seconds % 60:02d}"
        if eta is not None:
            eta_seconds = int(eta)
            eta_str = f"{eta_seconds // 60}:{eta_seconds % 60:02d}"
        elif self.is_finished():
            eta_str = "0:00"

        # Update rate string format
        rate_str = f"{rate_min:.1f} steps/min" if rate_min > 0 else "?? steps/min"

        # Format steps - use K/M for large numbers if desired, keep simple for now
        steps_str = f"{current_steps}/{total_steps}"

        # tasks_str = f" {self._tasks_finished}/{self.total_tasks} tasks"  # Optional tasks counter

        return f"{percentage:3.0f}%|{bar}| {steps_str} [{elapsed_str}<{eta_str}, {rate_str}]"
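A short sketch of how StepProgressTracker might be driven from asyncio code (illustrative only; the task ids and step counts are arbitrary):

import asyncio

from hud.utils.progress import StepProgressTracker


async def run_task(tracker: StepProgressTracker, task_id: str, steps: int) -> None:
    tracker.start_task(task_id)
    for _ in range(steps):
        await asyncio.sleep(0)  # stand-in for real per-step work
        tracker.increment_step(task_id)
    tracker.finish_task(task_id)


async def main() -> None:
    tracker = StepProgressTracker(total_tasks=3, max_steps_per_task=5)
    await asyncio.gather(*(run_task(tracker, f"task-{i}", 5) for i in range(3)))
    # e.g. "100%|████████████████████████████████████████| 15/15 [0:00<0:00, ... steps/min]"
    print(tracker.display())


asyncio.run(main())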
hud/utils/telemetry.py
CHANGED
@@ -1,66 +1,66 @@
All 66 lines are marked as removed and re-added; the removed and re-added content are identical, so the file is shown once below.

from __future__ import annotations

import logging

logger = logging.getLogger(__name__)


def stream(live_url: str) -> str:
    """
    Display a stream in the HUD system.
    """
    from IPython.display import HTML, display

    html_content = f"""
    <div style="width: 960px; height: 540px; overflow: hidden;">
        <div style="transform: scale(0.5); transform-origin: top left;">
            <iframe src="{live_url}" width="1920" height="1080" style="border: 1px solid #ddd;">
            </iframe>
        </div>
    </div>
    """
    try:
        display(HTML(html_content))
    except Exception as e:
        logger.warning(e)

    return html_content


def display_screenshot(base64_image: str, width: int = 960, height: int = 540) -> str:
    """
    Display a base64-encoded screenshot image.

    Args:
        base64_image: Base64-encoded image string (without the data URI prefix)
        width: Display width in pixels
        height: Display height in pixels

    Returns:
        The HTML string used to display the image

    Note:
        This function will both display the image in IPython environments
        and return the HTML string for other contexts.
    """
    from IPython.display import HTML, display

    # Ensure the base64 image doesn't already have the data URI prefix
    if base64_image.startswith("data:image"):
        img_src = base64_image
    else:
        img_src = f"data:image/png;base64,{base64_image}"

    html_content = f"""
    <div style="width: {width}px; height: {height}px; overflow: hidden; margin: 10px 0; border: 1px solid #ddd;">
        <img src="{img_src}" style="max-width: 100%; max-height: 100%;">
    </div>
    """  # noqa: E501

    # Display in IPython environments
    try:
        display(HTML(html_content))
    except Exception as e:
        logger.warning(e)

    return html_content
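A brief sketch of calling display_screenshot (the screenshot path is made up; note the function imports IPython.display unconditionally, so IPython must be importable, and in a notebook the image is also rendered inline):

import base64
from pathlib import Path

from hud.utils.telemetry import display_screenshot

# Hypothetical screenshot file; any PNG bytes work.
png_bytes = Path("screenshot.png").read_bytes()
b64 = base64.b64encode(png_bytes).decode("ascii")

# Returns the HTML snippet regardless of whether inline display succeeded.
html = display_screenshot(b64, width=800, height=450)
assert html.strip().startswith("<div")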
hud/utils/tests/test_async_utils.py
ADDED
@@ -0,0 +1,173 @@
+"""Tests for async utilities."""
+
+from __future__ import annotations
+
+import asyncio
+import logging
+import threading
+from unittest.mock import patch
+
+import pytest
+
+from hud.utils.async_utils import fire_and_forget
+
+
+class TestFireAndForget:
+    """Test fire_and_forget function."""
+
+    @pytest.mark.asyncio
+    async def test_fire_and_forget_with_running_loop(self, caplog):
+        """Test fire_and_forget when event loop is already running."""
+        # Create a simple coroutine that sets a flag
+        flag = []
+
+        async def test_coro():
+            flag.append(True)
+
+        # Call fire_and_forget in async context
+        fire_and_forget(test_coro(), description="test task")
+
+        # Give it a moment to execute
+        await asyncio.sleep(0.1)
+
+        # Check that the coroutine was executed
+        assert flag == [True]
+
+    @pytest.mark.asyncio
+    async def test_fire_and_forget_with_exception(self, caplog):
+        """Test fire_and_forget handles exceptions gracefully."""
+
+        async def failing_coro():
+            raise ValueError("Test exception")
+
+        # This should not raise
+        fire_and_forget(failing_coro(), description="failing task")
+
+        # Give it a moment to execute
+        await asyncio.sleep(0.1)
+
+        # The exception should be handled silently
+
+    def test_fire_and_forget_no_event_loop(self):
+        """Test fire_and_forget when no event loop is running."""
+        # This test runs in sync context
+        flag = threading.Event()
+
+        async def test_coro():
+            flag.set()
+
+        # Call fire_and_forget in sync context
+        fire_and_forget(test_coro(), description="sync test")
+
+        # Wait for the thread to complete
+        assert flag.wait(timeout=2.0), "Coroutine did not execute in thread"
+
+    def test_fire_and_forget_thread_exception(self, caplog):
+        """Test fire_and_forget handles thread exceptions."""
+
+        async def failing_coro():
+            raise ValueError("Thread exception")
+
+        # Patch the logger to capture the debug call
+        from unittest.mock import patch
+
+        with patch("hud.utils.async_utils.logger") as mock_logger:
+            fire_and_forget(failing_coro(), description="thread fail")
+
+            # Give thread time to execute and log
+            import time
+
+            time.sleep(0.5)  # Wait for thread to complete
+
+            # Check that error was logged with correct format
+            mock_logger.debug.assert_called()
+            # Get the actual call arguments
+            calls = mock_logger.debug.call_args_list
+            assert any(
+                call[0][0] == "Error in threaded %s: %s"
+                and call[0][1] == "thread fail"
+                and "Thread exception" in str(call[0][2])
+                for call in calls
+            ), f"Expected log message not found in calls: {calls}"
+
+    def test_fire_and_forget_interpreter_shutdown(self, caplog):
+        """Test fire_and_forget handles interpreter shutdown gracefully."""
+
+        async def test_coro():
+            pass
+
+        # Mock the scenario where we get interpreter shutdown error
+        with patch("asyncio.get_running_loop") as mock_get_loop:
+            mock_get_loop.side_effect = RuntimeError("no running event loop")
+
+            with patch("threading.Thread") as mock_thread:
+                mock_thread.side_effect = RuntimeError(
+                    "cannot schedule new futures after interpreter shutdown"
+                )
+
+                with caplog.at_level(logging.DEBUG):
+                    # This should not raise or log
+                    fire_and_forget(test_coro(), description="shutdown test")
+
+                # No error should be logged for interpreter shutdown
+                assert not any(
+                    "Could not shutdown test" in record.message for record in caplog.records
+                )
+
+    def test_fire_and_forget_other_thread_error(self, caplog):
+        """Test fire_and_forget logs non-shutdown thread errors."""
+
+        async def test_coro():
+            pass
+
+        # Mock the scenario where we get a different error
+        with patch("asyncio.get_running_loop") as mock_get_loop:
+            mock_get_loop.side_effect = RuntimeError("no running event loop")
+
+            with patch("threading.Thread") as mock_thread:
+                mock_thread.side_effect = RuntimeError("Some other error")
+
+                # Patch the logger to capture the debug call
+                with patch("hud.utils.async_utils.logger") as mock_logger:
+                    fire_and_forget(test_coro(), description="error test")
+
+                    # Check that error was logged with correct format
+                    mock_logger.debug.assert_called_once_with(
+                        "Could not %s - no event loop available: %s",
+                        "error test",
+                        mock_thread.side_effect,
+                    )
+
+    @pytest.mark.asyncio
+    async def test_fire_and_forget_cancelled_task(self):
+        """Test fire_and_forget handles cancelled tasks."""
+
+        cancel_event = asyncio.Event()
+
+        async def long_running_coro():
+            await cancel_event.wait()
+
+        # Get the current loop
+        loop = asyncio.get_running_loop()
+
+        # Patch create_task to capture the task
+        created_task = None
+        original_create_task = loop.create_task
+
+        def mock_create_task(coro):
+            nonlocal created_task
+            created_task = original_create_task(coro)
+            return created_task
+
+        with patch.object(loop, "create_task", side_effect=mock_create_task):
+            fire_and_forget(long_running_coro(), description="cancel test")
+
+            # Give it a moment to start
+            await asyncio.sleep(0.01)
+
+            # Cancel the task
+            assert created_task is not None
+            created_task.cancel()
+
+            # This should not raise any exceptions
+            await asyncio.sleep(0.01)
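The module under test, hud/utils/async_utils.py, is not included in this excerpt. Purely to illustrate the behavior the tests above pin down (schedule on a running loop, otherwise run in a daemon thread, log failures at debug level), a sketch consistent with them could look like the following; this is an assumption, not the shipped implementation:

from __future__ import annotations

import asyncio
import logging
import threading
from typing import Any, Coroutine

logger = logging.getLogger(__name__)


def fire_and_forget(coro: Coroutine[Any, Any, Any], description: str = "task") -> None:
    """Run a coroutine in the background and never raise (sketch, not the real module)."""
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        # No running loop: drive the coroutine to completion in a daemon thread.
        def _run() -> None:
            try:
                asyncio.run(coro)
            except Exception as err:  # noqa: BLE001
                logger.debug("Error in threaded %s: %s", description, err)

        try:
            threading.Thread(target=_run, daemon=True).start()
        except RuntimeError as err:
            # Stay silent on "cannot schedule new futures after interpreter shutdown".
            if "interpreter shutdown" not in str(err):
                logger.debug("Could not %s - no event loop available: %s", description, err)
        return

    # Running loop: schedule the task and retrieve any exception so it is swallowed.
    task = loop.create_task(coro)

    def _consume(t: asyncio.Task) -> None:
        if not t.cancelled():
            t.exception()

    task.add_done_callback(_consume)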