hud-python 0.4.45__py3-none-any.whl → 0.5.13__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- hud/__init__.py +27 -7
- hud/agents/__init__.py +70 -5
- hud/agents/base.py +238 -500
- hud/agents/claude.py +236 -247
- hud/agents/gateway.py +42 -0
- hud/agents/gemini.py +264 -0
- hud/agents/gemini_cua.py +324 -0
- hud/agents/grounded_openai.py +98 -100
- hud/agents/misc/integration_test_agent.py +51 -20
- hud/agents/misc/response_agent.py +48 -36
- hud/agents/openai.py +282 -296
- hud/agents/{openai_chat_generic.py → openai_chat.py} +63 -33
- hud/agents/operator.py +199 -0
- hud/agents/resolver.py +70 -0
- hud/agents/tests/conftest.py +133 -0
- hud/agents/tests/test_base.py +300 -622
- hud/agents/tests/test_base_runtime.py +233 -0
- hud/agents/tests/test_claude.py +381 -214
- hud/agents/tests/test_client.py +9 -10
- hud/agents/tests/test_gemini.py +369 -0
- hud/agents/tests/test_grounded_openai_agent.py +65 -50
- hud/agents/tests/test_openai.py +377 -140
- hud/agents/tests/test_operator.py +362 -0
- hud/agents/tests/test_resolver.py +192 -0
- hud/agents/tests/test_run_eval.py +179 -0
- hud/agents/types.py +148 -0
- hud/cli/__init__.py +493 -546
- hud/cli/analyze.py +43 -5
- hud/cli/build.py +699 -113
- hud/cli/debug.py +8 -5
- hud/cli/dev.py +889 -732
- hud/cli/eval.py +793 -667
- hud/cli/flows/dev.py +167 -0
- hud/cli/flows/init.py +191 -0
- hud/cli/flows/tasks.py +153 -56
- hud/cli/flows/templates.py +151 -0
- hud/cli/flows/tests/__init__.py +1 -0
- hud/cli/flows/tests/test_dev.py +126 -0
- hud/cli/init.py +60 -58
- hud/cli/pull.py +1 -1
- hud/cli/push.py +38 -13
- hud/cli/rft.py +311 -0
- hud/cli/rft_status.py +145 -0
- hud/cli/tests/test_analyze.py +5 -5
- hud/cli/tests/test_analyze_metadata.py +3 -2
- hud/cli/tests/test_analyze_module.py +120 -0
- hud/cli/tests/test_build.py +110 -8
- hud/cli/tests/test_build_failure.py +41 -0
- hud/cli/tests/test_build_module.py +50 -0
- hud/cli/tests/test_cli_init.py +6 -1
- hud/cli/tests/test_cli_more_wrappers.py +30 -0
- hud/cli/tests/test_cli_root.py +140 -0
- hud/cli/tests/test_convert.py +361 -0
- hud/cli/tests/test_debug.py +12 -10
- hud/cli/tests/test_dev.py +197 -0
- hud/cli/tests/test_eval.py +251 -0
- hud/cli/tests/test_eval_bedrock.py +51 -0
- hud/cli/tests/test_init.py +124 -0
- hud/cli/tests/test_main_module.py +11 -5
- hud/cli/tests/test_mcp_server.py +12 -100
- hud/cli/tests/test_push.py +1 -1
- hud/cli/tests/test_push_happy.py +74 -0
- hud/cli/tests/test_push_wrapper.py +23 -0
- hud/cli/tests/test_registry.py +1 -1
- hud/cli/tests/test_utils.py +1 -1
- hud/cli/{rl → utils}/celebrate.py +14 -12
- hud/cli/utils/config.py +18 -1
- hud/cli/utils/docker.py +130 -4
- hud/cli/utils/env_check.py +9 -9
- hud/cli/utils/git.py +136 -0
- hud/cli/utils/interactive.py +39 -5
- hud/cli/utils/metadata.py +70 -1
- hud/cli/utils/runner.py +1 -1
- hud/cli/utils/server.py +2 -2
- hud/cli/utils/source_hash.py +3 -3
- hud/cli/utils/tasks.py +4 -1
- hud/cli/utils/tests/__init__.py +0 -0
- hud/cli/utils/tests/test_config.py +58 -0
- hud/cli/utils/tests/test_docker.py +93 -0
- hud/cli/utils/tests/test_docker_hints.py +71 -0
- hud/cli/utils/tests/test_env_check.py +74 -0
- hud/cli/utils/tests/test_environment.py +42 -0
- hud/cli/utils/tests/test_git.py +142 -0
- hud/cli/utils/tests/test_interactive_module.py +60 -0
- hud/cli/utils/tests/test_local_runner.py +50 -0
- hud/cli/utils/tests/test_logging_utils.py +23 -0
- hud/cli/utils/tests/test_metadata.py +49 -0
- hud/cli/utils/tests/test_package_runner.py +35 -0
- hud/cli/utils/tests/test_registry_utils.py +49 -0
- hud/cli/utils/tests/test_remote_runner.py +25 -0
- hud/cli/utils/tests/test_runner_modules.py +52 -0
- hud/cli/utils/tests/test_source_hash.py +36 -0
- hud/cli/utils/tests/test_tasks.py +80 -0
- hud/cli/utils/version_check.py +258 -0
- hud/cli/{rl → utils}/viewer.py +2 -2
- hud/clients/README.md +12 -11
- hud/clients/__init__.py +4 -3
- hud/clients/base.py +166 -26
- hud/clients/environment.py +51 -0
- hud/clients/fastmcp.py +13 -6
- hud/clients/mcp_use.py +45 -15
- hud/clients/tests/test_analyze_scenarios.py +206 -0
- hud/clients/tests/test_protocol.py +9 -3
- hud/datasets/__init__.py +23 -20
- hud/datasets/loader.py +326 -0
- hud/datasets/runner.py +198 -105
- hud/datasets/tests/__init__.py +0 -0
- hud/datasets/tests/test_loader.py +221 -0
- hud/datasets/tests/test_utils.py +315 -0
- hud/datasets/utils.py +270 -90
- hud/environment/__init__.py +52 -0
- hud/environment/connection.py +258 -0
- hud/environment/connectors/__init__.py +33 -0
- hud/environment/connectors/base.py +68 -0
- hud/environment/connectors/local.py +177 -0
- hud/environment/connectors/mcp_config.py +137 -0
- hud/environment/connectors/openai.py +101 -0
- hud/environment/connectors/remote.py +172 -0
- hud/environment/environment.py +835 -0
- hud/environment/integrations/__init__.py +45 -0
- hud/environment/integrations/adk.py +67 -0
- hud/environment/integrations/anthropic.py +196 -0
- hud/environment/integrations/gemini.py +92 -0
- hud/environment/integrations/langchain.py +82 -0
- hud/environment/integrations/llamaindex.py +68 -0
- hud/environment/integrations/openai.py +238 -0
- hud/environment/mock.py +306 -0
- hud/environment/router.py +263 -0
- hud/environment/scenarios.py +620 -0
- hud/environment/tests/__init__.py +1 -0
- hud/environment/tests/test_connection.py +317 -0
- hud/environment/tests/test_connectors.py +205 -0
- hud/environment/tests/test_environment.py +593 -0
- hud/environment/tests/test_integrations.py +257 -0
- hud/environment/tests/test_local_connectors.py +242 -0
- hud/environment/tests/test_scenarios.py +1086 -0
- hud/environment/tests/test_tools.py +208 -0
- hud/environment/types.py +23 -0
- hud/environment/utils/__init__.py +35 -0
- hud/environment/utils/formats.py +215 -0
- hud/environment/utils/schema.py +171 -0
- hud/environment/utils/tool_wrappers.py +113 -0
- hud/eval/__init__.py +67 -0
- hud/eval/context.py +727 -0
- hud/eval/display.py +299 -0
- hud/eval/instrument.py +187 -0
- hud/eval/manager.py +533 -0
- hud/eval/parallel.py +268 -0
- hud/eval/task.py +372 -0
- hud/eval/tests/__init__.py +1 -0
- hud/eval/tests/test_context.py +178 -0
- hud/eval/tests/test_eval.py +210 -0
- hud/eval/tests/test_manager.py +152 -0
- hud/eval/tests/test_parallel.py +168 -0
- hud/eval/tests/test_task.py +291 -0
- hud/eval/types.py +65 -0
- hud/eval/utils.py +194 -0
- hud/patches/__init__.py +19 -0
- hud/patches/mcp_patches.py +308 -0
- hud/patches/warnings.py +54 -0
- hud/samples/browser.py +4 -4
- hud/server/__init__.py +2 -1
- hud/server/low_level.py +2 -1
- hud/server/router.py +164 -0
- hud/server/server.py +567 -80
- hud/server/tests/test_mcp_server_integration.py +11 -11
- hud/server/tests/test_mcp_server_more.py +1 -1
- hud/server/tests/test_server_extra.py +2 -0
- hud/settings.py +45 -3
- hud/shared/exceptions.py +36 -10
- hud/shared/hints.py +26 -1
- hud/shared/requests.py +15 -3
- hud/shared/tests/test_exceptions.py +40 -31
- hud/shared/tests/test_hints.py +167 -0
- hud/telemetry/__init__.py +20 -19
- hud/telemetry/exporter.py +201 -0
- hud/telemetry/instrument.py +165 -253
- hud/telemetry/tests/test_eval_telemetry.py +356 -0
- hud/telemetry/tests/test_exporter.py +258 -0
- hud/telemetry/tests/test_instrument.py +401 -0
- hud/tools/__init__.py +18 -2
- hud/tools/agent.py +223 -0
- hud/tools/apply_patch.py +639 -0
- hud/tools/base.py +54 -4
- hud/tools/bash.py +2 -2
- hud/tools/computer/__init__.py +36 -3
- hud/tools/computer/anthropic.py +2 -2
- hud/tools/computer/gemini.py +385 -0
- hud/tools/computer/hud.py +23 -6
- hud/tools/computer/openai.py +20 -21
- hud/tools/computer/qwen.py +434 -0
- hud/tools/computer/settings.py +37 -0
- hud/tools/edit.py +3 -7
- hud/tools/executors/base.py +4 -2
- hud/tools/executors/pyautogui.py +1 -1
- hud/tools/grounding/grounded_tool.py +13 -18
- hud/tools/grounding/grounder.py +10 -31
- hud/tools/grounding/tests/test_grounded_tool.py +26 -44
- hud/tools/jupyter.py +330 -0
- hud/tools/playwright.py +18 -3
- hud/tools/shell.py +308 -0
- hud/tools/tests/test_agent_tool.py +355 -0
- hud/tools/tests/test_apply_patch.py +718 -0
- hud/tools/tests/test_computer.py +4 -9
- hud/tools/tests/test_computer_actions.py +24 -2
- hud/tools/tests/test_jupyter_tool.py +181 -0
- hud/tools/tests/test_shell.py +596 -0
- hud/tools/tests/test_submit.py +85 -0
- hud/tools/tests/test_types.py +193 -0
- hud/tools/types.py +21 -1
- hud/types.py +194 -56
- hud/utils/__init__.py +2 -0
- hud/utils/env.py +67 -0
- hud/utils/hud_console.py +89 -18
- hud/utils/mcp.py +15 -58
- hud/utils/strict_schema.py +162 -0
- hud/utils/tests/test_init.py +1 -2
- hud/utils/tests/test_mcp.py +1 -28
- hud/utils/tests/test_pretty_errors.py +186 -0
- hud/utils/tests/test_tool_shorthand.py +154 -0
- hud/utils/tests/test_version.py +1 -1
- hud/utils/types.py +20 -0
- hud/version.py +1 -1
- hud_python-0.5.13.dist-info/METADATA +264 -0
- hud_python-0.5.13.dist-info/RECORD +305 -0
- {hud_python-0.4.45.dist-info → hud_python-0.5.13.dist-info}/WHEEL +1 -1
- hud/agents/langchain.py +0 -261
- hud/agents/lite_llm.py +0 -72
- hud/cli/rl/__init__.py +0 -180
- hud/cli/rl/config.py +0 -101
- hud/cli/rl/display.py +0 -133
- hud/cli/rl/gpu.py +0 -63
- hud/cli/rl/gpu_utils.py +0 -321
- hud/cli/rl/local_runner.py +0 -595
- hud/cli/rl/presets.py +0 -96
- hud/cli/rl/remote_runner.py +0 -463
- hud/cli/rl/rl_api.py +0 -150
- hud/cli/rl/vllm.py +0 -177
- hud/cli/rl/wait_utils.py +0 -89
- hud/datasets/parallel.py +0 -687
- hud/misc/__init__.py +0 -1
- hud/misc/claude_plays_pokemon.py +0 -292
- hud/otel/__init__.py +0 -35
- hud/otel/collector.py +0 -142
- hud/otel/config.py +0 -181
- hud/otel/context.py +0 -570
- hud/otel/exporters.py +0 -369
- hud/otel/instrumentation.py +0 -135
- hud/otel/processors.py +0 -121
- hud/otel/tests/__init__.py +0 -1
- hud/otel/tests/test_processors.py +0 -197
- hud/rl/README.md +0 -30
- hud/rl/__init__.py +0 -1
- hud/rl/actor.py +0 -176
- hud/rl/buffer.py +0 -405
- hud/rl/chat_template.jinja +0 -101
- hud/rl/config.py +0 -192
- hud/rl/distributed.py +0 -132
- hud/rl/learner.py +0 -637
- hud/rl/tests/__init__.py +0 -1
- hud/rl/tests/test_learner.py +0 -186
- hud/rl/train.py +0 -382
- hud/rl/types.py +0 -101
- hud/rl/utils/start_vllm_server.sh +0 -30
- hud/rl/utils.py +0 -524
- hud/rl/vllm_adapter.py +0 -143
- hud/telemetry/job.py +0 -352
- hud/telemetry/replay.py +0 -74
- hud/telemetry/tests/test_replay.py +0 -40
- hud/telemetry/tests/test_trace.py +0 -63
- hud/telemetry/trace.py +0 -158
- hud/utils/agent_factories.py +0 -86
- hud/utils/async_utils.py +0 -65
- hud/utils/group_eval.py +0 -223
- hud/utils/progress.py +0 -149
- hud/utils/tasks.py +0 -127
- hud/utils/tests/test_async_utils.py +0 -173
- hud/utils/tests/test_progress.py +0 -261
- hud_python-0.4.45.dist-info/METADATA +0 -552
- hud_python-0.4.45.dist-info/RECORD +0 -228
- {hud_python-0.4.45.dist-info → hud_python-0.5.13.dist-info}/entry_points.txt +0 -0
- {hud_python-0.4.45.dist-info → hud_python-0.5.13.dist-info}/licenses/LICENSE +0 -0
hud/utils/tests/test_async_utils.py
DELETED

```diff
@@ -1,173 +0,0 @@
-"""Tests for async utilities."""
-
-from __future__ import annotations
-
-import asyncio
-import logging
-import threading
-from unittest.mock import patch
-
-import pytest
-
-from hud.utils.async_utils import fire_and_forget
-
-
-class TestFireAndForget:
-    """Test fire_and_forget function."""
-
-    @pytest.mark.asyncio
-    async def test_fire_and_forget_with_running_loop(self, caplog):
-        """Test fire_and_forget when event loop is already running."""
-        # Create a simple coroutine that sets a flag
-        flag = []
-
-        async def test_coro():
-            flag.append(True)
-
-        # Call fire_and_forget in async context
-        fire_and_forget(test_coro(), description="test task")
-
-        # Give it a moment to execute
-        await asyncio.sleep(0.1)
-
-        # Check that the coroutine was executed
-        assert flag == [True]
-
-    @pytest.mark.asyncio
-    async def test_fire_and_forget_with_exception(self, caplog):
-        """Test fire_and_forget handles exceptions gracefully."""
-
-        async def failing_coro():
-            raise ValueError("Test exception")
-
-        # This should not raise
-        fire_and_forget(failing_coro(), description="failing task")
-
-        # Give it a moment to execute
-        await asyncio.sleep(0.1)
-
-        # The exception should be handled silently
-
-    def test_fire_and_forget_no_event_loop(self):
-        """Test fire_and_forget when no event loop is running."""
-        # This test runs in sync context
-        flag = threading.Event()
-
-        async def test_coro():
-            flag.set()
-
-        # Call fire_and_forget in sync context
-        fire_and_forget(test_coro(), description="sync test")
-
-        # Wait for the thread to complete
-        assert flag.wait(timeout=2.0), "Coroutine did not execute in thread"
-
-    def test_fire_and_forget_thread_exception(self, caplog):
-        """Test fire_and_forget handles thread exceptions."""
-
-        async def failing_coro():
-            raise ValueError("Thread exception")
-
-        # Patch the logger to capture the debug call
-        from unittest.mock import patch
-
-        with patch("hud.utils.async_utils.logger") as mock_logger:
-            fire_and_forget(failing_coro(), description="thread fail")
-
-            # Give thread time to execute and log
-            import time
-
-            time.sleep(0.5)  # Wait for thread to complete
-
-            # Check that error was logged with correct format
-            mock_logger.debug.assert_called()
-            # Get the actual call arguments
-            calls = mock_logger.debug.call_args_list
-            assert any(
-                call[0][0] == "Error in threaded %s: %s"
-                and call[0][1] == "thread fail"
-                and "Thread exception" in str(call[0][2])
-                for call in calls
-            ), f"Expected log message not found in calls: {calls}"
-
-    def test_fire_and_forget_interpreter_shutdown(self, caplog):
-        """Test fire_and_forget handles interpreter shutdown gracefully."""
-
-        async def test_coro():
-            pass
-
-        # Mock the scenario where we get interpreter shutdown error
-        with patch("asyncio.get_running_loop") as mock_get_loop:
-            mock_get_loop.side_effect = RuntimeError("no running event loop")
-
-            with patch("threading.Thread") as mock_thread:
-                mock_thread.side_effect = RuntimeError(
-                    "cannot schedule new futures after interpreter shutdown"
-                )
-
-                with caplog.at_level(logging.DEBUG):
-                    # This should not raise or log
-                    fire_and_forget(test_coro(), description="shutdown test")
-
-                # No error should be logged for interpreter shutdown
-                assert not any(
-                    "Could not shutdown test" in record.message for record in caplog.records
-                )
-
-    def test_fire_and_forget_other_thread_error(self, caplog):
-        """Test fire_and_forget logs non-shutdown thread errors."""
-
-        async def test_coro():
-            pass
-
-        # Mock the scenario where we get a different error
-        with patch("asyncio.get_running_loop") as mock_get_loop:
-            mock_get_loop.side_effect = RuntimeError("no running event loop")
-
-            with patch("threading.Thread") as mock_thread:
-                mock_thread.side_effect = RuntimeError("Some other error")
-
-                # Patch the logger to capture the debug call
-                with patch("hud.utils.async_utils.logger") as mock_logger:
-                    fire_and_forget(test_coro(), description="error test")
-
-                    # Check that error was logged with correct format
-                    mock_logger.debug.assert_called_once_with(
-                        "Could not %s - no event loop available: %s",
-                        "error test",
-                        mock_thread.side_effect,
-                    )
-
-    @pytest.mark.asyncio
-    async def test_fire_and_forget_cancelled_task(self):
-        """Test fire_and_forget handles cancelled tasks."""
-
-        cancel_event = asyncio.Event()
-
-        async def long_running_coro():
-            await cancel_event.wait()
-
-        # Get the current loop
-        loop = asyncio.get_running_loop()
-
-        # Patch create_task to capture the task
-        created_task = None
-        original_create_task = loop.create_task
-
-        def mock_create_task(coro):
-            nonlocal created_task
-            created_task = original_create_task(coro)
-            return created_task
-
-        with patch.object(loop, "create_task", side_effect=mock_create_task):
-            fire_and_forget(long_running_coro(), description="cancel test")
-
-            # Give it a moment to start
-            await asyncio.sleep(0.01)
-
-        # Cancel the task
-        assert created_task is not None
-        created_task.cancel()
-
-        # This should not raise any exceptions
-        await asyncio.sleep(0.01)
```
hud/utils/tests/test_progress.py
DELETED

```diff
@@ -1,261 +0,0 @@
-"""Tests for the progress tracking utilities."""
-
-from __future__ import annotations
-
-import pytest
-
-from hud.utils.progress import StepProgressTracker
-
-
-@pytest.fixture
-def tracker():
-    return StepProgressTracker(total_tasks=2, max_steps_per_task=10)
-
-
-def test_invalid_inputs_init():
-    with pytest.raises(ValueError, match="total_tasks must be positive"):
-        StepProgressTracker(total_tasks=0, max_steps_per_task=10)
-
-    with pytest.raises(ValueError, match="max_steps_per_task must be positive"):
-        StepProgressTracker(total_tasks=5, max_steps_per_task=0)
-
-
-def test_start_task(tracker):
-    assert tracker.start_time is None
-    assert tracker._tasks_started == 0
-
-    tracker.start_task("task1")
-
-    assert tracker.start_time is not None
-    assert tracker._tasks_started == 1
-    assert tracker._task_steps["task1"] == 0
-    assert not tracker._finished_tasks["task1"]
-
-    tracker.start_task("task2")
-    assert tracker._tasks_started == 2
-    assert tracker._task_steps["task2"] == 0
-    assert not tracker._finished_tasks["task2"]
-
-
-def test_increment_step(tracker):
-    tracker.start_task("task1")
-    assert tracker.current_total_steps == 0
-
-    tracker.increment_step("task1")
-    assert tracker._task_steps["task1"] == 1
-    assert tracker.current_total_steps == 1
-
-    tracker.increment_step("task1")
-    tracker.increment_step("task1")
-    assert tracker._task_steps["task1"] == 3
-    assert tracker.current_total_steps == 3
-
-    tracker.start_task("task2")
-    tracker.increment_step("task2")
-    assert tracker._task_steps["task2"] == 1
-    assert tracker.current_total_steps == 4
-
-    tracker.finish_task("task1")
-    initial_steps = tracker.current_total_steps
-    tracker.increment_step("task1")
-    assert tracker.current_total_steps == initial_steps
-
-    for _ in range(15):
-        tracker.increment_step("task2")
-    assert tracker._task_steps["task2"] <= tracker.max_steps_per_task
-
-
-def test_finish_task(tracker):
-    tracker.start_task("task1")
-    tracker.start_task("task2")
-
-    tracker.increment_step("task1")
-    tracker.increment_step("task1")
-    initial_steps = tracker._task_steps["task1"]
-
-    tracker.finish_task("task1")
-
-    assert tracker._finished_tasks["task1"]
-    assert tracker._tasks_finished == 1
-    assert tracker._task_steps["task1"] == tracker.max_steps_per_task
-    assert tracker.current_total_steps > initial_steps
-
-    current_steps = tracker.current_total_steps
-    tracker.finish_task("task1")
-    assert tracker._tasks_finished == 1
-    assert tracker.current_total_steps == current_steps
-
-
-def test_get_progress(tracker):
-    steps, total, percentage = tracker.get_progress()
-    assert steps == 0
-    assert total == tracker.total_potential_steps
-    assert percentage == 0.0
-
-    tracker.start_task("task1")
-    tracker.increment_step("task1")
-    steps, total, percentage = tracker.get_progress()
-    assert steps == 1
-    assert total == tracker.total_potential_steps
-    assert percentage == (1 / tracker.total_potential_steps) * 100
-
-    tracker.finish_task("task1")
-    steps, total, percentage = tracker.get_progress()
-    assert steps == tracker.max_steps_per_task
-    assert total == tracker.total_potential_steps
-    assert percentage == (tracker.max_steps_per_task / tracker.total_potential_steps) * 100
-
-    tracker.start_task("task2")
-    tracker.finish_task("task2")
-    steps, total, percentage = tracker.get_progress()
-    assert steps == tracker.total_potential_steps
-    assert percentage == 100.0
-
-
-def test_get_stats_no_progress(tracker):
-    from unittest.mock import patch
-
-    rate, eta = tracker.get_stats()
-    assert rate == 0.0
-    assert eta is None
-
-    with patch("time.monotonic", return_value=100.0):
-        tracker.start_task("task1")
-
-        rate, eta = tracker.get_stats()
-        assert rate == 0.0
-        assert eta is None
-
-
-def test_get_stats_with_progress():
-    from unittest.mock import patch
-
-    with patch("time.monotonic") as mock_time:
-        mock_time.return_value = 100.0
-
-        tracker = StepProgressTracker(total_tasks=1, max_steps_per_task=10)
-        tracker.start_task("task1")
-
-        mock_time.return_value = 160.0
-        for _ in range(5):
-            tracker.increment_step("task1")
-
-        rate, eta = tracker.get_stats()
-
-        assert rate == pytest.approx(5.0)
-        assert eta == pytest.approx(60.0)
-
-        for _ in range(5):
-            tracker.increment_step("task1")
-
-        rate, eta = tracker.get_stats()
-        assert rate == pytest.approx(10.0)
-        assert eta == pytest.approx(0.0)
-
-
-def test_is_finished(tracker):
-    assert not tracker.is_finished()
-
-    tracker.start_task("task1")
-    tracker.finish_task("task1")
-    assert not tracker.is_finished()
-
-    tracker.start_task("task2")
-    tracker.finish_task("task2")
-    assert tracker.is_finished()
-
-
-def test_display(tracker):
-    from unittest.mock import patch
-
-    with patch("time.monotonic") as mock_time:
-        mock_time.return_value = 100.0
-        tracker.start_task("task1")
-
-        mock_time.return_value = 130.0
-        tracker.increment_step("task1")
-        tracker.increment_step("task1")
-
-        display_str = tracker.display()
-
-        assert "%" in display_str
-        assert "2/20" in display_str
-        assert "0:30" in display_str
-        assert "steps/min" in display_str
-
-        tracker.finish_task("task1")
-        display_str = tracker.display()
-        assert "10/20" in display_str
-
-        tracker.start_task("task2")
-        tracker.finish_task("task2")
-        display_str = tracker.display()
-        assert "100%" in display_str
-        assert "20/20" in display_str
-
-
-def test_complex_workflow():
-    tracker = StepProgressTracker(total_tasks=5, max_steps_per_task=20)
-
-    for i in range(5):
-        tracker.start_task(f"task{i}")
-
-    for _ in range(10):
-        tracker.increment_step("task0")
-
-    for _ in range(5):
-        tracker.increment_step("task1")
-
-    tracker.finish_task("task2")
-
-    for _ in range(15):
-        tracker.increment_step("task3")
-
-    tracker.finish_task("task3")
-
-    steps, total, percentage = tracker.get_progress()
-    expected_steps = 10 + 5 + 20 + 20 + 0
-    assert steps == expected_steps
-    assert total == 5 * 20
-    assert percentage == (expected_steps / total) * 100
-
-    assert tracker._tasks_finished == 2
-    assert not tracker.is_finished()
-
-    tracker.finish_task("task0")
-    tracker.finish_task("task1")
-    tracker.finish_task("task4")
-
-    assert tracker.is_finished()
-    assert tracker.get_progress()[2] == 100.0
-
-
-def test_display_eta_when_finished(tracker):
-    from unittest.mock import patch
-
-    """Test that ETA shows 0:00 when progress is finished."""
-
-    with patch("time.monotonic") as mock_time:
-        mock_time.return_value = 100.0
-
-        # Start and complete all tasks
-        tracker.start_task("task1")
-        for _ in range(10):
-            tracker.increment_step("task1")
-        tracker.finish_task("task1")
-
-        tracker.start_task("task2")
-        for _ in range(10):
-            tracker.increment_step("task2")
-        tracker.finish_task("task2")
-
-        # Some time has passed
-        mock_time.return_value = 120.0
-
-        display = tracker.display()
-
-        # When finished, ETA should be 0:00 (not ??:??)
-        assert tracker.is_finished()
-        assert "0:00" in display
-        assert "100%" in display
-        assert "20/20" in display
```