hud-python 0.4.22__tar.gz → 0.4.23__tar.gz
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.
Potentially problematic release: this version of hud-python might be problematic.
- {hud_python-0.4.22 → hud_python-0.4.23}/PKG-INFO +3 -1
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/base.py +37 -39
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/grounded_openai.py +3 -1
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/misc/response_agent.py +3 -2
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/openai.py +2 -2
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/openai_chat_generic.py +3 -1
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/__init__.py +34 -24
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/analyze.py +27 -26
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/build.py +50 -46
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/debug.py +7 -7
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/dev.py +107 -99
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/eval.py +31 -29
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/hf.py +53 -53
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/init.py +28 -28
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/list_func.py +22 -22
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/pull.py +36 -36
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/push.py +76 -74
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/remove.py +42 -40
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/rl/__init__.py +2 -2
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/rl/init.py +41 -41
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/rl/pod.py +97 -91
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/rl/ssh.py +42 -40
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/rl/train.py +75 -73
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/rl/utils.py +10 -10
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_analyze.py +1 -1
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_analyze_metadata.py +2 -2
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_pull.py +45 -45
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_push.py +31 -29
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_registry.py +15 -15
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/environment.py +11 -11
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/interactive.py +17 -17
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/logging.py +12 -12
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/metadata.py +12 -12
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/registry.py +5 -5
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/runner.py +23 -23
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/server.py +16 -16
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/shared/hints.py +7 -7
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/grounding/grounder.py +2 -1
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/types.py +4 -4
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/__init__.py +3 -3
- hud_python-0.4.22/hud/utils/design.py → hud_python-0.4.23/hud/utils/hud_console.py +39 -33
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/pretty_errors.py +6 -6
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/tests/test_version.py +1 -1
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/version.py +1 -1
- {hud_python-0.4.22 → hud_python-0.4.23}/pyproject.toml +2 -1
- {hud_python-0.4.22 → hud_python-0.4.23}/.gitignore +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/LICENSE +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/browser/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/browser/apps/2048/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/browser/apps/2048/backend/pyproject.toml +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/browser/apps/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/browser/apps/todo/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/browser/apps/todo/backend/pyproject.toml +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/browser/pyproject.toml +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/remote_browser/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/remote_browser/pyproject.toml +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/remote_browser/src/hud_controller/providers/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/text_2048/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/environments/text_2048/pyproject.toml +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/examples/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/__main__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/claude.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/langchain.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/misc/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/tests/test_base.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/tests/test_claude.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/tests/test_client.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/tests/test_grounded_openai_agent.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/tests/test_openai.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/__main__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/clone.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/rl/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_build.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_cli_init.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_cli_main.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_clone.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_cursor.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_debug.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_list_func.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_main_module.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_mcp_server.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/tests/test_utils.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/cursor.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/docker.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/utils/remote_runner.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/base.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/fastmcp.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/mcp_use.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/tests/test_client_integration.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/tests/test_fastmcp.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/tests/test_protocol.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/utils/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/clients/utils/retry_transport.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/datasets/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/datasets/execution/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/datasets/execution/parallel.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/datasets/execution/runner.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/datasets/task.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/datasets/utils.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/misc/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/misc/claude_plays_pokemon.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/native/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/native/comparator.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/native/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/native/tests/test_comparator.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/native/tests/test_native_init.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/otel/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/otel/collector.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/otel/config.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/otel/context.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/otel/exporters.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/otel/instrumentation.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/otel/processors.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/otel/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/otel/tests/test_processors.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/py.typed +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/server/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/server/context.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/server/helper/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/server/low_level.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/server/server.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/server/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/settings.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/shared/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/shared/exceptions.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/shared/requests.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/shared/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/shared/tests/test_exceptions.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/shared/tests/test_requests.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/telemetry/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/telemetry/instrument.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/telemetry/job.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/telemetry/replay.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/telemetry/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/telemetry/tests/test_replay.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/telemetry/tests/test_trace.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/telemetry/trace.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/base.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/bash.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/computer/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/computer/anthropic.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/computer/hud.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/computer/openai.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/computer/settings.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/edit.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/executors/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/executors/base.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/executors/pyautogui.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/executors/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/executors/tests/test_base_executor.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/executors/tests/test_pyautogui_executor.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/executors/xdo.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/grounding/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/grounding/config.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/grounding/grounded_tool.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/grounding/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/grounding/tests/test_grounded_tool.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/playwright.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/response.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/submit.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_base.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_bash.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_bash_extended.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_computer.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_computer_actions.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_edit.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_init.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_playwright_tool.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_response.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_tools.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_tools_init.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/tests/test_utils.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/types.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/tools/utils.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/agent_factories.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/async_utils.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/mcp.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/progress.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/telemetry.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/tests/__init__.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/tests/test_async_utils.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/tests/test_init.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/tests/test_mcp.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/tests/test_progress.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/hud/utils/tests/test_telemetry.py +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/rl/README.md +0 -0
- {hud_python-0.4.22 → hud_python-0.4.23}/rl/pyproject.toml +0 -0
{hud_python-0.4.22 → hud_python-0.4.23}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hud-python
-Version: 0.4.22
+Version: 0.4.23
 Summary: SDK for the HUD platform.
 Project-URL: Homepage, https://github.com/hud-evals/hud-python
 Project-URL: Bug Tracker, https://github.com/hud-evals/hud-python/issues

@@ -66,6 +66,7 @@ Requires-Dist: langchain-anthropic; extra == 'agent'
 Requires-Dist: langchain-openai; extra == 'agent'
 Requires-Dist: numpy>=1.24.0; extra == 'agent'
 Requires-Dist: openai; extra == 'agent'
+Requires-Dist: pillow>=11.1.0; extra == 'agent'
 Provides-Extra: agents
 Requires-Dist: anthropic; extra == 'agents'
 Requires-Dist: datasets>=2.14.0; extra == 'agents'

@@ -79,6 +80,7 @@ Requires-Dist: langchain-anthropic; extra == 'agents'
 Requires-Dist: langchain-openai; extra == 'agents'
 Requires-Dist: numpy>=1.24.0; extra == 'agents'
 Requires-Dist: openai; extra == 'agents'
+Requires-Dist: pillow>=11.1.0; extra == 'agents'
 Provides-Extra: dev
 Requires-Dist: aiodocker>=0.24.0; extra == 'dev'
 Requires-Dist: anthropic; extra == 'dev'
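Besides the version bump, the only metadata change is the new pillow>=11.1.0 pin on both the 'agent' and 'agents' extras. A quick local check that an installed Pillow satisfies the new floor (illustrative sketch only; it assumes the third-party packaging distribution is importable):

from importlib.metadata import version

from packaging.version import Version

# pillow is now required by the 'agent'/'agents' extras of hud-python 0.4.23
assert Version(version("pillow")) >= Version("11.1.0"), "Pillow too old for hud-python[agent] 0.4.23"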
{hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/base.py

@@ -11,7 +11,7 @@ from typing import TYPE_CHECKING, Any, ClassVar, Literal
 import mcp.types as types

 from hud.types import AgentResponse, MCPToolCall, MCPToolResult, Trace
-from hud.utils.
+from hud.utils.hud_console import HUDConsole
 from hud.utils.mcp import MCPConfigPatch, patch_mcp_config, setup_hud_telemetry

 if TYPE_CHECKING:

@@ -37,7 +37,7 @@ class MCPAgent(ABC):
 and automatic marking of lifecycle tools (setup/evaluate) from a `Task`.
 - Messaging: system prompt handling, optional inclusion of setup output on
 the first turn, and control over initial screenshots.
-- Telemetry & UX: standardized logging/printing via `
+- Telemetry & UX: standardized logging/printing via `HUDConsole` and optional
 automatic tracing (`auto_trace`).

 Subclasses implement provider-specific formatting and response fetching

@@ -92,13 +92,11 @@ class MCPAgent(ABC):
 self._auto_created_client = False  # Track if we created the client

 self.model_name = model_name
-self.
-
-self.metadata = {}
+self.console = HUDConsole(logger=logger)

 # Set verbose mode if requested
 if verbose:
-self.
+self.console.set_verbose(True)

 # Filtering
 self.allowed_tools = allowed_tools

@@ -133,7 +131,7 @@ class MCPAgent(ABC):

 self.mcp_client = MCPClient(mcp_config=task.mcp_config)
 self._auto_created_client = True
-self.
+self.console.info_log("Auto-created MCPClient from task.mcp_config")

 # Ensure we have a client
 if self.mcp_client is None:

@@ -170,7 +168,7 @@ class MCPAgent(ABC):
 await self._filter_tools()

 num_tools = len(self._available_tools)
-self.
+self.console.success_log(
 f"Agent initialized with {num_tools} available tools (after filtering)"
 )

@@ -245,7 +243,7 @@ class MCPAgent(ABC):

 # Execute the setup tool and append the initial observation to the context
 if task.setup_tool is not None:
-self.
+self.console.progress_log(f"Setting up tool phase: {task.setup_tool}")
 results = await self.call_tools(task.setup_tool)
 if any(result.isError for result in results):
 raise RuntimeError(f"{results}")

@@ -259,7 +257,7 @@ class MCPAgent(ABC):
 prompt_result = await self._run_context(start_context, max_steps=max_steps)

 except Exception as e:
-self.
+self.console.error_log(f"Task execution failed: {e}")
 # Create an error result but don't return yet - we still want to evaluate
 prompt_result = Trace(reward=0.0, done=True, content=str(e), isError=True)
 prompt_result.populate_from_context()

@@ -267,7 +265,7 @@ class MCPAgent(ABC):
 # Always evaluate if we have a prompt result and evaluate tool
 if prompt_result is not None and task.evaluate_tool is not None:
 try:
-self.
+self.console.progress_log(f"Evaluating tool phase: {task.evaluate_tool}")
 results = await self.call_tools(task.evaluate_tool)

 if any(result.isError for result in results):

@@ -290,7 +288,7 @@ class MCPAgent(ABC):
 prompt_result.content = eval_content

 except Exception as e:
-self.
+self.console.error_log(f"Evaluation phase failed: {e}")
 # Continue with the prompt result even if evaluation failed

 return (

@@ -321,21 +319,21 @@ class MCPAgent(ABC):

 # Add initial context
 messages.extend(await self.format_message(context))
-self.
+self.console.debug(f"Messages: {messages}")

 step_count = 0
 while max_steps == -1 or step_count < max_steps:
 step_count += 1
 if max_steps == -1:
-self.
+self.console.debug(f"Step {step_count} (unlimited)")
 else:
-self.
+self.console.debug(f"Step {step_count}/{max_steps}")

 try:
 # 1. Get model response
 response = await self.get_response(messages)

-self.
+self.console.debug(f"Agent:\n{response}")

 # Check if we should stop
 if response.done or not response.tool_calls:

@@ -347,16 +345,16 @@ class MCPAgent(ABC):
 response.content
 )
 except Exception as e:
-self.
+self.console.warning_log(f"ResponseAgent failed: {e}")
 if decision == "STOP":
 # Try to submit response through lifecycle tool
 await self._maybe_submit_response(response, messages)

-self.
+self.console.debug("Stopping execution")
 final_response = response
 break
 else:
-self.
+self.console.debug("Continuing execution")
 messages.extend(await self.format_message(decision))
 continue

@@ -378,21 +376,21 @@ class MCPAgent(ABC):
 for call, result in zip(tool_calls, tool_results, strict=False):
 step_info += f"\n{call}\n{result}"

-self.
+self.console.info_log(step_info)

 except Exception as e:
-self.
+self.console.error_log(f"Step failed: {e}")
 error = str(e)
 break

 except KeyboardInterrupt:
-self.
+self.console.warning_log("Agent execution interrupted by user")
 error = "Interrupted by user"
 except asyncio.CancelledError:
-self.
+self.console.warning_log("Agent execution cancelled")
 error = "Cancelled"
 except Exception as e:
-self.
+self.console.error_log(f"Unexpected error: {e}")
 error = str(e)

 # Build result

@@ -433,17 +431,17 @@ class MCPAgent(ABC):
 results: list[MCPToolResult] = []
 for tc in tool_call:
 try:
-self.
+self.console.debug(f"Calling tool: {tc}")
 results.append(await self.mcp_client.call_tool(tc))
 except TimeoutError as e:
-self.
+self.console.error_log(f"Tool execution timed out: {e}")
 try:
 await self.mcp_client.shutdown()
 except Exception as close_err:
-self.
+self.console.debug(f"Failed to close MCP client cleanly: {close_err}")
 raise
 except Exception as e:
-self.
+self.console.error_log(f"Tool execution failed: {e}")
 results.append(_format_error_result(str(e)))
 return results

@@ -575,7 +573,7 @@ class MCPAgent(ABC):

 # Add to lifecycle tools if found
 if response_tool_name and response_tool_name not in self.lifecycle_tools:
-self.
+self.console.debug(f"Auto-detected '{response_tool_name}' tool as a lifecycle tool")
 self.response_tool_name = response_tool_name
 self.lifecycle_tools.append(response_tool_name)

@@ -599,7 +597,7 @@ class MCPAgent(ABC):
 messages: The current message history (will be modified in-place)
 """
 if self.response_tool_name:
-self.
+self.console.debug(f"Calling response lifecycle tool: {self.response_tool_name}")
 try:
 # Call the response tool with the agent's response
 response_tool_call = MCPToolCall(

@@ -614,9 +612,9 @@ class MCPAgent(ABC):
 messages.extend(response_messages)

 # Mark the task as done
-self.
+self.console.debug("Response lifecycle tool executed, marking task as done")
 except Exception as e:
-self.
+self.console.error_log(f"Response lifecycle tool failed: {e}")

 async def _setup_config(self, mcp_config: dict[str, dict[str, Any]]) -> None:
 """Inject metadata into the metadata of the initialize request."""

@@ -670,9 +668,9 @@ class MCPAgent(ABC):
 if self._auto_trace_cm:
 try:
 self._auto_trace_cm.__exit__(None, None, None)
-self.
+self.console.debug("Closed auto-created trace")
 except Exception as e:
-self.
+self.console.warning_log(f"Failed to close auto-created trace: {e}")
 finally:
 self._auto_trace_cm = None

@@ -680,9 +678,9 @@ class MCPAgent(ABC):
 if self._auto_created_client and self.mcp_client:
 try:
 await self.mcp_client.shutdown()
-self.
+self.console.debug("Closed auto-created MCPClient")
 except Exception as e:
-self.
+self.console.warning_log(f"Failed to close auto-created client: {e}")
 finally:
 self.mcp_client = None
 self._auto_created_client = False

@@ -715,13 +713,13 @@ class MCPAgent(ABC):
 if self._is_connection_error(e):
 msg = self._get_connection_error_message(e)
 # Always show connection errors, not just when logging is enabled
-self.
-self.
+self.console.error(f"❌ {msg}")
+self.console.info("💡 Make sure the MCP server is started before running the agent.")

 # For localhost, provide specific instructions
 error_str = str(e).lower()
 if "localhost" in error_str or "127.0.0.1" in error_str:
-self.
+self.console.info(" Run 'hud dev' in another terminal to start the MCP server")

 raise RuntimeError(msg) from e
 raise
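Nearly all of the base.py changes are mechanical: hud/utils/design.py is renamed to hud/utils/hud_console.py, and the truncated self.<...> calls on the removed lines now go through a self.console attribute backed by the new HUDConsole helper. A minimal usage sketch, built only from the constructor and method names visible in the added lines above (HUDConsole(logger=...), set_verbose, debug, info_log, warning_log, error_log); the surrounding agent class is illustrative, not the real MCPAgent:

import logging

from hud.utils.hud_console import HUDConsole  # module renamed in 0.4.23

logger = logging.getLogger(__name__)


class MiniAgent:
    def __init__(self, verbose: bool = False) -> None:
        # Replaces the 0.4.22-era hud.utils.design helper
        self.console = HUDConsole(logger=logger)
        if verbose:
            self.console.set_verbose(True)

    def step(self, n: int, total: int) -> None:
        self.console.debug(f"Step {n}/{total}")
        self.console.info_log("tool call finished")
        self.console.warning_log("agent interrupted")  # and error_log(...) on failure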
{hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/grounded_openai.py

@@ -3,7 +3,7 @@
 from __future__ import annotations

 import json
-from typing import Any
+from typing import Any, ClassVar

 from hud import instrument
 from hud.tools.grounding import GroundedComputerTool, Grounder, GrounderConfig

@@ -26,6 +26,8 @@ class GroundedOpenAIChatAgent(GenericOpenAIChatAgent):
 - Grounding model (Qwen2-VL etc) handles visual element detection
 """

+metadata: ClassVar[dict[str, Any]] = {}
+
 def __init__(
 self,
 *,
{hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/misc/response_agent.py

@@ -16,7 +16,7 @@ class ResponseAgent:
 based on the agent's final response message.
 """

-def __init__(self, api_key: str | None = None) -> None:
+def __init__(self, api_key: str | None = None, model: str = "gpt-4o") -> None:
 self.api_key = api_key or settings.openai_api_key or os.environ.get("OPENAI_API_KEY")
 if not self.api_key:
 raise ValueError(

@@ -24,6 +24,7 @@ class ResponseAgent:
 )

 self.client = AsyncOpenAI(api_key=self.api_key)
+self.model = model

 self.system_prompt = """
 You are an assistant that helps determine the appropriate response to an agent's message.

@@ -54,7 +55,7 @@ class ResponseAgent:
 """
 try:
 response = await self.client.chat.completions.create(
-model=
+model=self.model,
 messages=[
 {"role": "system", "content": self.system_prompt},
 {
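The ResponseAgent change is a small API addition: the constructor gains a model parameter (default "gpt-4o") that is stored on the instance and passed to chat.completions.create instead of a hard-coded value. A hedged usage sketch; the import path follows the file listing above (hud/agents/misc/response_agent.py) and assumes OPENAI_API_KEY is set in the environment:

from hud.agents.misc.response_agent import ResponseAgent

# 0.4.22 always used the baked-in model; 0.4.23 lets callers override it.
agent = ResponseAgent(model="gpt-4o-mini")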
{hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/openai.py

@@ -204,7 +204,7 @@ class OperatorAgent(MCPAgent):
 break

 if not latest_screenshot:
-self.
+self.console.warning_log("No screenshot provided for response to action")
 return AgentResponse(
 content="No screenshot available for next action",
 tool_calls=[],

@@ -327,7 +327,7 @@ class OperatorAgent(MCPAgent):
 for content in result.content:
 if isinstance(content, types.TextContent):
 # Don't add error text as input_text, just track it
-self.
+self.console.error_log(f"Tool error: {content.text}")
 elif isinstance(content, types.ImageContent):
 # Even error results might have images
 latest_screenshot = content.data
{hud_python-0.4.22 → hud_python-0.4.23}/hud/agents/openai_chat_generic.py

@@ -17,7 +17,7 @@ from __future__ import annotations

 import json
 import logging
-from typing import TYPE_CHECKING, Any, cast
+from typing import TYPE_CHECKING, Any, ClassVar, cast

 import mcp.types as types

@@ -36,6 +36,8 @@ logger = logging.getLogger(__name__)
 class GenericOpenAIChatAgent(MCPAgent):
 """MCP-enabled agent that speaks the OpenAI *chat.completions* protocol."""

+metadata: ClassVar[dict[str, Any]] = {}
+
 def __init__(
 self,
 *,
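Both GroundedOpenAIChatAgent and GenericOpenAIChatAgent gain a class-level metadata: ClassVar[dict[str, Any]] = {} along with the matching ClassVar import. The diff itself does not say why it was added, but the annotation's effect is standard Python typing: ClassVar marks the dict as class-level state, so type checkers (and frameworks that inspect annotations) do not treat the mutable default as a per-instance field. A minimal sketch of the pattern:

from typing import Any, ClassVar


class ExampleAgent:
    # Shared across the class; ClassVar keeps type checkers from
    # treating this mutable default as an instance attribute.
    metadata: ClassVar[dict[str, Any]] = {}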
{hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/__init__.py

@@ -184,7 +184,7 @@ def debug(
 hud debug . --max-phase 3  # Stop after phase 3
 """
 # Import here to avoid circular imports
-from hud.utils.
+from hud.utils.hud_console import HUDConsole

 from .utils.environment import (
 build_environment,

@@ -193,7 +193,7 @@ def debug(
 is_environment_directory,
 )

-
+hud_console = HUDConsole()

 # Determine the command to run
 command = None

@@ -227,7 +227,7 @@ def debug(
 image_name, source = get_image_name(directory)

 if source == "auto":
-
+hud_console.info(f"Auto-generated image name: {image_name}")

 # Build if requested or if image doesn't exist
 if build or not image_exists(image_name):

@@ -263,20 +263,20 @@ def debug(
 phases_completed = asyncio.run(debug_mcp_stdio(command, logger, max_phase=max_phase))

 # Show summary using design system
-from hud.utils.
+from hud.utils.hud_console import HUDConsole

-
+hud_console = HUDConsole()

-
-
+hud_console.info("")  # Empty line
+hud_console.section_title("Debug Summary")

 if phases_completed == max_phase:
-
+hud_console.success(f"All {max_phase} phases completed successfully!")
 if max_phase == 5:
-
+hud_console.info("Your MCP server is fully functional and ready for production use.")
 else:
-
-
+hud_console.warning(f"Completed {phases_completed} out of {max_phase} phases")
+hud_console.info("Check the errors above for troubleshooting.")

 # Exit with appropriate code
 if phases_completed < max_phase:

@@ -831,9 +831,9 @@ def eval(
 ),
 ) -> None:
 """🚀 Run evaluation on datasets or individual tasks with agents."""
-from hud.utils.
+from hud.utils.hud_console import HUDConsole

-
+hud_console = HUDConsole()

 # If no source provided, look for task/eval JSON files in current directory
 if source is None:

@@ -863,30 +863,30 @@ def eval(
 json_files = sorted(set(json_files))

 if not json_files:
-
+hud_console.error(
 "No source provided and no task/eval JSON files found in current directory"
 )
-
+hud_console.info(
 "Usage: hud eval <source> or create a task JSON file "
 "(e.g., task.json, eval_config.json)"
 )
 raise typer.Exit(1)
 elif len(json_files) == 1:
 source = str(json_files[0])
-
+hud_console.info(f"Found task file: {source}")
 else:
 # Multiple files found, let user choose
-
-file_choice =
+hud_console.info("Multiple task files found:")
+file_choice = hud_console.select(
 "Select a task file to run:",
 choices=[str(f) for f in json_files],
 )
 source = file_choice
-
+hud_console.success(f"Selected: {source}")

 # If no agent specified, prompt for selection
 if agent is None:
-agent =
+agent = hud_console.select(
 "Select an agent to use:",
 choices=[
 {"name": "Claude 4 Sonnet", "value": "claude"},

@@ -898,14 +898,14 @@ def eval(
 # Validate agent choice
 valid_agents = ["claude", "openai"]
 if agent not in valid_agents:
-
+hud_console.error(f"Invalid agent: {agent}. Must be one of: {', '.join(valid_agents)}")
 raise typer.Exit(1)

 # Import eval_command lazily to avoid importing agent dependencies
 try:
 from .eval import eval_command
 except ImportError as e:
-
+hud_console.error(
 "Evaluation dependencies are not installed. "
 "Please install with: pip install 'hud-python[agent]'"
 )

@@ -962,6 +962,16 @@ def hf(

 def main() -> None:
 """Main entry point for the CLI."""
+# Handle --version flag before Typer parses args
+if "--version" in sys.argv:
+try:
+from hud import __version__
+
+console.print(f"HUD CLI version: [cyan]{__version__}[/cyan]")
+except ImportError:
+console.print("HUD CLI version: [cyan]unknown[/cyan]")
+return
+
 try:
 # Show header for main help
 if len(sys.argv) == 1 or (len(sys.argv) == 2 and sys.argv[1] in ["--help", "-h"]):

@@ -995,9 +1005,9 @@ def main() -> None:
 except Exception:
 exit_code = 1
 if exit_code != 0:
-from hud.utils.
+from hud.utils.hud_console import hud_console

-
+hud_console.info(SUPPORT_HINT)
 raise
 except Exception:
 raise
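The one structural addition in cli/__init__.py is the --version short-circuit in main(), which checks sys.argv before the Typer app parses arguments, so the flag works even though no --version option is declared on the app. For comparison, the eager-callback idiom that Typer itself supports would look like the sketch below; this is an alternative pattern for illustration, not what hud-python does:

import typer

app = typer.Typer()


def _version_callback(value: bool) -> None:
    # Runs before other options are processed because of is_eager=True.
    if value:
        typer.echo("HUD CLI version: 0.4.23")
        raise typer.Exit()


@app.callback()
def main(
    version: bool = typer.Option(
        False, "--version", callback=_version_callback, is_eager=True
    ),
) -> None:
    """HUD CLI (illustrative stub)."""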
{hud_python-0.4.22 → hud_python-0.4.23}/hud/cli/analyze.py

@@ -13,10 +13,10 @@ from rich.table import Table
 from rich.tree import Tree

 from hud.clients import MCPClient
-from hud.utils.
+from hud.utils.hud_console import HUDConsole

 console = Console()
-
+hud_console = HUDConsole()


 def parse_docker_command(docker_cmd: list[str]) -> dict:

@@ -28,14 +28,14 @@ def parse_docker_command(docker_cmd: list[str]) -> dict:

 async def analyze_environment(docker_cmd: list[str], output_format: str, verbose: bool) -> None:
 """Analyze MCP environment and display results."""
-
+hud_console.header("MCP Environment Analysis", icon="🔍")

 # Convert Docker command to MCP config
 mcp_config = parse_docker_command(docker_cmd)

 # Display command being analyzed
-
-
+hud_console.dim_info("Command:", " ".join(docker_cmd))
+hud_console.info("")  # Empty line

 # Create client
 with Progress(

@@ -85,9 +85,9 @@ async def analyze_environment(docker_cmd: list[str], output_format: str, verbose
 def display_interactive(analysis: dict) -> None:
 """Display analysis results in interactive format."""
 # Server metadata
-
+hud_console.section_title("📊 Environment Overview")
 meta_table = Table(show_header=False, box=None)
-meta_table.add_column("Property", style="
+meta_table.add_column("Property", style="bright_black")
 meta_table.add_column("Value")

 # Check if this is a live analysis (has metadata) or metadata-only analysis

@@ -126,19 +126,19 @@ def display_interactive(analysis: dict) -> None:
 console.print(meta_table)

 # Tools
-
-tools_tree = Tree("Tools")
+hud_console.section_title("🔧 Available Tools")
+tools_tree = Tree("[bold bright_white]Tools[/bold bright_white]")

 # Check if we have hub_tools info (live analysis) or not (metadata-only)
 if "hub_tools" in analysis:
 # Live analysis format - separate regular and hub tools
 # Regular tools
-regular_tools = tools_tree.add("Regular Tools")
+regular_tools = tools_tree.add("[bright_white]Regular Tools[/bright_white]")
 for tool in analysis["tools"]:
 if tool["name"] not in analysis["hub_tools"]:
-tool_node = regular_tools.add(f"[
+tool_node = regular_tools.add(f"[bright_white]{tool['name']}[/bright_white]")
 if tool["description"]:
-tool_node.add(f"[
+tool_node.add(f"[bright_black]{tool['description']}[/bright_black]")

 # Show input schema if verbose
 if analysis.get("verbose") and tool.get("input_schema"):

@@ -148,17 +148,17 @@ def display_interactive(analysis: dict) -> None:

 # Hub tools
 if analysis["hub_tools"]:
-hub_tools = tools_tree.add("Hub Tools")
+hub_tools = tools_tree.add("[bright_white]Hub Tools[/bright_white]")
 for hub_name, functions in analysis["hub_tools"].items():
-hub_node = hub_tools.add(f"[
+hub_node = hub_tools.add(f"[rgb(181,137,0)]{hub_name}[/rgb(181,137,0)]")
 for func in functions:
-hub_node.add(f"[
+hub_node.add(f"[bright_white]{func}[/bright_white]")
 else:
 # Metadata-only format - just list all tools
 for tool in analysis["tools"]:
-tool_node = tools_tree.add(f"[
+tool_node = tools_tree.add(f"[bright_white]{tool['name']}[/bright_white]")
 if tool.get("description"):
-tool_node.add(f"[
+tool_node.add(f"[bright_black]{tool['description']}[/bright_black]")

 # Show input schema if verbose
 if tool.get("inputSchema"):

@@ -170,11 +170,11 @@ def display_interactive(analysis: dict) -> None:

 # Resources
 if analysis["resources"]:
-
+hud_console.section_title("📚 Available Resources")
 resources_table = Table()
-resources_table.add_column("URI", style="
-resources_table.add_column("Name", style="
-resources_table.add_column("Type", style="
+resources_table.add_column("URI", style="bright_white")
+resources_table.add_column("Name", style="bright_white")
+resources_table.add_column("Type", style="bright_black")

 for resource in analysis["resources"][:10]:
 resources_table.add_row(

@@ -184,11 +184,12 @@ def display_interactive(analysis: dict) -> None:
 console.print(resources_table)

 if len(analysis["resources"]) > 10:
-
+remaining = len(analysis["resources"]) - 10
+console.print(f"[bright_black]... and {remaining} more resources[/bright_black]")

 # Telemetry (only for live analysis)
 if analysis.get("telemetry"):
-
+hud_console.section_title("📡 Telemetry Data")
 telemetry_table = Table(show_header=False, box=None)
 telemetry_table.add_column("Key", style="dim")
 telemetry_table.add_column("Value")

@@ -206,7 +207,7 @@ def display_interactive(analysis: dict) -> None:

 # Environment variables (for metadata-only analysis)
 if analysis.get("env_vars"):
-
+hud_console.section_title("🔑 Environment Variables")
 env_table = Table(show_header=False, box=None)
 env_table.add_column("Type", style="dim")
 env_table.add_column("Variables")

@@ -309,7 +310,7 @@ async def analyze_environment_from_config(
 config_path: Path, output_format: str, verbose: bool
 ) -> None:
 """Analyze MCP environment from a JSON config file."""
-
+hud_console.header("MCP Environment Analysis", icon="🔍")

 # Load config from file
 try:

@@ -327,7 +328,7 @@ async def analyze_environment_from_mcp_config(
 mcp_config: dict[str, Any], output_format: str, verbose: bool
 ) -> None:
 """Analyze MCP environment from MCP config dict."""
-
+hud_console.header("MCP Environment Analysis", icon="🔍")
 await _analyze_with_config(mcp_config, output_format, verbose)