hud-python 0.4.22.tar.gz → 0.4.24.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: the registry has flagged this version of hud-python as possibly problematic; see the registry listing for details.

Files changed (201)
  1. {hud_python-0.4.22 → hud_python-0.4.24}/PKG-INFO +3 -1
  2. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/base.py +85 -59
  3. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/claude.py +5 -1
  4. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/grounded_openai.py +3 -1
  5. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/misc/response_agent.py +3 -2
  6. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/openai.py +2 -2
  7. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/openai_chat_generic.py +3 -1
  8. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/__init__.py +34 -24
  9. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/analyze.py +27 -26
  10. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/build.py +50 -46
  11. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/debug.py +7 -7
  12. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/dev.py +107 -99
  13. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/eval.py +31 -29
  14. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/hf.py +53 -53
  15. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/init.py +28 -28
  16. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/list_func.py +22 -22
  17. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/pull.py +36 -36
  18. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/push.py +76 -74
  19. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/remove.py +42 -40
  20. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/rl/__init__.py +2 -2
  21. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/rl/init.py +41 -41
  22. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/rl/pod.py +97 -91
  23. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/rl/ssh.py +42 -40
  24. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/rl/train.py +75 -73
  25. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/rl/utils.py +10 -10
  26. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_analyze.py +1 -1
  27. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_analyze_metadata.py +2 -2
  28. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_pull.py +45 -45
  29. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_push.py +31 -29
  30. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_registry.py +15 -15
  31. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/environment.py +11 -11
  32. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/interactive.py +17 -17
  33. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/logging.py +12 -12
  34. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/metadata.py +12 -12
  35. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/registry.py +5 -5
  36. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/runner.py +23 -23
  37. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/server.py +16 -16
  38. {hud_python-0.4.22 → hud_python-0.4.24}/hud/clients/mcp_use.py +19 -5
  39. hud_python-0.4.24/hud/clients/utils/__init__.py +26 -0
  40. hud_python-0.4.24/hud/clients/utils/retry.py +186 -0
  41. {hud_python-0.4.22 → hud_python-0.4.24}/hud/datasets/execution/parallel.py +71 -46
  42. {hud_python-0.4.22 → hud_python-0.4.24}/hud/shared/hints.py +7 -7
  43. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/grounding/grounder.py +2 -1
  44. {hud_python-0.4.22 → hud_python-0.4.24}/hud/types.py +4 -4
  45. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/__init__.py +3 -3
  46. hud_python-0.4.22/hud/utils/design.py → hud_python-0.4.24/hud/utils/hud_console.py +39 -33
  47. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/pretty_errors.py +6 -6
  48. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/tests/test_version.py +1 -1
  49. {hud_python-0.4.22 → hud_python-0.4.24}/hud/version.py +1 -1
  50. {hud_python-0.4.22 → hud_python-0.4.24}/pyproject.toml +2 -1
  51. hud_python-0.4.22/hud/clients/utils/__init__.py +0 -1
  52. {hud_python-0.4.22 → hud_python-0.4.24}/.gitignore +0 -0
  53. {hud_python-0.4.22 → hud_python-0.4.24}/LICENSE +0 -0
  54. {hud_python-0.4.22 → hud_python-0.4.24}/README.md +0 -0
  55. {hud_python-0.4.22 → hud_python-0.4.24}/environments/README.md +0 -0
  56. {hud_python-0.4.22 → hud_python-0.4.24}/environments/browser/README.md +0 -0
  57. {hud_python-0.4.22 → hud_python-0.4.24}/environments/browser/apps/2048/README.md +0 -0
  58. {hud_python-0.4.22 → hud_python-0.4.24}/environments/browser/apps/2048/backend/pyproject.toml +0 -0
  59. {hud_python-0.4.22 → hud_python-0.4.24}/environments/browser/apps/README.md +0 -0
  60. {hud_python-0.4.22 → hud_python-0.4.24}/environments/browser/apps/todo/README.md +0 -0
  61. {hud_python-0.4.22 → hud_python-0.4.24}/environments/browser/apps/todo/backend/pyproject.toml +0 -0
  62. {hud_python-0.4.22 → hud_python-0.4.24}/environments/browser/pyproject.toml +0 -0
  63. {hud_python-0.4.22 → hud_python-0.4.24}/environments/remote_browser/README.md +0 -0
  64. {hud_python-0.4.22 → hud_python-0.4.24}/environments/remote_browser/pyproject.toml +0 -0
  65. {hud_python-0.4.22 → hud_python-0.4.24}/environments/remote_browser/src/hud_controller/providers/README.md +0 -0
  66. {hud_python-0.4.22 → hud_python-0.4.24}/environments/text_2048/README.md +0 -0
  67. {hud_python-0.4.22 → hud_python-0.4.24}/environments/text_2048/pyproject.toml +0 -0
  68. {hud_python-0.4.22 → hud_python-0.4.24}/examples/README.md +0 -0
  69. {hud_python-0.4.22 → hud_python-0.4.24}/hud/__init__.py +0 -0
  70. {hud_python-0.4.22 → hud_python-0.4.24}/hud/__main__.py +0 -0
  71. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/__init__.py +0 -0
  72. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/langchain.py +0 -0
  73. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/misc/__init__.py +0 -0
  74. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/tests/__init__.py +0 -0
  75. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/tests/test_base.py +0 -0
  76. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/tests/test_claude.py +0 -0
  77. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/tests/test_client.py +0 -0
  78. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/tests/test_grounded_openai_agent.py +0 -0
  79. {hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/tests/test_openai.py +0 -0
  80. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/__main__.py +0 -0
  81. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/clone.py +0 -0
  82. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/rl/README.md +0 -0
  83. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/__init__.py +0 -0
  84. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_build.py +0 -0
  85. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_cli_init.py +0 -0
  86. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_cli_main.py +0 -0
  87. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_clone.py +0 -0
  88. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_cursor.py +0 -0
  89. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_debug.py +0 -0
  90. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_list_func.py +0 -0
  91. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_main_module.py +0 -0
  92. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_mcp_server.py +0 -0
  93. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/tests/test_utils.py +0 -0
  94. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/__init__.py +0 -0
  95. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/cursor.py +0 -0
  96. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/docker.py +0 -0
  97. {hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/utils/remote_runner.py +0 -0
  98. {hud_python-0.4.22 → hud_python-0.4.24}/hud/clients/README.md +0 -0
  99. {hud_python-0.4.22 → hud_python-0.4.24}/hud/clients/__init__.py +0 -0
  100. {hud_python-0.4.22 → hud_python-0.4.24}/hud/clients/base.py +0 -0
  101. {hud_python-0.4.22 → hud_python-0.4.24}/hud/clients/fastmcp.py +0 -0
  102. {hud_python-0.4.22 → hud_python-0.4.24}/hud/clients/tests/__init__.py +0 -0
  103. {hud_python-0.4.22 → hud_python-0.4.24}/hud/clients/tests/test_client_integration.py +0 -0
  104. {hud_python-0.4.22 → hud_python-0.4.24}/hud/clients/tests/test_fastmcp.py +0 -0
  105. {hud_python-0.4.22 → hud_python-0.4.24}/hud/clients/tests/test_protocol.py +0 -0
  106. {hud_python-0.4.22 → hud_python-0.4.24}/hud/clients/utils/retry_transport.py +0 -0
  107. {hud_python-0.4.22 → hud_python-0.4.24}/hud/datasets/__init__.py +0 -0
  108. {hud_python-0.4.22 → hud_python-0.4.24}/hud/datasets/execution/__init__.py +0 -0
  109. {hud_python-0.4.22 → hud_python-0.4.24}/hud/datasets/execution/runner.py +0 -0
  110. {hud_python-0.4.22 → hud_python-0.4.24}/hud/datasets/task.py +0 -0
  111. {hud_python-0.4.22 → hud_python-0.4.24}/hud/datasets/utils.py +0 -0
  112. {hud_python-0.4.22 → hud_python-0.4.24}/hud/misc/__init__.py +0 -0
  113. {hud_python-0.4.22 → hud_python-0.4.24}/hud/misc/claude_plays_pokemon.py +0 -0
  114. {hud_python-0.4.22 → hud_python-0.4.24}/hud/native/__init__.py +0 -0
  115. {hud_python-0.4.22 → hud_python-0.4.24}/hud/native/comparator.py +0 -0
  116. {hud_python-0.4.22 → hud_python-0.4.24}/hud/native/tests/__init__.py +0 -0
  117. {hud_python-0.4.22 → hud_python-0.4.24}/hud/native/tests/test_comparator.py +0 -0
  118. {hud_python-0.4.22 → hud_python-0.4.24}/hud/native/tests/test_native_init.py +0 -0
  119. {hud_python-0.4.22 → hud_python-0.4.24}/hud/otel/__init__.py +0 -0
  120. {hud_python-0.4.22 → hud_python-0.4.24}/hud/otel/collector.py +0 -0
  121. {hud_python-0.4.22 → hud_python-0.4.24}/hud/otel/config.py +0 -0
  122. {hud_python-0.4.22 → hud_python-0.4.24}/hud/otel/context.py +0 -0
  123. {hud_python-0.4.22 → hud_python-0.4.24}/hud/otel/exporters.py +0 -0
  124. {hud_python-0.4.22 → hud_python-0.4.24}/hud/otel/instrumentation.py +0 -0
  125. {hud_python-0.4.22 → hud_python-0.4.24}/hud/otel/processors.py +0 -0
  126. {hud_python-0.4.22 → hud_python-0.4.24}/hud/otel/tests/__init__.py +0 -0
  127. {hud_python-0.4.22 → hud_python-0.4.24}/hud/otel/tests/test_processors.py +0 -0
  128. {hud_python-0.4.22 → hud_python-0.4.24}/hud/py.typed +0 -0
  129. {hud_python-0.4.22 → hud_python-0.4.24}/hud/server/__init__.py +0 -0
  130. {hud_python-0.4.22 → hud_python-0.4.24}/hud/server/context.py +0 -0
  131. {hud_python-0.4.22 → hud_python-0.4.24}/hud/server/helper/__init__.py +0 -0
  132. {hud_python-0.4.22 → hud_python-0.4.24}/hud/server/low_level.py +0 -0
  133. {hud_python-0.4.22 → hud_python-0.4.24}/hud/server/server.py +0 -0
  134. {hud_python-0.4.22 → hud_python-0.4.24}/hud/server/tests/__init__.py +0 -0
  135. {hud_python-0.4.22 → hud_python-0.4.24}/hud/settings.py +0 -0
  136. {hud_python-0.4.22 → hud_python-0.4.24}/hud/shared/__init__.py +0 -0
  137. {hud_python-0.4.22 → hud_python-0.4.24}/hud/shared/exceptions.py +0 -0
  138. {hud_python-0.4.22 → hud_python-0.4.24}/hud/shared/requests.py +0 -0
  139. {hud_python-0.4.22 → hud_python-0.4.24}/hud/shared/tests/__init__.py +0 -0
  140. {hud_python-0.4.22 → hud_python-0.4.24}/hud/shared/tests/test_exceptions.py +0 -0
  141. {hud_python-0.4.22 → hud_python-0.4.24}/hud/shared/tests/test_requests.py +0 -0
  142. {hud_python-0.4.22 → hud_python-0.4.24}/hud/telemetry/__init__.py +0 -0
  143. {hud_python-0.4.22 → hud_python-0.4.24}/hud/telemetry/instrument.py +0 -0
  144. {hud_python-0.4.22 → hud_python-0.4.24}/hud/telemetry/job.py +0 -0
  145. {hud_python-0.4.22 → hud_python-0.4.24}/hud/telemetry/replay.py +0 -0
  146. {hud_python-0.4.22 → hud_python-0.4.24}/hud/telemetry/tests/__init__.py +0 -0
  147. {hud_python-0.4.22 → hud_python-0.4.24}/hud/telemetry/tests/test_replay.py +0 -0
  148. {hud_python-0.4.22 → hud_python-0.4.24}/hud/telemetry/tests/test_trace.py +0 -0
  149. {hud_python-0.4.22 → hud_python-0.4.24}/hud/telemetry/trace.py +0 -0
  150. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/__init__.py +0 -0
  151. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/base.py +0 -0
  152. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/bash.py +0 -0
  153. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/computer/__init__.py +0 -0
  154. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/computer/anthropic.py +0 -0
  155. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/computer/hud.py +0 -0
  156. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/computer/openai.py +0 -0
  157. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/computer/settings.py +0 -0
  158. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/edit.py +0 -0
  159. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/executors/__init__.py +0 -0
  160. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/executors/base.py +0 -0
  161. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/executors/pyautogui.py +0 -0
  162. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/executors/tests/__init__.py +0 -0
  163. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/executors/tests/test_base_executor.py +0 -0
  164. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/executors/tests/test_pyautogui_executor.py +0 -0
  165. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/executors/xdo.py +0 -0
  166. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/grounding/__init__.py +0 -0
  167. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/grounding/config.py +0 -0
  168. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/grounding/grounded_tool.py +0 -0
  169. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/grounding/tests/__init__.py +0 -0
  170. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/grounding/tests/test_grounded_tool.py +0 -0
  171. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/playwright.py +0 -0
  172. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/response.py +0 -0
  173. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/submit.py +0 -0
  174. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/__init__.py +0 -0
  175. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_base.py +0 -0
  176. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_bash.py +0 -0
  177. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_bash_extended.py +0 -0
  178. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_computer.py +0 -0
  179. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_computer_actions.py +0 -0
  180. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_edit.py +0 -0
  181. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_init.py +0 -0
  182. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_playwright_tool.py +0 -0
  183. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_response.py +0 -0
  184. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_tools.py +0 -0
  185. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_tools_init.py +0 -0
  186. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/tests/test_utils.py +0 -0
  187. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/types.py +0 -0
  188. {hud_python-0.4.22 → hud_python-0.4.24}/hud/tools/utils.py +0 -0
  189. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/agent_factories.py +0 -0
  190. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/async_utils.py +0 -0
  191. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/mcp.py +0 -0
  192. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/progress.py +0 -0
  193. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/telemetry.py +0 -0
  194. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/tests/__init__.py +0 -0
  195. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/tests/test_async_utils.py +0 -0
  196. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/tests/test_init.py +0 -0
  197. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/tests/test_mcp.py +0 -0
  198. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/tests/test_progress.py +0 -0
  199. {hud_python-0.4.22 → hud_python-0.4.24}/hud/utils/tests/test_telemetry.py +0 -0
  200. {hud_python-0.4.22 → hud_python-0.4.24}/rl/README.md +0 -0
  201. {hud_python-0.4.22 → hud_python-0.4.24}/rl/pyproject.toml +0 -0
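
The single largest source of churn in this release is the rename of the internal console helper (entry 46 above): hud/utils/design.py becomes hud/utils/hud_console.py, the HUDDesign class becomes HUDConsole, and every call site in the agents and CLI is updated, as the hunks below show. For downstream code that imported the old name directly, a hedged compatibility shim could look like the following sketch; only the module paths, the class names, the no-argument constructor, and the info() method visible in this diff are assumed.

# Hypothetical shim for code that must run against both 0.4.22 and 0.4.24.
try:
    from hud.utils.hud_console import HUDConsole  # 0.4.24 and later
except ImportError:  # pragma: no cover - fall back for 0.4.22 and earlier
    from hud.utils.design import HUDDesign as HUDConsole

console = HUDConsole()
console.info("console helper resolved for either release")
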
{hud_python-0.4.22 → hud_python-0.4.24}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: hud-python
-Version: 0.4.22
+Version: 0.4.24
 Summary: SDK for the HUD platform.
 Project-URL: Homepage, https://github.com/hud-evals/hud-python
 Project-URL: Bug Tracker, https://github.com/hud-evals/hud-python/issues
@@ -66,6 +66,7 @@ Requires-Dist: langchain-anthropic; extra == 'agent'
 Requires-Dist: langchain-openai; extra == 'agent'
 Requires-Dist: numpy>=1.24.0; extra == 'agent'
 Requires-Dist: openai; extra == 'agent'
+Requires-Dist: pillow>=11.1.0; extra == 'agent'
 Provides-Extra: agents
 Requires-Dist: anthropic; extra == 'agents'
 Requires-Dist: datasets>=2.14.0; extra == 'agents'
@@ -79,6 +80,7 @@ Requires-Dist: langchain-anthropic; extra == 'agents'
 Requires-Dist: langchain-openai; extra == 'agents'
 Requires-Dist: numpy>=1.24.0; extra == 'agents'
 Requires-Dist: openai; extra == 'agents'
+Requires-Dist: pillow>=11.1.0; extra == 'agents'
 Provides-Extra: dev
 Requires-Dist: aiodocker>=0.24.0; extra == 'dev'
 Requires-Dist: anthropic; extra == 'dev'
{hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/base.py
@@ -11,7 +11,7 @@ from typing import TYPE_CHECKING, Any, ClassVar, Literal
 import mcp.types as types
 
 from hud.types import AgentResponse, MCPToolCall, MCPToolResult, Trace
-from hud.utils.design import HUDDesign
+from hud.utils.hud_console import HUDConsole
 from hud.utils.mcp import MCPConfigPatch, patch_mcp_config, setup_hud_telemetry
 
 if TYPE_CHECKING:
@@ -37,7 +37,7 @@ class MCPAgent(ABC):
       and automatic marking of lifecycle tools (setup/evaluate) from a `Task`.
     - Messaging: system prompt handling, optional inclusion of setup output on
      the first turn, and control over initial screenshots.
-    - Telemetry & UX: standardized logging/printing via `HUDDesign` and optional
+    - Telemetry & UX: standardized logging/printing via `HUDConsole` and optional
      automatic tracing (`auto_trace`).
 
     Subclasses implement provider-specific formatting and response fetching
@@ -92,13 +92,11 @@ class MCPAgent(ABC):
         self._auto_created_client = False  # Track if we created the client
 
         self.model_name = model_name
-        self.design = HUDDesign(logger=logger)
-
-        self.metadata = {}
+        self.console = HUDConsole(logger=logger)
 
         # Set verbose mode if requested
         if verbose:
-            self.design.set_verbose(True)
+            self.console.set_verbose(True)
 
         # Filtering
         self.allowed_tools = allowed_tools
@@ -133,7 +131,7 @@ class MCPAgent(ABC):
 
             self.mcp_client = MCPClient(mcp_config=task.mcp_config)
             self._auto_created_client = True
-            self.design.info_log("Auto-created MCPClient from task.mcp_config")
+            self.console.info_log("Auto-created MCPClient from task.mcp_config")
 
         # Ensure we have a client
         if self.mcp_client is None:
@@ -170,7 +168,7 @@ class MCPAgent(ABC):
         await self._filter_tools()
 
         num_tools = len(self._available_tools)
-        self.design.success_log(
+        self.console.success_log(
             f"Agent initialized with {num_tools} available tools (after filtering)"
         )
 
@@ -209,6 +207,7 @@ class MCPAgent(ABC):
             else:
                 raise TypeError(f"prompt_or_task must be str or Task, got {type(prompt_or_task)}")
         except Exception as e:
+            # Always return a Trace object for any exception
             if self._is_connection_error(e):
                 # Return error trace for connection failures
                 return Trace(
@@ -217,7 +216,15 @@ class MCPAgent(ABC):
                     content=self._get_connection_error_message(e),
                     isError=True,
                 )
-            raise
+            else:
+                # Return error trace for any other exception
+                return Trace(
+                    reward=0.0,
+                    done=True,
+                    content=f"Task failed with error: {e}",
+                    isError=True,
+                    info={"error": str(e)},
+                )
         finally:
             # Cleanup auto-created resources
             await self._cleanup()
@@ -245,7 +252,7 @@ class MCPAgent(ABC):
 
         # Execute the setup tool and append the initial observation to the context
         if task.setup_tool is not None:
-            self.design.progress_log(f"Setting up tool phase: {task.setup_tool}")
+            self.console.progress_log(f"Setting up tool phase: {task.setup_tool}")
             results = await self.call_tools(task.setup_tool)
             if any(result.isError for result in results):
                 raise RuntimeError(f"{results}")
@@ -259,39 +266,58 @@ class MCPAgent(ABC):
             prompt_result = await self._run_context(start_context, max_steps=max_steps)
 
         except Exception as e:
-            self.design.error_log(f"Task execution failed: {e}")
+            self.console.error_log(f"Task execution failed: {e}")
             # Create an error result but don't return yet - we still want to evaluate
             prompt_result = Trace(reward=0.0, done=True, content=str(e), isError=True)
             prompt_result.populate_from_context()
 
-        # Always evaluate if we have a prompt result and evaluate tool
-        if prompt_result is not None and task.evaluate_tool is not None:
+        # Always evaluate if we have evaluate tool, regardless of errors
+        if task.evaluate_tool is not None:
             try:
-                self.design.progress_log(f"Evaluating tool phase: {task.evaluate_tool}")
+                self.console.progress_log(f"Evaluating tool phase: {task.evaluate_tool}")
                 results = await self.call_tools(task.evaluate_tool)
 
                 if any(result.isError for result in results):
-                    raise RuntimeError(f"{results}")
-
-                # Extract reward and content from evaluation
-                if results:
-                    reward = find_reward(results[0])
-                    eval_content = find_content(results[0])
-
-                    # Update the prompt result with evaluation reward
-                    prompt_result.reward = reward
-
-                    # Update the prompt result with evaluation content (if available)
-                    if eval_content:
-                        # Prompt result may already have final response content, so we append to it
-                        if prompt_result.content:
-                            prompt_result.content += "\n\n" + eval_content
+                    self.console.warning_log(f"Evaluate tool returned error: {results}")
+                    # Still extract what we can from the error response
+                    if prompt_result is None:
+                        prompt_result = Trace(
+                            reward=0.0,
+                            done=True,
+                            content="Task failed before evaluation",
+                            isError=True,
+                        )
+                    prompt_result.reward = 0.0  # Default to 0 on error
+                else:
+                    # Extract reward and content from evaluation
+                    if results:
+                        reward = find_reward(results[0])
+                        eval_content = find_content(results[0])
+
+                        # Update the prompt result with evaluation reward
+                        if prompt_result is None:
+                            prompt_result = Trace(
+                                reward=reward, done=True, content=eval_content or "", isError=False
+                            )
                         else:
-                            prompt_result.content = eval_content
+                            prompt_result.reward = reward
+
+                        # Update the prompt result with evaluation content (if available)
+                        if eval_content:
+                            # Prompt result may already have final response content,
+                            # so we append to it
+                            if prompt_result.content:
+                                prompt_result.content += "\n\n" + eval_content
+                            else:
+                                prompt_result.content = eval_content
 
             except Exception as e:
-                self.design.error_log(f"Evaluation phase failed: {e}")
-                # Continue with the prompt result even if evaluation failed
+                self.console.error_log(f"Evaluation phase failed: {e}")
+                # Ensure we have a result even if evaluation failed
+                if prompt_result is None:
+                    prompt_result = Trace(
+                        reward=0.0, done=True, content=f"Evaluation failed: {e}", isError=True
+                    )
 
         return (
             prompt_result
@@ -321,21 +347,21 @@ class MCPAgent(ABC):
 
         # Add initial context
         messages.extend(await self.format_message(context))
-        self.design.debug(f"Messages: {messages}")
+        self.console.debug(f"Messages: {messages}")
 
         step_count = 0
         while max_steps == -1 or step_count < max_steps:
             step_count += 1
             if max_steps == -1:
-                self.design.debug(f"Step {step_count} (unlimited)")
+                self.console.debug(f"Step {step_count} (unlimited)")
             else:
-                self.design.debug(f"Step {step_count}/{max_steps}")
+                self.console.debug(f"Step {step_count}/{max_steps}")
 
             try:
                 # 1. Get model response
                 response = await self.get_response(messages)
 
-                self.design.debug(f"Agent:\n{response}")
+                self.console.debug(f"Agent:\n{response}")
 
                 # Check if we should stop
                 if response.done or not response.tool_calls:
@@ -347,16 +373,16 @@ class MCPAgent(ABC):
                             response.content
                         )
                     except Exception as e:
-                        self.design.warning_log(f"ResponseAgent failed: {e}")
+                        self.console.warning_log(f"ResponseAgent failed: {e}")
                     if decision == "STOP":
                         # Try to submit response through lifecycle tool
                         await self._maybe_submit_response(response, messages)
 
-                        self.design.debug("Stopping execution")
+                        self.console.debug("Stopping execution")
                         final_response = response
                         break
                     else:
-                        self.design.debug("Continuing execution")
+                        self.console.debug("Continuing execution")
                         messages.extend(await self.format_message(decision))
                         continue
 
@@ -378,21 +404,21 @@ class MCPAgent(ABC):
                 for call, result in zip(tool_calls, tool_results, strict=False):
                     step_info += f"\n{call}\n{result}"
 
-                self.design.info_log(step_info)
+                self.console.info_log(step_info)
 
             except Exception as e:
-                self.design.error_log(f"Step failed: {e}")
+                self.console.error_log(f"Step failed: {e}")
                 error = str(e)
                 break
 
         except KeyboardInterrupt:
-            self.design.warning_log("Agent execution interrupted by user")
+            self.console.warning_log("Agent execution interrupted by user")
            error = "Interrupted by user"
         except asyncio.CancelledError:
-            self.design.warning_log("Agent execution cancelled")
+            self.console.warning_log("Agent execution cancelled")
            error = "Cancelled"
         except Exception as e:
-            self.design.error_log(f"Unexpected error: {e}")
+            self.console.error_log(f"Unexpected error: {e}")
            error = str(e)
 
         # Build result
@@ -433,17 +459,17 @@ class MCPAgent(ABC):
         results: list[MCPToolResult] = []
         for tc in tool_call:
             try:
-                self.design.debug(f"Calling tool: {tc}")
+                self.console.debug(f"Calling tool: {tc}")
                 results.append(await self.mcp_client.call_tool(tc))
             except TimeoutError as e:
-                self.design.error_log(f"Tool execution timed out: {e}")
+                self.console.error_log(f"Tool execution timed out: {e}")
                 try:
                     await self.mcp_client.shutdown()
                 except Exception as close_err:
-                    self.design.debug(f"Failed to close MCP client cleanly: {close_err}")
+                    self.console.debug(f"Failed to close MCP client cleanly: {close_err}")
                 raise
             except Exception as e:
-                self.design.error_log(f"Tool execution failed: {e}")
+                self.console.error_log(f"Tool execution failed: {e}")
                 results.append(_format_error_result(str(e)))
         return results
 
@@ -575,7 +601,7 @@ class MCPAgent(ABC):
 
         # Add to lifecycle tools if found
         if response_tool_name and response_tool_name not in self.lifecycle_tools:
-            self.design.debug(f"Auto-detected '{response_tool_name}' tool as a lifecycle tool")
+            self.console.debug(f"Auto-detected '{response_tool_name}' tool as a lifecycle tool")
             self.response_tool_name = response_tool_name
             self.lifecycle_tools.append(response_tool_name)
 
@@ -599,7 +625,7 @@ class MCPAgent(ABC):
             messages: The current message history (will be modified in-place)
         """
         if self.response_tool_name:
-            self.design.debug(f"Calling response lifecycle tool: {self.response_tool_name}")
+            self.console.debug(f"Calling response lifecycle tool: {self.response_tool_name}")
             try:
                 # Call the response tool with the agent's response
                 response_tool_call = MCPToolCall(
@@ -614,9 +640,9 @@ class MCPAgent(ABC):
                 messages.extend(response_messages)
 
                 # Mark the task as done
-                self.design.debug("Response lifecycle tool executed, marking task as done")
+                self.console.debug("Response lifecycle tool executed, marking task as done")
             except Exception as e:
-                self.design.error_log(f"Response lifecycle tool failed: {e}")
+                self.console.error_log(f"Response lifecycle tool failed: {e}")
 
     async def _setup_config(self, mcp_config: dict[str, dict[str, Any]]) -> None:
         """Inject metadata into the metadata of the initialize request."""
@@ -670,9 +696,9 @@ class MCPAgent(ABC):
         if self._auto_trace_cm:
             try:
                 self._auto_trace_cm.__exit__(None, None, None)
-                self.design.debug("Closed auto-created trace")
+                self.console.debug("Closed auto-created trace")
             except Exception as e:
-                self.design.warning_log(f"Failed to close auto-created trace: {e}")
+                self.console.warning_log(f"Failed to close auto-created trace: {e}")
             finally:
                 self._auto_trace_cm = None
 
@@ -680,9 +706,9 @@ class MCPAgent(ABC):
         if self._auto_created_client and self.mcp_client:
             try:
                 await self.mcp_client.shutdown()
-                self.design.debug("Closed auto-created MCPClient")
+                self.console.debug("Closed auto-created MCPClient")
             except Exception as e:
-                self.design.warning_log(f"Failed to close auto-created client: {e}")
+                self.console.warning_log(f"Failed to close auto-created client: {e}")
             finally:
                 self.mcp_client = None
                 self._auto_created_client = False
@@ -715,13 +741,13 @@ class MCPAgent(ABC):
         if self._is_connection_error(e):
             msg = self._get_connection_error_message(e)
             # Always show connection errors, not just when logging is enabled
-            self.design.error(f"❌ {msg}")
-            self.design.info("💡 Make sure the MCP server is started before running the agent.")
+            self.console.error(f"❌ {msg}")
+            self.console.info("💡 Make sure the MCP server is started before running the agent.")
 
             # For localhost, provide specific instructions
            error_str = str(e).lower()
            if "localhost" in error_str or "127.0.0.1" in error_str:
-                self.design.info(" Run 'hud dev' in another terminal to start the MCP server")
+                self.console.info(" Run 'hud dev' in another terminal to start the MCP server")
 
            raise RuntimeError(msg) from e
        raise
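
The base.py hunks above also change the failure contract of MCPAgent.run(): in 0.4.22 a non-connection exception propagated to the caller, whereas in 0.4.24 every exception is converted into a Trace with isError=True, and the evaluate phase now runs even when the prompt phase failed. A minimal sketch of what calling code can rely on follows; the Trace fields come from the hunks above, while the ClaudeAgent import path, its no-argument constructor, and how its MCP client gets configured are assumptions not shown in this diff.

# Sketch only: assumes hud-python 0.4.24 with an Anthropic API key in the
# environment and an MCP client/config prepared elsewhere for the agent.
import asyncio

from hud.agents import ClaudeAgent


async def main() -> None:
    agent = ClaudeAgent()
    trace = await agent.run("Open the app and report its title")
    # As of 0.4.24, run() returns an error Trace instead of raising,
    # so failures are inspected on the result object.
    if trace.isError:
        print("task failed:", trace.content)
    else:
        print("reward:", trace.reward)


asyncio.run(main())
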
{hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/claude.py
@@ -196,7 +196,11 @@ class ClaudeAgent(MCPAgent):
                 response = await self.anthropic_client.beta.messages.create(**create_kwargs)
                 break
             except BadRequestError as e:
-                if e.message.startswith("prompt is too long"):
+                if (
+                    "prompt is too long" in str(e)
+                    or "request_too_large" in str(e)
+                    or e.status_code == 413
+                ):
                     logger.warning("Prompt too long, truncating message history")
                     # Keep first message and last 20 messages
                     if len(current_messages) > 21:
{hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/grounded_openai.py
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 import json
-from typing import Any
+from typing import Any, ClassVar
 
 from hud import instrument
 from hud.tools.grounding import GroundedComputerTool, Grounder, GrounderConfig
@@ -26,6 +26,8 @@ class GroundedOpenAIChatAgent(GenericOpenAIChatAgent):
     - Grounding model (Qwen2-VL etc) handles visual element detection
     """
 
+    metadata: ClassVar[dict[str, Any]] = {}
+
     def __init__(
         self,
         *,
{hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/misc/response_agent.py
@@ -16,7 +16,7 @@ class ResponseAgent:
     based on the agent's final response message.
     """
 
-    def __init__(self, api_key: str | None = None) -> None:
+    def __init__(self, api_key: str | None = None, model: str = "gpt-4o") -> None:
         self.api_key = api_key or settings.openai_api_key or os.environ.get("OPENAI_API_KEY")
         if not self.api_key:
             raise ValueError(
@@ -24,6 +24,7 @@ class ResponseAgent:
             )
 
         self.client = AsyncOpenAI(api_key=self.api_key)
+        self.model = model
 
         self.system_prompt = """
You are an assistant that helps determine the appropriate response to an agent's message.
@@ -54,7 +55,7 @@ class ResponseAgent:
         """
         try:
             response = await self.client.chat.completions.create(
-                model="gpt-5-nano",
+                model=self.model,
                 messages=[
                     {"role": "system", "content": self.system_prompt},
                     {
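
ResponseAgent, the helper that decides whether the main agent should stop or keep going based on its final message, previously hard-coded the "gpt-5-nano" model string; 0.4.24 turns it into a constructor parameter that defaults to "gpt-4o". A hedged usage sketch follows; the module path is taken from the file list above, and an OpenAI API key must be available or the constructor raises ValueError.

# Illustrative: pick the decision model explicitly instead of relying on the default.
from hud.agents.misc.response_agent import ResponseAgent

responder = ResponseAgent(model="gpt-4o-mini")  # the default is "gpt-4o" as of 0.4.24
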
{hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/openai.py
@@ -204,7 +204,7 @@ class OperatorAgent(MCPAgent):
                 break
 
         if not latest_screenshot:
-            self.design.warning_log("No screenshot provided for response to action")
+            self.console.warning_log("No screenshot provided for response to action")
             return AgentResponse(
                 content="No screenshot available for next action",
                 tool_calls=[],
@@ -327,7 +327,7 @@ class OperatorAgent(MCPAgent):
             for content in result.content:
                 if isinstance(content, types.TextContent):
                     # Don't add error text as input_text, just track it
-                    self.design.error_log(f"Tool error: {content.text}")
+                    self.console.error_log(f"Tool error: {content.text}")
                 elif isinstance(content, types.ImageContent):
                     # Even error results might have images
                     latest_screenshot = content.data
{hud_python-0.4.22 → hud_python-0.4.24}/hud/agents/openai_chat_generic.py
@@ -17,7 +17,7 @@ from __future__ import annotations
 
 import json
 import logging
-from typing import TYPE_CHECKING, Any, cast
+from typing import TYPE_CHECKING, Any, ClassVar, cast
 
 import mcp.types as types
 
@@ -36,6 +36,8 @@ logger = logging.getLogger(__name__)
 class GenericOpenAIChatAgent(MCPAgent):
     """MCP-enabled agent that speaks the OpenAI *chat.completions* protocol."""
 
+    metadata: ClassVar[dict[str, Any]] = {}
+
     def __init__(
         self,
         *,
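
Both OpenAI chat agents now declare metadata as a class-level ClassVar[dict[str, Any]], replacing the per-instance self.metadata = {} that 0.4.22 set in MCPAgent.__init__ (see the base.py hunk above). A hedged sketch of overriding it in a subclass follows; the class and module names come from this diff, but how the base class consumes the dict is not shown here.

# Sketch: per-class metadata instead of the old per-instance attribute.
from typing import Any, ClassVar

from hud.agents.openai_chat_generic import GenericOpenAIChatAgent


class TaggedChatAgent(GenericOpenAIChatAgent):
    metadata: ClassVar[dict[str, Any]] = {"team": "qa-bots"}
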
{hud_python-0.4.22 → hud_python-0.4.24}/hud/cli/__init__.py
@@ -184,7 +184,7 @@ def debug(
         hud debug . --max-phase 3  # Stop after phase 3
     """
     # Import here to avoid circular imports
-    from hud.utils.design import HUDDesign
+    from hud.utils.hud_console import HUDConsole
 
     from .utils.environment import (
         build_environment,
@@ -193,7 +193,7 @@ def debug(
         is_environment_directory,
     )
 
-    design = HUDDesign()
+    hud_console = HUDConsole()
 
     # Determine the command to run
     command = None
@@ -227,7 +227,7 @@ def debug(
         image_name, source = get_image_name(directory)
 
         if source == "auto":
-            design.info(f"Auto-generated image name: {image_name}")
+            hud_console.info(f"Auto-generated image name: {image_name}")
 
         # Build if requested or if image doesn't exist
         if build or not image_exists(image_name):
@@ -263,20 +263,20 @@ def debug(
     phases_completed = asyncio.run(debug_mcp_stdio(command, logger, max_phase=max_phase))
 
     # Show summary using design system
-    from hud.utils.design import HUDDesign
+    from hud.utils.hud_console import HUDConsole
 
-    design = HUDDesign()
+    hud_console = HUDConsole()
 
-    design.info("")  # Empty line
-    design.section_title("Debug Summary")
+    hud_console.info("")  # Empty line
+    hud_console.section_title("Debug Summary")
 
     if phases_completed == max_phase:
-        design.success(f"All {max_phase} phases completed successfully!")
+        hud_console.success(f"All {max_phase} phases completed successfully!")
         if max_phase == 5:
-            design.info("Your MCP server is fully functional and ready for production use.")
+            hud_console.info("Your MCP server is fully functional and ready for production use.")
     else:
-        design.warning(f"Completed {phases_completed} out of {max_phase} phases")
-        design.info("Check the errors above for troubleshooting.")
+        hud_console.warning(f"Completed {phases_completed} out of {max_phase} phases")
+        hud_console.info("Check the errors above for troubleshooting.")
 
     # Exit with appropriate code
     if phases_completed < max_phase:
@@ -831,9 +831,9 @@ def eval(
     ),
 ) -> None:
     """🚀 Run evaluation on datasets or individual tasks with agents."""
-    from hud.utils.design import HUDDesign
+    from hud.utils.hud_console import HUDConsole
 
-    design = HUDDesign()
+    hud_console = HUDConsole()
 
     # If no source provided, look for task/eval JSON files in current directory
     if source is None:
@@ -863,30 +863,30 @@ def eval(
         json_files = sorted(set(json_files))
 
         if not json_files:
-            design.error(
+            hud_console.error(
                 "No source provided and no task/eval JSON files found in current directory"
             )
-            design.info(
+            hud_console.info(
                 "Usage: hud eval <source> or create a task JSON file "
                 "(e.g., task.json, eval_config.json)"
             )
             raise typer.Exit(1)
         elif len(json_files) == 1:
             source = str(json_files[0])
-            design.info(f"Found task file: {source}")
+            hud_console.info(f"Found task file: {source}")
         else:
             # Multiple files found, let user choose
-            design.info("Multiple task files found:")
-            file_choice = design.select(
+            hud_console.info("Multiple task files found:")
+            file_choice = hud_console.select(
                 "Select a task file to run:",
                 choices=[str(f) for f in json_files],
             )
             source = file_choice
-            design.success(f"Selected: {source}")
+            hud_console.success(f"Selected: {source}")
 
     # If no agent specified, prompt for selection
     if agent is None:
-        agent = design.select(
+        agent = hud_console.select(
             "Select an agent to use:",
             choices=[
                 {"name": "Claude 4 Sonnet", "value": "claude"},
@@ -898,14 +898,14 @@ def eval(
     # Validate agent choice
     valid_agents = ["claude", "openai"]
     if agent not in valid_agents:
-        design.error(f"Invalid agent: {agent}. Must be one of: {', '.join(valid_agents)}")
+        hud_console.error(f"Invalid agent: {agent}. Must be one of: {', '.join(valid_agents)}")
         raise typer.Exit(1)
 
     # Import eval_command lazily to avoid importing agent dependencies
     try:
         from .eval import eval_command
     except ImportError as e:
-        design.error(
+        hud_console.error(
             "Evaluation dependencies are not installed. "
             "Please install with: pip install 'hud-python[agent]'"
         )
@@ -962,6 +962,16 @@ def hf(
 
 def main() -> None:
     """Main entry point for the CLI."""
+    # Handle --version flag before Typer parses args
+    if "--version" in sys.argv:
+        try:
+            from hud import __version__
+
+            console.print(f"HUD CLI version: [cyan]{__version__}[/cyan]")
+        except ImportError:
+            console.print("HUD CLI version: [cyan]unknown[/cyan]")
+        return
+
     try:
         # Show header for main help
         if len(sys.argv) == 1 or (len(sys.argv) == 2 and sys.argv[1] in ["--help", "-h"]):
@@ -995,9 +1005,9 @@ def main() -> None:
         except Exception:
             exit_code = 1
         if exit_code != 0:
-            from hud.utils.design import design
+            from hud.utils.hud_console import hud_console
 
-            design.info(SUPPORT_HINT)
+            hud_console.info(SUPPORT_HINT)
             raise
     except Exception:
         raise