hud_python-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of hud-python has been flagged as possibly problematic.

Files changed (50)
  1. hud_python-0.1.0/.env.example +2 -0
  2. hud_python-0.1.0/.github/workflows/ci.yml +49 -0
  3. hud_python-0.1.0/.github/workflows/release.yml +22 -0
  4. hud_python-0.1.0/.gitignore +19 -0
  5. hud_python-0.1.0/LICENSE +21 -0
  6. hud_python-0.1.0/MANIFEST.in +13 -0
  7. hud_python-0.1.0/PKG-INFO +125 -0
  8. hud_python-0.1.0/README.md +69 -0
  9. hud_python-0.1.0/agent/base.py +7 -0
  10. hud_python-0.1.0/agent/claude.py +141 -0
  11. hud_python-0.1.0/agent/response_agent.py +54 -0
  12. hud_python-0.1.0/docs/api-reference/adapters.mdx +161 -0
  13. hud_python-0.1.0/docs/api-reference/client.mdx +159 -0
  14. hud_python-0.1.0/docs/api-reference/env.mdx +159 -0
  15. hud_python-0.1.0/docs/concepts/adapter.mdx +74 -0
  16. hud_python-0.1.0/docs/concepts/client.mdx +25 -0
  17. hud_python-0.1.0/docs/concepts/environment.mdx +91 -0
  18. hud_python-0.1.0/docs/concepts/gym.mdx +49 -0
  19. hud_python-0.1.0/docs/examples/basic.mdx +156 -0
  20. hud_python-0.1.0/docs/examples/claude-agent.mdx +306 -0
  21. hud_python-0.1.0/docs/examples/custom-agent.mdx +184 -0
  22. hud_python-0.1.0/docs/installation.mdx +77 -0
  23. hud_python-0.1.0/docs/introduction.mdx +54 -0
  24. hud_python-0.1.0/docs/logo/HUD.svg +5 -0
  25. hud_python-0.1.0/docs/mint.json +85 -0
  26. hud_python-0.1.0/docs/quickstart.mdx +81 -0
  27. hud_python-0.1.0/examples/README.md +22 -0
  28. hud_python-0.1.0/examples/basic_usage.py +81 -0
  29. hud_python-0.1.0/examples/claude_agent_example.py +134 -0
  30. hud_python-0.1.0/examples/simple_agent_example.py +162 -0
  31. hud_python-0.1.0/hud/__init__.py +22 -0
  32. hud_python-0.1.0/hud/adapters/__init__.py +5 -0
  33. hud_python-0.1.0/hud/adapters/claude/__init__.py +6 -0
  34. hud_python-0.1.0/hud/adapters/claude/adapter.py +131 -0
  35. hud_python-0.1.0/hud/adapters/common/__init__.py +6 -0
  36. hud_python-0.1.0/hud/adapters/common/adapter.py +167 -0
  37. hud_python-0.1.0/hud/adapters/common/types.py +92 -0
  38. hud_python-0.1.0/hud/client.py +184 -0
  39. hud_python-0.1.0/hud/env.py +258 -0
  40. hud_python-0.1.0/hud/gym.py +22 -0
  41. hud_python-0.1.0/hud/py.typed +0 -0
  42. hud_python-0.1.0/hud/run.py +157 -0
  43. hud_python-0.1.0/hud/server/__init__.py +5 -0
  44. hud_python-0.1.0/hud/server/requests.py +79 -0
  45. hud_python-0.1.0/hud/settings.py +39 -0
  46. hud_python-0.1.0/hud/utils/__init__.py +5 -0
  47. hud_python-0.1.0/hud/utils/config.py +7 -0
  48. hud_python-0.1.0/pyproject.toml +128 -0
  49. hud_python-0.1.0/tests/__init__.py +0 -0
  50. hud_python-0.1.0/tests/test_import.py +7 -0
@@ -0,0 +1,2 @@
+ # Required API keys
+ HUD_API_KEY="your-hud-api-key-here"
@@ -0,0 +1,49 @@
+ name: CI
+
+ on:
+   push:
+     branches: [ "main" ]
+   pull_request:
+     branches: [ "main" ]
+
+ jobs:
+   test:
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
+
+     steps:
+       - name: Check out code
+         uses: actions/checkout@v4
+
+       - name: Install uv
+         uses: astral-sh/setup-uv@v5
+
+       - name: Install Python
+         run: uv python install ${{ matrix.python-version }}
+
+       - name: Run tests
+         run: uv run --python ${{ matrix.python-version }} --with=".[dev]" pytest
+
+   lint-ruff:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - name: Install uv
+         uses: astral-sh/setup-uv@v5
+
+       - name: Run ruff
+         run: |
+           uv run --with=".[dev]" ruff format .
+           uv run --with=".[dev]" ruff check .
+
+   lint-pyright:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+       - name: Install uv
+         uses: astral-sh/setup-uv@v5
+
+       - name: Run pyright
+         run: uv run --with=".[dev]" pyright
@@ -0,0 +1,22 @@
+ name: Release
+
+ on:
+   release:
+     types: [published]
+
+ jobs:
+   deploy:
+     runs-on: ubuntu-latest
+     environment: pypi
+     steps:
+       - uses: actions/checkout@v4
+
+       - name: Install uv
+         uses: astral-sh/setup-uv@v5
+
+       - name: Build and publish to PyPi
+         env:
+           UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
+         run: |
+           uv build
+           uv publish
@@ -0,0 +1,19 @@
+ .venv
+ venv
+ .env
+ __pycache__
+ *.pyc
+ .pytest_cache
+ dist/
+ build/
+ *.egg-info/
+ uv.lock
+
+ # Media files
+ *.png
+ *.jpg
+ *.jpeg
+ *.gif
+ *.bmp
+ *.tiff
+ *.ico
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Human Data Company
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,13 @@
+ # Include examples in source distribution
+ include examples/*.py
+ include examples/README.md
+
+ # Include license and documentation
+ include LICENSE
+ include README.md
+ include .env.example
+
+ # Exclude tests and development files
+ exclude tests/
+ exclude .github/
+ exclude *.ipynb
@@ -0,0 +1,125 @@
+ Metadata-Version: 2.4
+ Name: hud-python
+ Version: 0.1.0
+ Summary: SDK for the HUD evaluation platform.
+ Project-URL: Homepage, https://github.com/Human-Data/hud-sdk
+ Project-URL: Bug Tracker, https://github.com/Human-Data/hud-sdk/issues
+ Project-URL: Documentation, https://hud.so
+ Author-email: Human Union Data SDK <founders@hud.so>
+ License: MIT License
+
+ Copyright (c) 2025 Human Data Company
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ License-File: LICENSE
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Intended Audience :: Developers
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Requires-Python: <3.14,>=3.9
+ Requires-Dist: eval-type-backport>=0.2.2
+ Requires-Dist: httpx<1,>=0.23.0
+ Requires-Dist: pillow<12,>=11
+ Requires-Dist: pydantic-settings<3,>=2
+ Requires-Dist: pydantic<3,>=2
+ Provides-Extra: dev
+ Requires-Dist: anthropic; extra == 'dev'
+ Requires-Dist: ipykernel; extra == 'dev'
+ Requires-Dist: ipython<9; extra == 'dev'
+ Requires-Dist: jupyter-client; extra == 'dev'
+ Requires-Dist: jupyter-core; extra == 'dev'
+ Requires-Dist: openai; extra == 'dev'
+ Requires-Dist: pyright==1.1.364; extra == 'dev'
+ Requires-Dist: pytest<9,>=8.1.1; extra == 'dev'
+ Requires-Dist: ruff==0.9.8; extra == 'dev'
+ Description-Content-Type: text/markdown
+
+ # HUD SDK (Alpha Release)
+
+ A Python SDK for interacting with HUD environments and evaluation benchmarks for browser use and computer use models.
+
+ Visit [hud.so](https://hud.so) for more information about HUD.
+
+ > **Alpha Release Notice**: This SDK is currently in alpha status (v0.1.0-alpha). The API is still evolving and may change in future releases as we gather feedback and improve functionality.
+
+ [![PyPI version](https://img.shields.io/pypi/v/hud-python)](https://pypi.org/project/hud-python/)
+
+ [📚 Documentation](https://docs.hud.so) | [🏠 Homepage](https://hud.so)
+
+ ## Quick Start
+
+ ```bash
+ # Install the latest stable release
+ pip install hud-python
+
+ # Install the latest alpha release (may include breaking changes)
+ pip install --pre hud-python
+
+ # Install a specific alpha version
+ pip install hud-python==0.1.0-alpha
+ ```
+
+ ```python
+ import asyncio
+ from hud import HUDClient
+
+ async def main():
+     # Initialize client with API key
+     client = HUDClient(api_key="your-api-key")
+
+     # Load a gym and evaluation set
+     gym = await client.load_gym(id="OSWorld-Ubuntu")
+     evalset = await client.load_evalset(id="OSWorld-Ubuntu")
+
+     # Create a run and environment
+     run = client.create_run(name="example-run", gym=gym, evalset=evalset)
+     env = await run.make(metadata={"agent_id": "example"})
+
+     # Agent loop goes here
+     # For complete examples and usage guides, see our documentation
+
+     # Close the environment when done
+     await env.close()
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ ## Key Features
+
+ - Connect to HUD evaluation environments
+ - Run benchmarks across various tasks
+ - Support for different agent adapters
+ - Asynchronous API for efficient interaction
+
+ ## Documentation
+
+ For comprehensive guides, examples, and API reference, visit:
+ - [Getting Started](https://docs.hud.so/introduction)
+ - [Installation](https://docs.hud.so/installation)
+ - [API Reference](https://docs.hud.so/api-reference)
+ - [Examples](https://docs.hud.so/examples)
+
+ ## License
+
+ [MIT License](LICENSE)
@@ -0,0 +1,69 @@
+ # HUD SDK (Alpha Release)
+
+ A Python SDK for interacting with HUD environments and evaluation benchmarks for browser use and computer use models.
+
+ Visit [hud.so](https://hud.so) for more information about HUD.
+
+ > **Alpha Release Notice**: This SDK is currently in alpha status (v0.1.0-alpha). The API is still evolving and may change in future releases as we gather feedback and improve functionality.
+
+ [![PyPI version](https://img.shields.io/pypi/v/hud-python)](https://pypi.org/project/hud-python/)
+
+ [📚 Documentation](https://docs.hud.so) | [🏠 Homepage](https://hud.so)
+
+ ## Quick Start
+
+ ```bash
+ # Install the latest stable release
+ pip install hud-python
+
+ # Install the latest alpha release (may include breaking changes)
+ pip install --pre hud-python
+
+ # Install a specific alpha version
+ pip install hud-python==0.1.0-alpha
+ ```
+
+ ```python
+ import asyncio
+ from hud import HUDClient
+
+ async def main():
+     # Initialize client with API key
+     client = HUDClient(api_key="your-api-key")
+
+     # Load a gym and evaluation set
+     gym = await client.load_gym(id="OSWorld-Ubuntu")
+     evalset = await client.load_evalset(id="OSWorld-Ubuntu")
+
+     # Create a run and environment
+     run = client.create_run(name="example-run", gym=gym, evalset=evalset)
+     env = await run.make(metadata={"agent_id": "example"})
+
+     # Agent loop goes here
+     # For complete examples and usage guides, see our documentation
+
+     # Close the environment when done
+     await env.close()
+
+ if __name__ == "__main__":
+     asyncio.run(main())
+ ```
+
+ ## Key Features
+
+ - Connect to HUD evaluation environments
+ - Run benchmarks across various tasks
+ - Support for different agent adapters
+ - Asynchronous API for efficient interaction
+
+ ## Documentation
+
+ For comprehensive guides, examples, and API reference, visit:
+ - [Getting Started](https://docs.hud.so/introduction)
+ - [Installation](https://docs.hud.so/installation)
+ - [API Reference](https://docs.hud.so/api-reference)
+ - [Examples](https://docs.hud.so/examples)
+
+ ## License
+
+ [MIT License](LICENSE)
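
Editor's note: as a small variation on the Quick Start above, the sketch below reads the `HUD_API_KEY` defined in `.env.example` from the environment instead of hard-coding it and closes the environment in a `finally` block. Only `HUDClient`, `load_gym`, `load_evalset`, `create_run`, `run.make`, and `env.close` come from the README; the environment-variable handling is an assumption, not documented hud-python behavior.

```python
# Hypothetical sketch; assumes HUD_API_KEY is exported (see .env.example).
import asyncio
import os

from hud import HUDClient


async def main() -> None:
    # Read the key from the environment rather than hard-coding it.
    client = HUDClient(api_key=os.environ["HUD_API_KEY"])

    gym = await client.load_gym(id="OSWorld-Ubuntu")
    evalset = await client.load_evalset(id="OSWorld-Ubuntu")
    run = client.create_run(name="example-run", gym=gym, evalset=evalset)
    env = await run.make(metadata={"agent_id": "example"})
    try:
        ...  # agent loop goes here
    finally:
        # Always release the remote environment, even if the loop fails.
        await env.close()


if __name__ == "__main__":
    asyncio.run(main())
```
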
@@ -0,0 +1,7 @@
+ class Agent:
+     def __init__(self):
+         self.messages = []
+         self.responses = []
+
+     def predict(self):
+         raise NotImplementedError("Subclasses must implement this method")
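
Editor's note: the `Agent` base class above only holds shared `messages`/`responses` state and an abstract `predict`. As a minimal illustration of the intended subclassing pattern, a toy subclass might look like the sketch below (`EchoAgent` is hypothetical and not part of the package; the Claude agent in the next file is the real implementation).

```python
# Minimal hypothetical subclass of the Agent base class shown above.
from agent.base import Agent


class EchoAgent(Agent):
    def predict(self, input_text: str | None = None):
        # Record the incoming message and echo it back as the "response".
        self.messages.append(input_text)
        response = f"echo: {input_text}"
        self.responses.append(response)
        return response
```
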
@@ -0,0 +1,141 @@
+ import os
+ import json
+ from agent.base import Agent
+ from anthropic import Anthropic
+
+
+ class Claude(Agent):
+     def __init__(self):
+         super().__init__()
+         self.client = Anthropic(api_key=os.getenv("ANTHROPIC_API_KEY"))
+         self.model = "claude-3-7-sonnet-20250219"
+         self.max_tokens = 4096
+         self.tool_version = "20250124"
+         self.thinking_budget = 1024
+         self.conversation = []  # Store the full conversation history including Claude's responses
+
+     async def predict(self, base64_image: str | None = None, input_text: str | None = None):
+         message = self._create_message(base64_image, input_text)
+
+         # Only append the message if it's not empty
+         if message:
+             self.conversation.append(message)
+
+         response = self._generate_response()
+
+         # Check if this response contains any tool_use blocks
+         for block in response.content:
+             if hasattr(block, "type") and block.type == "tool_use":
+                 break
+
+         # Add Claude's response to the conversation history
+         assistant_message = {"role": "assistant", "content": response.content}
+         self.conversation.append(assistant_message)
+
+         self.responses.append(response)
+         return response
+
+     def _create_message(self, base64_image: str | None = None, input_text: str | None = None):
+         """Create appropriate message based on context and inputs"""
+
+         # Check if the previous response was from assistant and had tool_use
+         if len(self.conversation) >= 2 and self.conversation[-1]["role"] == "assistant":
+             last_assistant_message = self.conversation[-1]
+
+             # Look for tool_use blocks in the assistant's message
+             for block in last_assistant_message["content"]:
+                 if hasattr(block, "type") and block.type == "tool_use":
+                     if hasattr(block, "name") and block.name == "computer" and base64_image:
+                         # Found the tool_use to respond to
+                         return {
+                             "role": "user",
+                             "content": [
+                                 {
+                                     "type": "tool_result",
+                                     "tool_use_id": block.id,
+                                     "content": [
+                                         {
+                                             "type": "image",
+                                             "source": {
+                                                 "type": "base64",
+                                                 "media_type": "image/png",
+                                                 "data": base64_image,
+                                             },
+                                         }
+                                     ],
+                                 }
+                             ],
+                         }
+
+         # Regular user message
+         if input_text or base64_image:
+             content = []
+             if input_text:
+                 content.append({"type": "text", "text": input_text})
+             if base64_image:
+                 content.append(
+                     {
+                         "type": "image",
+                         "source": {
+                             "type": "base64",
+                             "media_type": "image/png",
+                             "data": base64_image,
+                         },
+                     }
+                 )
+
+             return {"role": "user", "content": content}
+
+         return None  # Return None if no message could be created
+
+     def _generate_response(self):
+         beta_flag = (
+             "computer-use-2025-01-24"
+             if "20250124" in self.tool_version
+             else "computer-use-2024-10-22"
+         )
+
+         tools = [
+             {
+                 "type": f"computer_{self.tool_version}",
+                 "name": "computer",
+                 "display_width_px": 1024,
+                 "display_height_px": 768,
+                 "display_number": 1,
+             }
+         ]
+
+         thinking = {"type": "enabled", "budget_tokens": self.thinking_budget}
+
+         try:
+             response = self.client.beta.messages.create(
+                 model=self.model,
+                 max_tokens=self.max_tokens,
+                 messages=self.conversation,  # Use the full conversation including assistant responses
+                 tools=tools,
+                 betas=[beta_flag],
+                 thinking=thinking,
+             )
+             return response
+         except Exception as e:
+             raise
+
+     def process_response(self, response: dict) -> tuple[bool, str | None]:
+         # Check if response contains a computer tool use
+         has_computer_tool_use = False
+         computer_action = None
+         for block in response["content"]:
+             if block.type == "tool_use" and block.name == "computer":
+                 has_computer_tool_use = True
+                 computer_action = block.input
+                 break
+
+         if not has_computer_tool_use:
+             # No computer tool use, treat as final response
+             return True, str(response["content"][-1].text)
+
+         # If we have a computer action, adapt it to environment actions
+         if computer_action:
+             return False, computer_action
+
+         return True, None
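
Editor's note: `predict` and `process_response` above imply a screenshot-in / action-out loop: the first turn sends the task text, later turns send the latest screenshot as a `tool_result`, and `process_response` decides whether Claude is done or wants another computer action. The sketch below shows one way such a loop could be driven. Only `Claude.predict` and `Claude.process_response` come from the code shown; `run_episode`, `env.step`, and the `max_steps` cap are hypothetical assumptions for illustration.

```python
# Hypothetical driving loop for the Claude agent above (requires ANTHROPIC_API_KEY).
from agent.claude import Claude


async def run_episode(env, task_prompt: str, max_steps: int = 20):
    agent = Claude()
    screenshot = None        # base64 PNG of the latest observation
    text = task_prompt       # only the first turn carries task text
    for _ in range(max_steps):
        response = await agent.predict(base64_image=screenshot, input_text=text)
        # process_response indexes its argument as a mapping, so wrap the
        # SDK response's content list in a dict before passing it in.
        done, action = agent.process_response({"content": response.content})
        if done:
            return action  # final text answer (or None)
        # Apply the computer action and capture a fresh screenshot
        # (env.step is an assumed API, not taken from the code above).
        screenshot = await env.step(action)
        text = None  # subsequent turns only return tool results
    return None
```
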
@@ -0,0 +1,54 @@
+ import json
+ import os
+ import openai
+ from typing import Literal, Optional
+
+ ResponseType = Literal["STOP", "CONTINUE"]
+
+ class ResponseAgent:
+     def __init__(self, api_key: Optional[str] = None):
+         self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
+         if not self.api_key:
+             raise ValueError("OpenAI API key must be provided or set as OPENAI_API_KEY environment variable")
+
+         self.client = openai.Client(api_key=self.api_key)
+
+         self.system_prompt = """
+         You are an assistant that helps determine the appropriate response to an agent's message.
+
+         You will receive messages from an agent that is performing tasks for a user.
+         Your job is to analyze these messages and respond with one of the following:
+
+         - STOP: If the agent indicates it has successfully completed a task, even if phrased as a question
+           like "I have returned to the previous website. Would you like me to do anything else?"
+
+         - CONTINUE: If the agent is asking for clarification before proceeding with a task
+           like "I'm about to clear cookies from this website. Would you like me to proceed?"
+
+         Respond ONLY with one of these two options.
+         """
+
+     def determine_response(self, agent_message: str) -> ResponseType:
+         try:
+             response = self.client.chat.completions.create(
+                 model="gpt-4o",
+                 messages=[
+                     {"role": "system", "content": self.system_prompt},
+                     {"role": "user", "content": f"Agent message: {agent_message}\n\nWhat is the appropriate response?"}
+                 ],
+                 temperature=0.1,  # Low temperature for more deterministic responses
+                 max_tokens=5  # We only need a short response
+             )
+
+             response_text = response.choices[0].message.content.strip().upper()
+
+             # Validate the response
+             if "STOP" in response_text:
+                 return "STOP"
+             else:
+                 return "CONTINUE"
+
+         except Exception as e:
+             print(f"Error determining response: {e}")
+             return "CONTINUE"
+
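
Editor's note: `ResponseAgent` classifies an agent's natural-language status message into `"STOP"` or `"CONTINUE"`, falling back to `"CONTINUE"` on any error. A brief usage sketch (requires `OPENAI_API_KEY`; the printed strings are illustrative only):

```python
# Hypothetical usage of the ResponseAgent shown above.
from agent.response_agent import ResponseAgent

responder = ResponseAgent()
decision = responder.determine_response(
    "I have returned to the previous website. Would you like me to do anything else?"
)
if decision == "STOP":
    print("Task considered complete; end the agent loop.")
else:
    print("Agent is asking for clarification; keep going.")
```
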
@@ -0,0 +1,161 @@
+ ---
+ title: 'Adapters API'
+ description: 'API reference for the adapter classes'
+ ---
+
+ # Adapters API Reference
+
+ ## Base Adapter Class
+
+ The `Adapter` class is the base class for all adapters in the HUD SDK.
+
+ ### Constructor
+
+ ```python
+ Adapter() -> Adapter
+ ```
+
+ Creates a new adapter.
+
+ ### Methods
+
+ #### convert
+
+ ```python
+ convert(data: Any) -> Any
+ ```
+
+ Converts an action from the agent's format to the CLA format.
+
+ **Parameters:**
+ - `data` (Any): The action data to convert
+
+ **Returns:**
+ - `Any`: The converted action in CLA format
+
+ #### adapt_list
+
+ ```python
+ adapt_list(actions: list[Any]) -> list[Any]
+ ```
+
+ Adapts a list of actions.
+
+ **Parameters:**
+ - `actions` (list[Any]): The list of actions to adapt
+
+ **Returns:**
+ - `list[Any]`: The adapted list of actions
+
+ ## Common Action Types
+
+ ### ClickAction
+
+ Represents a mouse click action.
+
+ ```python
+ ClickAction(point: Point, button: str = "left") -> ClickAction
+ ```
+
+ **Parameters:**
+ - `point` (Point): The point to click
+ - `button` (str, optional): The mouse button to use ("left", "right", "double")
+
+ ### TypeAction
+
+ Represents a keyboard typing action.
+
+ ```python
+ TypeAction(text: str, enter_after: bool = False) -> TypeAction
+ ```
+
+ **Parameters:**
+ - `text` (str): The text to type
+ - `enter_after` (bool, optional): Whether to press Enter after typing
+
+ ### ScrollAction
+
+ Represents a scrolling action.
+
+ ```python
+ ScrollAction(delta_x: int = 0, delta_y: int = 0) -> ScrollAction
+ ```
+
+ **Parameters:**
+ - `delta_x` (int, optional): The horizontal scroll amount
+ - `delta_y` (int, optional): The vertical scroll amount
+
+ ### DragAction
+
+ Represents a drag-and-drop action.
+
+ ```python
+ DragAction(start: Point, end: Point, button: str = "left") -> DragAction
+ ```
+
+ **Parameters:**
+ - `start` (Point): The starting point
+ - `end` (Point): The ending point
+ - `button` (str, optional): The mouse button to use
+
+ ### Point
+
+ Represents a point on the screen.
+
+ ```python
+ Point(x: int, y: int) -> Point
+ ```
+
+ **Parameters:**
+ - `x` (int): The x-coordinate
+ - `y` (int): The y-coordinate
+
+ ## Claude Adapter
+
+ The `ClaudeAdapter` class is an adapter for Anthropic's Claude models.
+
+ ### Constructor
+
+ ```python
+ ClaudeAdapter() -> ClaudeAdapter
+ ```
+
+ Creates a new Claude adapter.
+
+ ### Methods
+
+ #### convert
+
+ ```python
+ convert(data: Any) -> Any
+ ```
+
+ Converts a Claude action to the CLA format.
+
+ **Parameters:**
+ - `data` (Any): The Claude action data
+
+ **Returns:**
+ - `Any`: The converted action in CLA format
+
+ ## Usage Example
+
+ ```python
+ from hud.adapters.common import Adapter
+ from hud.adapters.common.types import ClickAction, Point
+
+ # Create a custom adapter
+ class MyAdapter(Adapter):
+     def convert(self, data: Any) -> Any:
+         if isinstance(data, dict) and "type" in data:
+             if data["type"] == "click":
+                 return ClickAction(
+                     point=Point(x=data["x"], y=data["y"]),
+                     button=data.get("button", "left")
+                 )
+         return super().convert(data)
+
+ # Use the adapter
+ adapter = MyAdapter()
+ env = await run.make(adapter=adapter, metadata={"agent_id": "my-agent"})
+ ```