minitap_mobile_use-0.0.1.dev0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of minitap-mobile-use might be problematic.
- minitap/mobile_use/__init__.py +0 -0
- minitap/mobile_use/agents/contextor/contextor.py +42 -0
- minitap/mobile_use/agents/cortex/cortex.md +93 -0
- minitap/mobile_use/agents/cortex/cortex.py +107 -0
- minitap/mobile_use/agents/cortex/types.py +11 -0
- minitap/mobile_use/agents/executor/executor.md +73 -0
- minitap/mobile_use/agents/executor/executor.py +84 -0
- minitap/mobile_use/agents/executor/executor_context_cleaner.py +27 -0
- minitap/mobile_use/agents/executor/utils.py +11 -0
- minitap/mobile_use/agents/hopper/hopper.md +13 -0
- minitap/mobile_use/agents/hopper/hopper.py +45 -0
- minitap/mobile_use/agents/orchestrator/human.md +13 -0
- minitap/mobile_use/agents/orchestrator/orchestrator.md +18 -0
- minitap/mobile_use/agents/orchestrator/orchestrator.py +114 -0
- minitap/mobile_use/agents/orchestrator/types.py +14 -0
- minitap/mobile_use/agents/outputter/human.md +25 -0
- minitap/mobile_use/agents/outputter/outputter.py +75 -0
- minitap/mobile_use/agents/outputter/test_outputter.py +107 -0
- minitap/mobile_use/agents/planner/human.md +12 -0
- minitap/mobile_use/agents/planner/planner.md +64 -0
- minitap/mobile_use/agents/planner/planner.py +64 -0
- minitap/mobile_use/agents/planner/types.py +44 -0
- minitap/mobile_use/agents/planner/utils.py +45 -0
- minitap/mobile_use/agents/summarizer/summarizer.py +34 -0
- minitap/mobile_use/clients/device_hardware_client.py +23 -0
- minitap/mobile_use/clients/ios_client.py +44 -0
- minitap/mobile_use/clients/screen_api_client.py +53 -0
- minitap/mobile_use/config.py +285 -0
- minitap/mobile_use/constants.py +2 -0
- minitap/mobile_use/context.py +65 -0
- minitap/mobile_use/controllers/__init__.py +0 -0
- minitap/mobile_use/controllers/mobile_command_controller.py +379 -0
- minitap/mobile_use/controllers/platform_specific_commands_controller.py +74 -0
- minitap/mobile_use/graph/graph.py +149 -0
- minitap/mobile_use/graph/state.py +73 -0
- minitap/mobile_use/main.py +122 -0
- minitap/mobile_use/sdk/__init__.py +12 -0
- minitap/mobile_use/sdk/agent.py +524 -0
- minitap/mobile_use/sdk/builders/__init__.py +10 -0
- minitap/mobile_use/sdk/builders/agent_config_builder.py +213 -0
- minitap/mobile_use/sdk/builders/index.py +15 -0
- minitap/mobile_use/sdk/builders/task_request_builder.py +218 -0
- minitap/mobile_use/sdk/constants.py +14 -0
- minitap/mobile_use/sdk/examples/README.md +45 -0
- minitap/mobile_use/sdk/examples/__init__.py +1 -0
- minitap/mobile_use/sdk/examples/simple_photo_organizer.py +76 -0
- minitap/mobile_use/sdk/examples/smart_notification_assistant.py +177 -0
- minitap/mobile_use/sdk/types/__init__.py +49 -0
- minitap/mobile_use/sdk/types/agent.py +73 -0
- minitap/mobile_use/sdk/types/exceptions.py +74 -0
- minitap/mobile_use/sdk/types/task.py +191 -0
- minitap/mobile_use/sdk/utils.py +28 -0
- minitap/mobile_use/servers/config.py +19 -0
- minitap/mobile_use/servers/device_hardware_bridge.py +212 -0
- minitap/mobile_use/servers/device_screen_api.py +143 -0
- minitap/mobile_use/servers/start_servers.py +151 -0
- minitap/mobile_use/servers/stop_servers.py +215 -0
- minitap/mobile_use/servers/utils.py +11 -0
- minitap/mobile_use/services/accessibility.py +100 -0
- minitap/mobile_use/services/llm.py +143 -0
- minitap/mobile_use/tools/index.py +54 -0
- minitap/mobile_use/tools/mobile/back.py +52 -0
- minitap/mobile_use/tools/mobile/copy_text_from.py +77 -0
- minitap/mobile_use/tools/mobile/erase_text.py +124 -0
- minitap/mobile_use/tools/mobile/input_text.py +74 -0
- minitap/mobile_use/tools/mobile/launch_app.py +59 -0
- minitap/mobile_use/tools/mobile/list_packages.py +78 -0
- minitap/mobile_use/tools/mobile/long_press_on.py +62 -0
- minitap/mobile_use/tools/mobile/open_link.py +59 -0
- minitap/mobile_use/tools/mobile/paste_text.py +66 -0
- minitap/mobile_use/tools/mobile/press_key.py +58 -0
- minitap/mobile_use/tools/mobile/run_flow.py +57 -0
- minitap/mobile_use/tools/mobile/stop_app.py +58 -0
- minitap/mobile_use/tools/mobile/swipe.py +56 -0
- minitap/mobile_use/tools/mobile/take_screenshot.py +70 -0
- minitap/mobile_use/tools/mobile/tap.py +66 -0
- minitap/mobile_use/tools/mobile/wait_for_animation_to_end.py +68 -0
- minitap/mobile_use/tools/tool_wrapper.py +33 -0
- minitap/mobile_use/utils/cli_helpers.py +40 -0
- minitap/mobile_use/utils/cli_selection.py +144 -0
- minitap/mobile_use/utils/conversations.py +31 -0
- minitap/mobile_use/utils/decorators.py +123 -0
- minitap/mobile_use/utils/errors.py +6 -0
- minitap/mobile_use/utils/file.py +13 -0
- minitap/mobile_use/utils/logger.py +184 -0
- minitap/mobile_use/utils/media.py +73 -0
- minitap/mobile_use/utils/recorder.py +55 -0
- minitap/mobile_use/utils/requests_utils.py +37 -0
- minitap/mobile_use/utils/shell_utils.py +20 -0
- minitap/mobile_use/utils/time.py +6 -0
- minitap/mobile_use/utils/ui_hierarchy.py +30 -0
- minitap_mobile_use-0.0.1.dev0.dist-info/METADATA +274 -0
- minitap_mobile_use-0.0.1.dev0.dist-info/RECORD +95 -0
- minitap_mobile_use-0.0.1.dev0.dist-info/WHEEL +4 -0
- minitap_mobile_use-0.0.1.dev0.dist-info/entry_points.txt +3 -0
minitap/mobile_use/config.py (new file)
@@ -0,0 +1,285 @@

```python
import json
import os
from pathlib import Path
from typing import Annotated, Any, Literal, Optional, Union

from dotenv import load_dotenv
from pydantic import BaseModel, Field, SecretStr, ValidationError, model_validator
from pydantic_settings import BaseSettings

from minitap.mobile_use.utils.file import load_jsonc
from minitap.mobile_use.utils.logger import get_logger

### Environment Variables

load_dotenv(verbose=True)
logger = get_logger(__name__)


class Settings(BaseSettings):
    OPENAI_API_KEY: Optional[SecretStr] = None
    GOOGLE_API_KEY: Optional[SecretStr] = None
    XAI_API_KEY: Optional[SecretStr] = None
    OPEN_ROUTER_API_KEY: Optional[SecretStr] = None

    DEVICE_SCREEN_API_BASE_URL: Optional[str] = None
    DEVICE_HARDWARE_BRIDGE_BASE_URL: Optional[str] = None
    ADB_HOST: Optional[str] = None
    ADB_PORT: Optional[int] = None

    model_config = {"env_file": ".env", "extra": "ignore"}


settings = Settings()


def prepare_output_files() -> tuple[str | None, str | None]:
    events_output_path = os.getenv("EVENTS_OUTPUT_PATH") or None
    results_output_path = os.getenv("RESULTS_OUTPUT_PATH") or None

    def validate_and_prepare_file(file_path: str) -> str | None:
        if not file_path:
            return None

        path_obj = Path(file_path)

        if path_obj.exists() and path_obj.is_dir():
            logger.error(f"Error: Path '{file_path}' points to an existing directory, not a file.")
            return None

        if not path_obj.suffix or file_path.endswith(("/", "\\")):
            logger.error(f"Error: Path '{file_path}' appears to be a directory path, not a file.")
            return None

        try:
            path_obj.parent.mkdir(parents=True, exist_ok=True)
            path_obj.touch(exist_ok=True)
            return file_path
        except OSError as e:
            logger.error(f"Error creating file '{file_path}': {e}")
            return None

    validated_events_path = (
        validate_and_prepare_file(events_output_path) if events_output_path else None
    )
    validated_results_path = (
        validate_and_prepare_file(results_output_path) if results_output_path else None
    )

    return validated_events_path, validated_results_path


def record_events(output_path: Path | None, events: Union[list[str], BaseModel, Any]):
    if not output_path:
        return

    if isinstance(events, str):
        events_content = events
    elif isinstance(events, BaseModel):
        events_content = events.model_dump_json(indent=2)
    else:
        events_content = json.dumps(events, indent=2)

    with open(output_path, "w") as f:
        f.write(events_content)


### LLM Configuration

LLMProvider = Literal["openai", "google", "openrouter", "xai"]
LLMUtilsNode = Literal["outputter", "hopper"]
AgentNode = Literal["planner", "orchestrator", "cortex", "executor"]
AgentNodeWithFallback = Literal["cortex"]

ROOT_DIR = Path(__file__).parent.parent.parent
DEFAULT_LLM_CONFIG_FILENAME = "llm-config.defaults.jsonc"
OVERRIDE_LLM_CONFIG_FILENAME = "llm-config.override.jsonc"


class LLM(BaseModel):
    provider: LLMProvider
    model: str

    def validate_provider(self, name: str):
        match self.provider:
            case "openai":
                if not settings.OPENAI_API_KEY:
                    raise Exception(f"{name} requires OPENAI_API_KEY in .env")
            case "google":
                if not settings.GOOGLE_API_KEY:
                    raise Exception(f"{name} requires GOOGLE_API_KEY in .env")
            case "openrouter":
                if not settings.OPEN_ROUTER_API_KEY:
                    raise Exception(f"{name} requires OPEN_ROUTER_API_KEY in .env")
            case "xai":
                if not settings.XAI_API_KEY:
                    raise Exception(f"{name} requires XAI_API_KEY in .env")

    def __str__(self):
        return f"{self.provider}/{self.model}"


class LLMWithFallback(LLM):
    fallback: LLM

    def __str__(self):
        return f"{self.provider}/{self.model} (fallback: {self.fallback})"


class LLMConfigUtils(BaseModel):
    outputter: LLM
    hopper: LLM


class LLMConfig(BaseModel):
    planner: LLM
    orchestrator: LLM
    cortex: LLMWithFallback
    executor: LLM
    utils: LLMConfigUtils

    def validate_providers(self):
        self.planner.validate_provider("Planner")
        self.orchestrator.validate_provider("Orchestrator")
        self.cortex.validate_provider("Cortex")
        self.executor.validate_provider("Executor")
        self.utils.outputter.validate_provider("Outputter")
        self.utils.hopper.validate_provider("Hopper")

    def __str__(self):
        return f"""
📃 Planner: {self.planner}
🎯 Orchestrator: {self.orchestrator}
🧠 Cortex: {self.cortex}
🛠️ Executor: {self.executor}
🧩 Utils:
🔽 Hopper: {self.utils.hopper}
📝 Outputter: {self.utils.outputter}
"""

    def get_agent(self, item: AgentNode) -> LLM:
        return getattr(self, item)

    def get_utils(self, item: LLMUtilsNode) -> LLM:
        return getattr(self.utils, item)


def get_default_llm_config() -> LLMConfig:
    try:
        if not os.path.exists(ROOT_DIR / DEFAULT_LLM_CONFIG_FILENAME):
            raise Exception("Default llm config not found")
        with open(ROOT_DIR / DEFAULT_LLM_CONFIG_FILENAME, "r") as f:
            default_config_dict = load_jsonc(f)
        return LLMConfig.model_validate(default_config_dict["default"])
    except Exception as e:
        logger.error(f"Failed to load default llm config: {e}. Falling back to hardcoded config")
        return LLMConfig(
            planner=LLM(provider="openai", model="gpt-4.1"),
            orchestrator=LLM(provider="openai", model="gpt-4.1"),
            cortex=LLMWithFallback(
                provider="openai",
                model="o3",
                fallback=LLM(provider="openai", model="gpt-5"),
            ),
            executor=LLM(provider="openai", model="gpt-4.1"),
            utils=LLMConfigUtils(
                outputter=LLM(provider="openai", model="gpt-5-nano"),
                hopper=LLM(provider="openai", model="gpt-4.1"),
            ),
        )


def deep_merge_llm_config(default: LLMConfig, override: dict) -> LLMConfig:
    def _deep_merge_dict(base: dict, extra: dict):
        for key, value in extra.items():
            if isinstance(value, dict):
                _deep_merge_dict(base[key], value)
            else:
                base[key] = value

    merged_dict = default.model_dump()
    _deep_merge_dict(merged_dict, override)
    return LLMConfig.model_validate(merged_dict)


def parse_llm_config() -> LLMConfig:
    if not os.path.exists(ROOT_DIR / DEFAULT_LLM_CONFIG_FILENAME):
        return get_default_llm_config()

    override_config_dict = {}
    if os.path.exists(ROOT_DIR / OVERRIDE_LLM_CONFIG_FILENAME):
        logger.info("Loading custom llm config...")
        with open(ROOT_DIR / OVERRIDE_LLM_CONFIG_FILENAME, "r") as f:
            override_config_dict = load_jsonc(f)
    else:
        logger.warning("Custom llm config not found, loading default config")

    try:
        default_config = get_default_llm_config()
        return deep_merge_llm_config(default_config, override_config_dict)

    except ValidationError as e:
        logger.error(f"Invalid llm config: {e}. Falling back to default config")
        return get_default_llm_config()


def initialize_llm_config() -> LLMConfig:
    llm_config = parse_llm_config()
    llm_config.validate_providers()
    logger.success("LLM config initialized")
    return llm_config


### Output config


class OutputConfig(BaseModel):
    structured_output: Annotated[
        Optional[Union[type[BaseModel], dict]],
        Field(
            default=None,
            description=(
                "Optional structured schema (as a BaseModel or dict) to shape the output. "
                "If provided, it takes precedence over 'output_description'."
            ),
        ),
    ]
    output_description: Annotated[
        Optional[str],
        Field(
            default=None,
            description=(
                "Optional natural language description of the expected output format. "
                "Used only if 'structured_output' is not provided. "
                "Example: 'Output a JSON with 3 keys: color, price, websiteUrl'."
            ),
        ),
    ]

    def __str__(self):
        s_builder = ""
        if self.structured_output:
            s_builder += f"Structured Output: {self.structured_output}\n"
        if self.output_description:
            s_builder += f"Output Description: {self.output_description}\n"
        if self.output_description and self.structured_output:
            s_builder += (
                "Both 'structured_output' and 'output_description' are provided. "
                "'structured_output' will take precedence.\n"
            )
        return s_builder

    @model_validator(mode="after")
    def warn_if_both_outputs_provided(self):
        if self.structured_output and self.output_description:
            import warnings

            warnings.warn(
                "Both 'structured_output' and 'output_description' are provided. "
                "'structured_output' will take precedence.",
                stacklevel=2,
            )
        return self

    def needs_structured_format(self):
        return self.structured_output or self.output_description
```
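In practice this module resolves one LLM per agent node by deep-merging an optional `llm-config.override.jsonc` at the repository root over `llm-config.defaults.jsonc`, and falls back to the hardcoded all-OpenAI config when neither loads. A minimal sketch of that merge behaviour, using only the functions defined above; the override values ("google" / "gemini-2.5-pro") are illustrative, not shipped defaults:

```python
# Sketch of the deep-merge path. Only the keys present in the override change;
# everything else keeps the default values.
from minitap.mobile_use.config import deep_merge_llm_config, get_default_llm_config

base = get_default_llm_config()  # defaults file, or the hardcoded OpenAI config
merged = deep_merge_llm_config(
    base,
    {"planner": {"provider": "google", "model": "gemini-2.5-pro"}},  # illustrative override
)
print(merged.planner)  # google/gemini-2.5-pro
print(merged.cortex)   # unchanged, e.g. openai/o3 (fallback: openai/gpt-5)
```

Note that an on-disk `llm-config.override.jsonc` mirrors the `LLMConfig` field layout directly (no `"default"` wrapper), since `parse_llm_config()` merges the loaded dict as-is. `OutputConfig` can likewise be exercised on its own; with only a natural-language description, `needs_structured_format()` returns that description (truthy). The description string below is illustrative:

```python
from minitap.mobile_use.config import OutputConfig

cfg = OutputConfig(output_description="Output a JSON with keys: title, unread_count")
print(bool(cfg.needs_structured_format()))  # True: the outputter should shape the result
```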
minitap/mobile_use/context.py (new file)
@@ -0,0 +1,65 @@

```python
"""
Context variables for global state management.

Uses ContextVar to avoid prop drilling and maintain clean function signatures.
"""

from enum import Enum
from pathlib import Path
from typing import Optional

from adbutils import AdbClient
from openai import BaseModel  # note: openai re-exports a pydantic BaseModel subclass; pydantic.BaseModel is the conventional import
from pydantic import ConfigDict
from typing_extensions import Literal

from minitap.mobile_use.clients.device_hardware_client import DeviceHardwareClient
from minitap.mobile_use.clients.screen_api_client import ScreenApiClient
from minitap.mobile_use.config import LLMConfig


class DevicePlatform(str, Enum):
    """Mobile device platform enumeration."""

    ANDROID = "android"
    IOS = "ios"


class DeviceContext(BaseModel):
    host_platform: Literal["WINDOWS", "LINUX"]
    mobile_platform: DevicePlatform
    device_id: str
    device_width: int
    device_height: int

    def to_str(self):
        return (
            f"Host platform: {self.host_platform}\n"
            f"Mobile platform: {self.mobile_platform.value}\n"
            f"Device ID: {self.device_id}\n"
            f"Device width: {self.device_width}\n"
            f"Device height: {self.device_height}\n"
        )


class ExecutionSetup(BaseModel):
    """Execution setup for a task."""

    traces_path: Path
    trace_id: str


class MobileUseContext(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)

    device: DeviceContext
    hw_bridge_client: DeviceHardwareClient
    screen_api_client: ScreenApiClient
    llm_config: LLMConfig
    adb_client: Optional[AdbClient] = None
    execution_setup: Optional[ExecutionSetup] = None

    def get_adb_client(self) -> AdbClient:
        if self.adb_client is None:
            raise ValueError("No ADB client in context.")
        return self.adb_client  # type: ignore
```
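`DeviceContext` is the metadata block the agents read platform and screen dimensions from. A minimal sketch of building one by hand; the ADB serial "emulator-5554" is a hypothetical example, not something the SDK assigns:

```python
# Illustrative only: a hand-built DeviceContext for an Android emulator.
from minitap.mobile_use.context import DeviceContext, DevicePlatform

device = DeviceContext(
    host_platform="LINUX",
    mobile_platform=DevicePlatform.ANDROID,
    device_id="emulator-5554",
    device_width=1080,
    device_height=2400,
)
print(device.to_str())
# Host platform: LINUX
# Mobile platform: android
# Device ID: emulator-5554
# Device width: 1080
# Device height: 2400
```

`MobileUseContext.get_adb_client()` raises `ValueError` when no `adb_client` was attached (presumably the iOS path, where ADB does not apply), so callers should only reach for it on Android devices.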