optexity-browser-use 0.9.5 (optexity_browser_use-0.9.5-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- browser_use/__init__.py +157 -0
- browser_use/actor/__init__.py +11 -0
- browser_use/actor/element.py +1175 -0
- browser_use/actor/mouse.py +134 -0
- browser_use/actor/page.py +561 -0
- browser_use/actor/playground/flights.py +41 -0
- browser_use/actor/playground/mixed_automation.py +54 -0
- browser_use/actor/playground/playground.py +236 -0
- browser_use/actor/utils.py +176 -0
- browser_use/agent/cloud_events.py +282 -0
- browser_use/agent/gif.py +424 -0
- browser_use/agent/judge.py +170 -0
- browser_use/agent/message_manager/service.py +473 -0
- browser_use/agent/message_manager/utils.py +52 -0
- browser_use/agent/message_manager/views.py +98 -0
- browser_use/agent/prompts.py +413 -0
- browser_use/agent/service.py +2316 -0
- browser_use/agent/system_prompt.md +185 -0
- browser_use/agent/system_prompt_flash.md +10 -0
- browser_use/agent/system_prompt_no_thinking.md +183 -0
- browser_use/agent/views.py +743 -0
- browser_use/browser/__init__.py +41 -0
- browser_use/browser/cloud/cloud.py +203 -0
- browser_use/browser/cloud/views.py +89 -0
- browser_use/browser/events.py +578 -0
- browser_use/browser/profile.py +1158 -0
- browser_use/browser/python_highlights.py +548 -0
- browser_use/browser/session.py +3225 -0
- browser_use/browser/session_manager.py +399 -0
- browser_use/browser/video_recorder.py +162 -0
- browser_use/browser/views.py +200 -0
- browser_use/browser/watchdog_base.py +260 -0
- browser_use/browser/watchdogs/__init__.py +0 -0
- browser_use/browser/watchdogs/aboutblank_watchdog.py +253 -0
- browser_use/browser/watchdogs/crash_watchdog.py +335 -0
- browser_use/browser/watchdogs/default_action_watchdog.py +2729 -0
- browser_use/browser/watchdogs/dom_watchdog.py +817 -0
- browser_use/browser/watchdogs/downloads_watchdog.py +1277 -0
- browser_use/browser/watchdogs/local_browser_watchdog.py +461 -0
- browser_use/browser/watchdogs/permissions_watchdog.py +43 -0
- browser_use/browser/watchdogs/popups_watchdog.py +143 -0
- browser_use/browser/watchdogs/recording_watchdog.py +126 -0
- browser_use/browser/watchdogs/screenshot_watchdog.py +62 -0
- browser_use/browser/watchdogs/security_watchdog.py +280 -0
- browser_use/browser/watchdogs/storage_state_watchdog.py +335 -0
- browser_use/cli.py +2359 -0
- browser_use/code_use/__init__.py +16 -0
- browser_use/code_use/formatting.py +192 -0
- browser_use/code_use/namespace.py +665 -0
- browser_use/code_use/notebook_export.py +276 -0
- browser_use/code_use/service.py +1340 -0
- browser_use/code_use/system_prompt.md +574 -0
- browser_use/code_use/utils.py +150 -0
- browser_use/code_use/views.py +171 -0
- browser_use/config.py +505 -0
- browser_use/controller/__init__.py +3 -0
- browser_use/dom/enhanced_snapshot.py +161 -0
- browser_use/dom/markdown_extractor.py +169 -0
- browser_use/dom/playground/extraction.py +312 -0
- browser_use/dom/playground/multi_act.py +32 -0
- browser_use/dom/serializer/clickable_elements.py +200 -0
- browser_use/dom/serializer/code_use_serializer.py +287 -0
- browser_use/dom/serializer/eval_serializer.py +478 -0
- browser_use/dom/serializer/html_serializer.py +212 -0
- browser_use/dom/serializer/paint_order.py +197 -0
- browser_use/dom/serializer/serializer.py +1170 -0
- browser_use/dom/service.py +825 -0
- browser_use/dom/utils.py +129 -0
- browser_use/dom/views.py +906 -0
- browser_use/exceptions.py +5 -0
- browser_use/filesystem/__init__.py +0 -0
- browser_use/filesystem/file_system.py +619 -0
- browser_use/init_cmd.py +376 -0
- browser_use/integrations/gmail/__init__.py +24 -0
- browser_use/integrations/gmail/actions.py +115 -0
- browser_use/integrations/gmail/service.py +225 -0
- browser_use/llm/__init__.py +155 -0
- browser_use/llm/anthropic/chat.py +242 -0
- browser_use/llm/anthropic/serializer.py +312 -0
- browser_use/llm/aws/__init__.py +36 -0
- browser_use/llm/aws/chat_anthropic.py +242 -0
- browser_use/llm/aws/chat_bedrock.py +289 -0
- browser_use/llm/aws/serializer.py +257 -0
- browser_use/llm/azure/chat.py +91 -0
- browser_use/llm/base.py +57 -0
- browser_use/llm/browser_use/__init__.py +3 -0
- browser_use/llm/browser_use/chat.py +201 -0
- browser_use/llm/cerebras/chat.py +193 -0
- browser_use/llm/cerebras/serializer.py +109 -0
- browser_use/llm/deepseek/chat.py +212 -0
- browser_use/llm/deepseek/serializer.py +109 -0
- browser_use/llm/exceptions.py +29 -0
- browser_use/llm/google/__init__.py +3 -0
- browser_use/llm/google/chat.py +542 -0
- browser_use/llm/google/serializer.py +120 -0
- browser_use/llm/groq/chat.py +229 -0
- browser_use/llm/groq/parser.py +158 -0
- browser_use/llm/groq/serializer.py +159 -0
- browser_use/llm/messages.py +238 -0
- browser_use/llm/models.py +271 -0
- browser_use/llm/oci_raw/__init__.py +10 -0
- browser_use/llm/oci_raw/chat.py +443 -0
- browser_use/llm/oci_raw/serializer.py +229 -0
- browser_use/llm/ollama/chat.py +97 -0
- browser_use/llm/ollama/serializer.py +143 -0
- browser_use/llm/openai/chat.py +264 -0
- browser_use/llm/openai/like.py +15 -0
- browser_use/llm/openai/serializer.py +165 -0
- browser_use/llm/openrouter/chat.py +211 -0
- browser_use/llm/openrouter/serializer.py +26 -0
- browser_use/llm/schema.py +176 -0
- browser_use/llm/views.py +48 -0
- browser_use/logging_config.py +330 -0
- browser_use/mcp/__init__.py +18 -0
- browser_use/mcp/__main__.py +12 -0
- browser_use/mcp/client.py +544 -0
- browser_use/mcp/controller.py +264 -0
- browser_use/mcp/server.py +1114 -0
- browser_use/observability.py +204 -0
- browser_use/py.typed +0 -0
- browser_use/sandbox/__init__.py +41 -0
- browser_use/sandbox/sandbox.py +637 -0
- browser_use/sandbox/views.py +132 -0
- browser_use/screenshots/__init__.py +1 -0
- browser_use/screenshots/service.py +52 -0
- browser_use/sync/__init__.py +6 -0
- browser_use/sync/auth.py +357 -0
- browser_use/sync/service.py +161 -0
- browser_use/telemetry/__init__.py +51 -0
- browser_use/telemetry/service.py +112 -0
- browser_use/telemetry/views.py +101 -0
- browser_use/tokens/__init__.py +0 -0
- browser_use/tokens/custom_pricing.py +24 -0
- browser_use/tokens/mappings.py +4 -0
- browser_use/tokens/service.py +580 -0
- browser_use/tokens/views.py +108 -0
- browser_use/tools/registry/service.py +572 -0
- browser_use/tools/registry/views.py +174 -0
- browser_use/tools/service.py +1675 -0
- browser_use/tools/utils.py +82 -0
- browser_use/tools/views.py +100 -0
- browser_use/utils.py +670 -0
- optexity_browser_use-0.9.5.dist-info/METADATA +344 -0
- optexity_browser_use-0.9.5.dist-info/RECORD +147 -0
- optexity_browser_use-0.9.5.dist-info/WHEEL +4 -0
- optexity_browser_use-0.9.5.dist-info/entry_points.txt +3 -0
- optexity_browser_use-0.9.5.dist-info/licenses/LICENSE +21 -0

browser_use/agent/views.py
@@ -0,0 +1,743 @@
from __future__ import annotations

import json
import logging
import traceback
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Generic, Literal

from openai import RateLimitError
from pydantic import BaseModel, ConfigDict, Field, ValidationError, create_model, model_validator
from typing_extensions import TypeVar
from uuid_extensions import uuid7str

from browser_use.agent.message_manager.views import MessageManagerState
from browser_use.browser.views import BrowserStateHistory
from browser_use.dom.views import DEFAULT_INCLUDE_ATTRIBUTES, DOMInteractedElement, DOMSelectorMap

# from browser_use.dom.history_tree_processor.service import (
#     DOMElementNode,
#     DOMHistoryElement,
#     HistoryTreeProcessor,
# )
# from browser_use.dom.views import SelectorMap
from browser_use.filesystem.file_system import FileSystemState
from browser_use.llm.base import BaseChatModel
from browser_use.tokens.views import UsageSummary
from browser_use.tools.registry.views import ActionModel

logger = logging.getLogger(__name__)


class AgentSettings(BaseModel):
    """Configuration options for the Agent"""

    use_vision: bool | Literal['auto'] = 'auto'
    vision_detail_level: Literal['auto', 'low', 'high'] = 'auto'
    save_conversation_path: str | Path | None = None
    save_conversation_path_encoding: str | None = 'utf-8'
    max_failures: int = 3
    generate_gif: bool | str = False
    override_system_message: str | None = None
    extend_system_message: str | None = None
    include_attributes: list[str] | None = DEFAULT_INCLUDE_ATTRIBUTES
    max_actions_per_step: int = 4
    use_thinking: bool = True
    flash_mode: bool = False  # If enabled, disables evaluation_previous_goal and next_goal, and sets use_thinking = False
    use_judge: bool = True
    max_history_items: int | None = None

    page_extraction_llm: BaseChatModel | None = None
    calculate_cost: bool = False
    include_tool_call_examples: bool = False
    llm_timeout: int = 60  # Timeout in seconds for LLM calls (auto-detected: 30s for gemini, 90s for o3, 60s default)
    step_timeout: int = 180  # Timeout in seconds for each step
    final_response_after_failure: bool = True  # If True, attempt one final recovery call after max_failures


class AgentState(BaseModel):
    """Holds all state information for an Agent"""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    agent_id: str = Field(default_factory=uuid7str)
    n_steps: int = 1
    consecutive_failures: int = 0
    last_result: list[ActionResult] | None = None
    last_plan: str | None = None
    last_model_output: AgentOutput | None = None

    # Pause/resume state (kept serialisable for checkpointing)
    paused: bool = False
    stopped: bool = False
    session_initialized: bool = False  # Track if session events have been dispatched
    follow_up_task: bool = False  # Track if the agent is a follow-up task

    message_manager_state: MessageManagerState = Field(default_factory=MessageManagerState)
    file_system_state: FileSystemState | None = None


@dataclass
class AgentStepInfo:
    step_number: int
    max_steps: int

    def is_last_step(self) -> bool:
        """Check if this is the last step"""
        return self.step_number >= self.max_steps - 1


class JudgementResult(BaseModel):
    """LLM judgement of agent trace"""

    reasoning: str | None = Field(default=None, description='Explanation of the judgement')
    verdict: bool = Field(description='Whether the trace was successful or not')
    failure_reason: str | None = Field(default=None, description='If the trace was not successful, the reason why')


class ActionResult(BaseModel):
    """Result of executing an action"""

    # For done action
    is_done: bool | None = False
    success: bool | None = None

    # For trace judgement
    judgement: JudgementResult | None = None

    # Error handling - always include in long term memory
    error: str | None = None

    # Files
    attachments: list[str] | None = None  # Files to display in the done message

    # Images (base64 encoded) - separate from text content for efficient handling
    images: list[dict[str, Any]] | None = None  # [{"name": "file.jpg", "data": "base64_string"}]

    # Always include in long term memory
    long_term_memory: str | None = None  # Memory of this action

    # if update_only_read_state is True we add the extracted_content to the agent context only once for the next step
    # if update_only_read_state is False we add the extracted_content to the agent long term memory if no long_term_memory is provided
    extracted_content: str | None = None
    include_extracted_content_only_once: bool = False  # Whether the extracted content should be used to update the read_state

    # Metadata for observability (e.g., click coordinates)
    metadata: dict | None = None

    # Deprecated
    include_in_memory: bool = False  # whether to include in extracted_content inside long_term_memory

    @model_validator(mode='after')
    def validate_success_requires_done(self):
        """Ensure success=True can only be set when is_done=True"""
        if self.success is True and self.is_done is not True:
            raise ValueError(
                'success=True can only be set when is_done=True. '
                'For regular actions that succeed, leave success as None. '
                'Use success=False only for actions that fail.'
            )
        return self


class StepMetadata(BaseModel):
    """Metadata for a single step including timing and token information"""

    step_start_time: float
    step_end_time: float
    step_number: int

    @property
    def duration_seconds(self) -> float:
        """Calculate step duration in seconds"""
        return self.step_end_time - self.step_start_time


class AgentBrain(BaseModel):
    thinking: str | None = None
    evaluation_previous_goal: str
    memory: str
    next_goal: str


class AgentOutput(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True, extra='forbid')

    thinking: str | None = None
    evaluation_previous_goal: str | None = None
    memory: str | None = None
    next_goal: str | None = None
    action: list[ActionModel] = Field(
        ...,
        json_schema_extra={'min_items': 1},  # Ensure at least one action is provided
    )

    @classmethod
    def model_json_schema(cls, **kwargs):
        schema = super().model_json_schema(**kwargs)
        schema['required'] = ['evaluation_previous_goal', 'memory', 'next_goal', 'action']
        return schema

    @property
    def current_state(self) -> AgentBrain:
        """For backward compatibility - returns an AgentBrain with the flattened properties"""
        return AgentBrain(
            thinking=self.thinking,
            evaluation_previous_goal=self.evaluation_previous_goal if self.evaluation_previous_goal else '',
            memory=self.memory if self.memory else '',
            next_goal=self.next_goal if self.next_goal else '',
        )

    @staticmethod
    def type_with_custom_actions(custom_actions: type[ActionModel]) -> type[AgentOutput]:
        """Extend actions with custom actions"""

        model_ = create_model(
            'AgentOutput',
            __base__=AgentOutput,
            action=(
                list[custom_actions],  # type: ignore
                Field(..., description='List of actions to execute', json_schema_extra={'min_items': 1}),
            ),
            __module__=AgentOutput.__module__,
        )
        return model_

    @staticmethod
    def type_with_custom_actions_no_thinking(custom_actions: type[ActionModel]) -> type[AgentOutput]:
        """Extend actions with custom actions and exclude thinking field"""

        class AgentOutputNoThinking(AgentOutput):
            @classmethod
            def model_json_schema(cls, **kwargs):
                schema = super().model_json_schema(**kwargs)
                del schema['properties']['thinking']
                schema['required'] = ['evaluation_previous_goal', 'memory', 'next_goal', 'action']
                return schema

        model = create_model(
            'AgentOutput',
            __base__=AgentOutputNoThinking,
            action=(
                list[custom_actions],  # type: ignore
                Field(..., json_schema_extra={'min_items': 1}),
            ),
            __module__=AgentOutputNoThinking.__module__,
        )

        return model

    @staticmethod
    def type_with_custom_actions_flash_mode(custom_actions: type[ActionModel]) -> type[AgentOutput]:
        """Extend actions with custom actions for flash mode - memory and action fields only"""

        class AgentOutputFlashMode(AgentOutput):
            @classmethod
            def model_json_schema(cls, **kwargs):
                schema = super().model_json_schema(**kwargs)
                # Remove thinking, evaluation_previous_goal, and next_goal fields
                del schema['properties']['thinking']
                del schema['properties']['evaluation_previous_goal']
                del schema['properties']['next_goal']
                # Update required fields to only include remaining properties
                schema['required'] = ['memory', 'action']
                return schema

        model = create_model(
            'AgentOutput',
            __base__=AgentOutputFlashMode,
            action=(
                list[custom_actions],  # type: ignore
                Field(..., json_schema_extra={'min_items': 1}),
            ),
            __module__=AgentOutputFlashMode.__module__,
        )

        return model


class AgentHistory(BaseModel):
    """History item for agent actions"""

    model_output: AgentOutput | None
    result: list[ActionResult]
    state: BrowserStateHistory
    metadata: StepMetadata | None = None
    state_message: str | None = None

    model_config = ConfigDict(arbitrary_types_allowed=True, protected_namespaces=())

    @staticmethod
    def get_interacted_element(model_output: AgentOutput, selector_map: DOMSelectorMap) -> list[DOMInteractedElement | None]:
        elements = []
        for action in model_output.action:
            index = action.get_index()
            if index is not None and index in selector_map:
                el = selector_map[index]
                elements.append(DOMInteractedElement.load_from_enhanced_dom_tree(el))
            else:
                elements.append(None)
        return elements

    def _filter_sensitive_data_from_string(self, value: str, sensitive_data: dict[str, str | dict[str, str]] | None) -> str:
        """Filter out sensitive data from a string value"""
        if not sensitive_data:
            return value

        # Collect all sensitive values, immediately converting old format to new format
        sensitive_values: dict[str, str] = {}

        # Process all sensitive data entries
        for key_or_domain, content in sensitive_data.items():
            if isinstance(content, dict):
                # Already in new format: {domain: {key: value}}
                for key, val in content.items():
                    if val:  # Skip empty values
                        sensitive_values[key] = val
            elif content:  # Old format: {key: value} - convert to new format internally
                # We treat this as if it was {'http*://*': {key_or_domain: content}}
                sensitive_values[key_or_domain] = content

        # If there are no valid sensitive data entries, just return the original value
        if not sensitive_values:
            return value

        # Replace all valid sensitive data values with their placeholder tags
        for key, val in sensitive_values.items():
            value = value.replace(val, f'<secret>{key}</secret>')

        return value

    def _filter_sensitive_data_from_dict(
        self, data: dict[str, Any], sensitive_data: dict[str, str | dict[str, str]] | None
    ) -> dict[str, Any]:
        """Recursively filter sensitive data from a dictionary"""
        if not sensitive_data:
            return data

        filtered_data = {}
        for key, value in data.items():
            if isinstance(value, str):
                filtered_data[key] = self._filter_sensitive_data_from_string(value, sensitive_data)
            elif isinstance(value, dict):
                filtered_data[key] = self._filter_sensitive_data_from_dict(value, sensitive_data)
            elif isinstance(value, list):
                filtered_data[key] = [
                    self._filter_sensitive_data_from_string(item, sensitive_data)
                    if isinstance(item, str)
                    else self._filter_sensitive_data_from_dict(item, sensitive_data)
                    if isinstance(item, dict)
                    else item
                    for item in value
                ]
            else:
                filtered_data[key] = value
        return filtered_data

    def model_dump(self, sensitive_data: dict[str, str | dict[str, str]] | None = None, **kwargs) -> dict[str, Any]:
        """Custom serialization handling circular references and filtering sensitive data"""

        # Handle action serialization
        model_output_dump = None
        if self.model_output:
            action_dump = [action.model_dump(exclude_none=True) for action in self.model_output.action]

            # Filter sensitive data only from input action parameters if sensitive_data is provided
            if sensitive_data:
                action_dump = [
                    self._filter_sensitive_data_from_dict(action, sensitive_data) if 'input' in action else action
                    for action in action_dump
                ]

            model_output_dump = {
                'evaluation_previous_goal': self.model_output.evaluation_previous_goal,
                'memory': self.model_output.memory,
                'next_goal': self.model_output.next_goal,
                'action': action_dump,  # This preserves the actual action data
            }
            # Only include thinking if it's present
            if self.model_output.thinking is not None:
                model_output_dump['thinking'] = self.model_output.thinking

        # Handle result serialization - don't filter ActionResult data
        # as it should contain meaningful information for the agent
        result_dump = [r.model_dump(exclude_none=True) for r in self.result]

        return {
            'model_output': model_output_dump,
            'result': result_dump,
            'state': self.state.to_dict(),
            'metadata': self.metadata.model_dump() if self.metadata else None,
            'state_message': self.state_message,
        }


AgentStructuredOutput = TypeVar('AgentStructuredOutput', bound=BaseModel)


class AgentHistoryList(BaseModel, Generic[AgentStructuredOutput]):
    """List of AgentHistory messages, i.e. the history of the agent's actions and thoughts."""

    history: list[AgentHistory]
    usage: UsageSummary | None = None

    _output_model_schema: type[AgentStructuredOutput] | None = None

    def total_duration_seconds(self) -> float:
        """Get total duration of all steps in seconds"""
        total = 0.0
        for h in self.history:
            if h.metadata:
                total += h.metadata.duration_seconds
        return total

    def __len__(self) -> int:
        """Return the number of history items"""
        return len(self.history)

    def __str__(self) -> str:
        """Representation of the AgentHistoryList object"""
        return f'AgentHistoryList(all_results={self.action_results()}, all_model_outputs={self.model_actions()})'

    def add_item(self, history_item: AgentHistory) -> None:
        """Add a history item to the list"""
        self.history.append(history_item)

    def __repr__(self) -> str:
        """Representation of the AgentHistoryList object"""
        return self.__str__()

    def save_to_file(self, filepath: str | Path, sensitive_data: dict[str, str | dict[str, str]] | None = None) -> None:
        """Save history to JSON file with proper serialization and optional sensitive data filtering"""
        try:
            Path(filepath).parent.mkdir(parents=True, exist_ok=True)
            data = self.model_dump(sensitive_data=sensitive_data)
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(data, f, indent=2)
        except Exception as e:
            raise e

    # def save_as_playwright_script(
    #     self,
    #     output_path: str | Path,
    #     sensitive_data_keys: list[str] | None = None,
    #     browser_config: BrowserConfig | None = None,
    #     context_config: BrowserContextConfig | None = None,
    # ) -> None:
    #     """
    #     Generates a Playwright script based on the agent's history and saves it to a file.
    #     Args:
    #         output_path: The path where the generated Python script will be saved.
    #         sensitive_data_keys: A list of keys used as placeholders for sensitive data
    #                              (e.g., ['username_placeholder', 'password_placeholder']).
    #                              These will be loaded from environment variables in the
    #                              generated script.
    #         browser_config: Configuration of the original Browser instance.
    #         context_config: Configuration of the original BrowserContext instance.
    #     """
    #     from browser_use.agent.playwright_script_generator import PlaywrightScriptGenerator

    #     try:
    #         serialized_history = self.model_dump()['history']
    #         generator = PlaywrightScriptGenerator(serialized_history, sensitive_data_keys, browser_config, context_config)

    #         script_content = generator.generate_script_content()
    #         path_obj = Path(output_path)
    #         path_obj.parent.mkdir(parents=True, exist_ok=True)
    #         with open(path_obj, 'w', encoding='utf-8') as f:
    #             f.write(script_content)
    #     except Exception as e:
    #         raise e

    def model_dump(self, **kwargs) -> dict[str, Any]:
        """Custom serialization that properly uses AgentHistory's model_dump"""
        return {
            'history': [h.model_dump(**kwargs) for h in self.history],
        }

    @classmethod
    def load_from_dict(cls, data: dict[str, Any], output_model: type[AgentOutput]) -> AgentHistoryList:
        # loop through history and validate output_model actions to enrich with custom actions
        for h in data['history']:
            if h['model_output']:
                if isinstance(h['model_output'], dict):
                    h['model_output'] = output_model.model_validate(h['model_output'])
                else:
                    h['model_output'] = None
            if 'interacted_element' not in h['state']:
                h['state']['interacted_element'] = None

        history = cls.model_validate(data)
        return history

    @classmethod
    def load_from_file(cls, filepath: str | Path, output_model: type[AgentOutput]) -> AgentHistoryList:
        """Load history from JSON file"""
        with open(filepath, encoding='utf-8') as f:
            data = json.load(f)
        return cls.load_from_dict(data, output_model)

    def last_action(self) -> None | dict:
        """Last action in history"""
        if self.history and self.history[-1].model_output:
            return self.history[-1].model_output.action[-1].model_dump(exclude_none=True)
        return None

    def errors(self) -> list[str | None]:
        """Get all errors from history, with None for steps without errors"""
        errors = []
        for h in self.history:
            step_errors = [r.error for r in h.result if r.error]

            # each step can have only one error
            errors.append(step_errors[0] if step_errors else None)
        return errors

    def final_result(self) -> None | str:
        """Final result from history"""
        if self.history and self.history[-1].result[-1].extracted_content:
            return self.history[-1].result[-1].extracted_content
        return None

    def is_done(self) -> bool:
        """Check if the agent is done"""
        if self.history and len(self.history[-1].result) > 0:
            last_result = self.history[-1].result[-1]
            return last_result.is_done is True
        return False

    def is_successful(self) -> bool | None:
        """Check if the agent completed successfully - the agent decides in the last step if it was successful or not. None if not done yet."""
        if self.history and len(self.history[-1].result) > 0:
            last_result = self.history[-1].result[-1]
            if last_result.is_done is True:
                return last_result.success
        return None

    def has_errors(self) -> bool:
        """Check if the agent has any non-None errors"""
        return any(error is not None for error in self.errors())

    def judgement(self) -> dict | None:
        """Get the judgement result as a dictionary if it exists"""
        if self.history and len(self.history[-1].result) > 0:
            last_result = self.history[-1].result[-1]
            if last_result.judgement:
                return last_result.judgement.model_dump()
        return None

    def is_judged(self) -> bool:
        """Check if the agent trace has been judged"""
        if self.history and len(self.history[-1].result) > 0:
            last_result = self.history[-1].result[-1]
            return last_result.judgement is not None
        return False

    def is_validated(self) -> bool | None:
        """Check if the judge validated the agent execution (verdict is True). Returns None if not judged yet."""
        if self.history and len(self.history[-1].result) > 0:
            last_result = self.history[-1].result[-1]
            if last_result.judgement:
                return last_result.judgement.verdict
        return None

    def urls(self) -> list[str | None]:
        """Get all unique URLs from history"""
        return [h.state.url if h.state.url is not None else None for h in self.history]

    def screenshot_paths(self, n_last: int | None = None, return_none_if_not_screenshot: bool = True) -> list[str | None]:
        """Get all screenshot paths from history"""
        if n_last == 0:
            return []
        if n_last is None:
            if return_none_if_not_screenshot:
                return [h.state.screenshot_path if h.state.screenshot_path is not None else None for h in self.history]
            else:
                return [h.state.screenshot_path for h in self.history if h.state.screenshot_path is not None]
        else:
            if return_none_if_not_screenshot:
                return [h.state.screenshot_path if h.state.screenshot_path is not None else None for h in self.history[-n_last:]]
            else:
                return [h.state.screenshot_path for h in self.history[-n_last:] if h.state.screenshot_path is not None]

    def screenshots(self, n_last: int | None = None, return_none_if_not_screenshot: bool = True) -> list[str | None]:
        """Get all screenshots from history as base64 strings"""
        if n_last == 0:
            return []

        history_items = self.history if n_last is None else self.history[-n_last:]
        screenshots = []

        for item in history_items:
            screenshot_b64 = item.state.get_screenshot()
            if screenshot_b64:
                screenshots.append(screenshot_b64)
            else:
                if return_none_if_not_screenshot:
                    screenshots.append(None)
                # If return_none_if_not_screenshot is False, we skip None values

        return screenshots

    def action_names(self) -> list[str]:
        """Get all action names from history"""
        action_names = []
        for action in self.model_actions():
            actions = list(action.keys())
            if actions:
                action_names.append(actions[0])
        return action_names

    def model_thoughts(self) -> list[AgentBrain]:
        """Get all thoughts from history"""
        return [h.model_output.current_state for h in self.history if h.model_output]

    def model_outputs(self) -> list[AgentOutput]:
        """Get all model outputs from history"""
        return [h.model_output for h in self.history if h.model_output]

    # get all actions with params
    def model_actions(self) -> list[dict]:
        """Get all actions from history"""
        outputs = []

        for h in self.history:
            if h.model_output:
                # Guard against None interacted_element before zipping
                interacted_elements = h.state.interacted_element or [None] * len(h.model_output.action)
                for action, interacted_element in zip(h.model_output.action, interacted_elements):
                    output = action.model_dump(exclude_none=True)
                    output['interacted_element'] = interacted_element
                    outputs.append(output)
        return outputs

    def action_history(self) -> list[list[dict]]:
        """Get truncated action history with only essential fields"""
        step_outputs = []

        for h in self.history:
            step_actions = []
            if h.model_output:
                # Guard against None interacted_element before zipping
                interacted_elements = h.state.interacted_element or [None] * len(h.model_output.action)
                # Zip actions with interacted elements and results
                for action, interacted_element, result in zip(h.model_output.action, interacted_elements, h.result):
                    action_output = action.model_dump(exclude_none=True)
                    action_output['interacted_element'] = interacted_element
                    # Only keep long_term_memory from result
                    action_output['result'] = result.long_term_memory if result and result.long_term_memory else None
                    step_actions.append(action_output)
            step_outputs.append(step_actions)

        return step_outputs

    def action_results(self) -> list[ActionResult]:
        """Get all results from history"""
        results = []
        for h in self.history:
            results.extend([r for r in h.result if r])
        return results

    def extracted_content(self) -> list[str]:
        """Get all extracted content from history"""
        content = []
        for h in self.history:
            content.extend([r.extracted_content for r in h.result if r.extracted_content])
        return content

    def model_actions_filtered(self, include: list[str] | None = None) -> list[dict]:
        """Get all model actions from history as JSON"""
        if include is None:
            include = []
        outputs = self.model_actions()
        result = []
        for o in outputs:
            for i in include:
                if i == list(o.keys())[0]:
                    result.append(o)
        return result

    def number_of_steps(self) -> int:
        """Get the number of steps in the history"""
        return len(self.history)

    def agent_steps(self) -> list[str]:
        """Format agent history as readable step descriptions for judge evaluation."""
        steps = []

        # Iterate through history items (each is an AgentHistory)
        for i, h in enumerate(self.history):
            step_text = f'Step {i + 1}:\n'

            # Get actions from model_output
            if h.model_output and h.model_output.action:
                # Use existing model_dump to get action dicts
                actions_list = [action.model_dump(exclude_none=True) for action in h.model_output.action]
                action_json = json.dumps(actions_list, indent=1)
                step_text += f'Actions: {action_json}\n'

            # Get results (already a list[ActionResult] in h.result)
            if h.result:
                for j, result in enumerate(h.result):
                    if result.extracted_content:
                        content = str(result.extracted_content)
                        step_text += f'Result {j + 1}: {content}\n'

                    if result.error:
                        error = str(result.error)
                        step_text += f'Error {j + 1}: {error}\n'

            steps.append(step_text)

        return steps

    @property
    def structured_output(self) -> AgentStructuredOutput | None:
        """Get the structured output from the history

        Returns:
            The structured output if both final_result and _output_model_schema are available,
            otherwise None
        """
        final_result = self.final_result()
        if final_result is not None and self._output_model_schema is not None:
            return self._output_model_schema.model_validate_json(final_result)

        return None


class AgentError:
    """Container for agent error handling"""

    VALIDATION_ERROR = 'Invalid model output format. Please follow the correct schema.'
    RATE_LIMIT_ERROR = 'Rate limit reached. Waiting before retry.'
    NO_VALID_ACTION = 'No valid action found'

    @staticmethod
    def format_error(error: Exception, include_trace: bool = False) -> str:
        """Format error message based on error type and optionally include trace"""
        message = ''
        if isinstance(error, ValidationError):
            return f'{AgentError.VALIDATION_ERROR}\nDetails: {str(error)}'
        if isinstance(error, RateLimitError):
            return AgentError.RATE_LIMIT_ERROR

        # Handle LLM response validation errors from llm_use
        error_str = str(error)
        if 'LLM response missing required fields' in error_str or 'Expected format: AgentOutput' in error_str:
            # Extract the main error message without the huge stacktrace
            lines = error_str.split('\n')
            main_error = lines[0] if lines else error_str

            # Provide a clearer error message
            helpful_msg = f'{main_error}\n\nThe previous response had an invalid output structure. Please stick to the required output format. \n\n'

            if include_trace:
                helpful_msg += f'\n\nFull stacktrace:\n{traceback.format_exc()}'

            return helpful_msg

        if include_trace:
            return f'{str(error)}\nStacktrace:\n{traceback.format_exc()}'
        return f'{str(error)}'
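
For orientation only, a minimal usage sketch of the models defined in browser_use/agent/views.py above. It is not part of the packaged diff; it assumes the wheel (and pydantic) is installed so the import path resolves, exercises only behaviour visible in the file shown here, and the output path is an arbitrary illustrative choice.

# Illustrative sketch, not part of the package diff above.
# Assumes `optexity-browser-use` is installed so `browser_use.agent.views`
# matches the file shown in this diff; nothing beyond that file is assumed.
from pydantic import ValidationError

from browser_use.agent.views import ActionResult, AgentError, AgentHistoryList, AgentOutput, AgentSettings

# AgentSettings is a plain pydantic model; every field has a default value.
settings = AgentSettings(max_failures=5, flash_mode=True)
print(settings.use_thinking)  # still True here; flash_mode's effect described in the field comment is applied outside this model

# ActionResult's model_validator only permits success=True together with is_done=True.
done = ActionResult(is_done=True, success=True, long_term_memory='Task finished')
try:
    ActionResult(success=True)  # is_done defaults to False, so this is rejected
except ValidationError as e:
    print(AgentError.format_error(e))  # "Invalid model output format. ..." plus details

# AgentHistoryList serializes to {'history': [...]} and can be round-tripped from disk.
history = AgentHistoryList(history=[])
history.save_to_file('./tmp/agent_history.json')  # hypothetical output path
restored = AgentHistoryList.load_from_file('./tmp/agent_history.json', AgentOutput)
print(len(restored), restored.is_done())  # 0 False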