sentienceapi 0.95.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sentienceapi might be problematic.
- sentience/__init__.py +253 -0
- sentience/_extension_loader.py +195 -0
- sentience/action_executor.py +215 -0
- sentience/actions.py +1020 -0
- sentience/agent.py +1181 -0
- sentience/agent_config.py +46 -0
- sentience/agent_runtime.py +424 -0
- sentience/asserts/__init__.py +70 -0
- sentience/asserts/expect.py +621 -0
- sentience/asserts/query.py +383 -0
- sentience/async_api.py +108 -0
- sentience/backends/__init__.py +137 -0
- sentience/backends/actions.py +343 -0
- sentience/backends/browser_use_adapter.py +241 -0
- sentience/backends/cdp_backend.py +393 -0
- sentience/backends/exceptions.py +211 -0
- sentience/backends/playwright_backend.py +194 -0
- sentience/backends/protocol.py +216 -0
- sentience/backends/sentience_context.py +469 -0
- sentience/backends/snapshot.py +427 -0
- sentience/base_agent.py +196 -0
- sentience/browser.py +1215 -0
- sentience/browser_evaluator.py +299 -0
- sentience/canonicalization.py +207 -0
- sentience/cli.py +130 -0
- sentience/cloud_tracing.py +807 -0
- sentience/constants.py +6 -0
- sentience/conversational_agent.py +543 -0
- sentience/element_filter.py +136 -0
- sentience/expect.py +188 -0
- sentience/extension/background.js +104 -0
- sentience/extension/content.js +161 -0
- sentience/extension/injected_api.js +914 -0
- sentience/extension/manifest.json +36 -0
- sentience/extension/pkg/sentience_core.d.ts +51 -0
- sentience/extension/pkg/sentience_core.js +323 -0
- sentience/extension/pkg/sentience_core_bg.wasm +0 -0
- sentience/extension/pkg/sentience_core_bg.wasm.d.ts +10 -0
- sentience/extension/release.json +115 -0
- sentience/formatting.py +15 -0
- sentience/generator.py +202 -0
- sentience/inspector.py +367 -0
- sentience/llm_interaction_handler.py +191 -0
- sentience/llm_provider.py +875 -0
- sentience/llm_provider_utils.py +120 -0
- sentience/llm_response_builder.py +153 -0
- sentience/models.py +846 -0
- sentience/ordinal.py +280 -0
- sentience/overlay.py +222 -0
- sentience/protocols.py +228 -0
- sentience/query.py +303 -0
- sentience/read.py +188 -0
- sentience/recorder.py +589 -0
- sentience/schemas/trace_v1.json +335 -0
- sentience/screenshot.py +100 -0
- sentience/sentience_methods.py +86 -0
- sentience/snapshot.py +706 -0
- sentience/snapshot_diff.py +126 -0
- sentience/text_search.py +262 -0
- sentience/trace_event_builder.py +148 -0
- sentience/trace_file_manager.py +197 -0
- sentience/trace_indexing/__init__.py +27 -0
- sentience/trace_indexing/index_schema.py +199 -0
- sentience/trace_indexing/indexer.py +414 -0
- sentience/tracer_factory.py +322 -0
- sentience/tracing.py +449 -0
- sentience/utils/__init__.py +40 -0
- sentience/utils/browser.py +46 -0
- sentience/utils/element.py +257 -0
- sentience/utils/formatting.py +59 -0
- sentience/utils.py +296 -0
- sentience/verification.py +380 -0
- sentience/visual_agent.py +2058 -0
- sentience/wait.py +139 -0
- sentienceapi-0.95.0.dist-info/METADATA +984 -0
- sentienceapi-0.95.0.dist-info/RECORD +82 -0
- sentienceapi-0.95.0.dist-info/WHEEL +5 -0
- sentienceapi-0.95.0.dist-info/entry_points.txt +2 -0
- sentienceapi-0.95.0.dist-info/licenses/LICENSE +24 -0
- sentienceapi-0.95.0.dist-info/licenses/LICENSE-APACHE +201 -0
- sentienceapi-0.95.0.dist-info/licenses/LICENSE-MIT +21 -0
- sentienceapi-0.95.0.dist-info/top_level.txt +1 -0
sentience/conversational_agent.py
ADDED
@@ -0,0 +1,543 @@
"""
Conversational Agent: Natural language interface for Sentience SDK
Enables end users to control web automation using plain English
"""

import json
import time
from typing import Any

from .agent import SentienceAgent
from .browser import SentienceBrowser
from .llm_provider import LLMProvider
from .models import ExtractionResult, Snapshot, SnapshotOptions, StepExecutionResult
from .protocols import BrowserProtocol
from .snapshot import snapshot


class ConversationalAgent:
    """
    Natural language agent that translates user intent into SDK actions
    and returns human-readable results.

    This is Layer 4 - the highest abstraction level for non-technical users.

    Example:
        >>> agent = ConversationalAgent(browser, llm)
        >>> result = agent.execute("Search for magic mouse on google.com")
        >>> print(result)
        "I searched for 'magic mouse' on Google and found several results.
        The top result is from amazon.com selling the Apple Magic Mouse 2 for $79."
    """

    def __init__(
        self,
        browser: SentienceBrowser | BrowserProtocol,
        llm: LLMProvider,
        verbose: bool = True,
    ):
        """
        Initialize the conversational agent

        Args:
            browser: SentienceBrowser instance or BrowserProtocol-compatible object
                (for testing, mock objects that implement BrowserProtocol can be used)
            llm: LLM provider (OpenAI, Anthropic, LocalLLM, etc.)
            verbose: Print step-by-step execution logs (default: True)
        """
        self.browser = browser
        self.llm = llm
        self.verbose = verbose

        # Underlying technical agent
        self.technical_agent = SentienceAgent(browser, llm, verbose=False)

        # Conversation history and context
        self.conversation_history: list[dict[str, Any]] = []
        self.execution_context: dict[str, Any] = {
            "current_url": None,
            "last_action": None,
            "discovered_elements": [],
            "session_data": {},
        }

    def execute(self, user_input: str) -> str:
        """
        Execute a natural language command and return a natural language result

        Args:
            user_input: Natural language instruction (e.g., "Search for magic mouse")

        Returns:
            Human-readable result description

        Example:
            >>> agent.execute("Go to google.com and search for magic mouse")
            "I navigated to google.com, searched for 'magic mouse', and found 10 results.
            The top result is from amazon.com selling Magic Mouse 2 for $79."
        """
        if self.verbose:
            print(f"\n{'=' * 70}")
            print(f"👤 User: {user_input}")
            print(f"{'=' * 70}")

        start_time = time.time()

        # Step 1: Plan the execution (break down into atomic steps)
        plan = self._create_plan(user_input)

        if self.verbose:
            print("\n📋 Execution Plan:")
            for i, step in enumerate(plan["steps"], 1):
                print(f"  {i}. {step['description']}")

        # Step 2: Execute each step
        execution_results = []
        for step in plan["steps"]:
            step_result = self._execute_step(step)
            execution_results.append(step_result)

            if not step_result.success:
                # Early exit on failure
                if self.verbose:
                    print(f"⚠️ Step failed: {step['description']}")
                break

        # Step 3: Synthesize natural language response
        response = self._synthesize_response(user_input, plan, execution_results)

        duration_ms = int((time.time() - start_time) * 1000)

        # Step 4: Update conversation history
        self.conversation_history.append(
            {
                "user_input": user_input,
                "plan": plan,
                "results": execution_results,
                "response": response,
                "duration_ms": duration_ms,
            }
        )

        if self.verbose:
            print(f"\n🤖 Agent: {response}")
            print(f"⏱️ Completed in {duration_ms}ms\n")

        return response

    def _create_plan(self, user_input: str) -> dict[str, Any]:
        """
        Use the LLM to break down user input into atomic executable steps

        Args:
            user_input: Natural language command

        Returns:
            Plan dictionary with a list of atomic steps
        """
        # Get current page context
        current_url = self.browser.page.url if self.browser.page else "None"

        system_prompt = """You are a web automation planning assistant.

Your job is to analyze a natural language request and break it down into atomic steps
that can be executed by a web automation agent.

AVAILABLE ACTIONS:
1. NAVIGATE - Go to a URL
2. FIND_AND_CLICK - Find and click an element by description
3. FIND_AND_TYPE - Find input field and type text
4. PRESS_KEY - Press a keyboard key (Enter, Escape, etc.)
5. WAIT - Wait for page to load or element to appear
6. EXTRACT_INFO - Extract specific information from the page
7. VERIFY - Verify a condition is met

RESPONSE FORMAT (JSON):
{
  "intent": "brief summary of user intent",
  "steps": [
    {
      "action": "NAVIGATE" | "FIND_AND_CLICK" | "FIND_AND_TYPE" | "PRESS_KEY" | "WAIT" | "EXTRACT_INFO" | "VERIFY",
      "description": "human-readable description",
      "parameters": {
        "url": "https://...",
        "element_description": "search box",
        "text": "magic mouse",
        "key": "Enter",
        "duration": 2.0,
        "info_type": "product link",
        "condition": "page contains results"
      }
    }
  ],
  "expected_outcome": "what success looks like"
}

IMPORTANT: Return ONLY valid JSON, no markdown, no code blocks."""

        user_prompt = f"""Current URL: {current_url}

User Request: {user_input}

Create a step-by-step execution plan."""

        try:
            response = self.llm.generate(
                system_prompt,
                user_prompt,
                json_mode=self.llm.supports_json_mode(),
                temperature=0.0,
            )

            # Parse JSON response
            plan = json.loads(response.content)
            return plan

        except json.JSONDecodeError as e:
            # Fallback: create a simple single-step plan
            if self.verbose:
                print(f"⚠️ JSON parsing failed, using fallback plan: {e}")

            return {
                "intent": user_input,
                "steps": [
                    {
                        "action": "FIND_AND_CLICK",
                        "description": user_input,
                        "parameters": {"element_description": user_input},
                    }
                ],
                "expected_outcome": "Complete user request",
            }

    def _execute_step(self, step: dict[str, Any]) -> StepExecutionResult:
        """
        Execute a single atomic step from the plan

        Args:
            step: Step dictionary with action and parameters

        Returns:
            Execution result with success status and data
        """
        action = step["action"]
        params = step.get("parameters", {})

        if self.verbose:
            print(f"\n⚙️ Executing: {step['description']}")

        try:
            if action == "NAVIGATE":
                url = params["url"]
                # Add https:// if missing
                if not url.startswith(("http://", "https://")):
                    url = "https://" + url

                self.browser.page.goto(url, wait_until="domcontentloaded")
                self.execution_context["current_url"] = url
                time.sleep(1)  # Brief wait for the page to settle

                return StepExecutionResult(success=True, action=action, data={"url": url})

            elif action == "FIND_AND_CLICK":
                element_desc = params["element_description"]
                # Use the technical agent to find and click (returns AgentActionResult)
                result = self.technical_agent.act(f"Click the {element_desc}")
                return StepExecutionResult(
                    success=result.success,
                    action=action,
                    data=result.model_dump(),  # Convert to dict for flexibility
                )

            elif action == "FIND_AND_TYPE":
                element_desc = params["element_description"]
                text = params["text"]
                # Use the technical agent to find the input and type (returns AgentActionResult)
                result = self.technical_agent.act(f"Type '{text}' into {element_desc}")
                return StepExecutionResult(
                    success=result.success,
                    action=action,
                    data={"text": text, "result": result.model_dump()},
                )

            elif action == "PRESS_KEY":
                key = params["key"]
                result = self.technical_agent.act(f"Press {key} key")
                return StepExecutionResult(
                    success=result.success,
                    action=action,
                    data={"key": key, "result": result.model_dump()},
                )

            elif action == "WAIT":
                duration = params.get("duration", 2.0)
                time.sleep(duration)
                return StepExecutionResult(success=True, action=action, data={"duration": duration})

            elif action == "EXTRACT_INFO":
                info_type = params["info_type"]
                # Get a snapshot of the current page and extract info
                snap = snapshot(self.browser, SnapshotOptions(limit=50))

                # Use the LLM to extract specific information
                extracted = self._extract_information(snap, info_type)

                return StepExecutionResult(
                    success=True,
                    action=action,
                    data={
                        "extracted": (
                            extracted.model_dump()
                            if isinstance(extracted, ExtractionResult)
                            else extracted
                        ),
                        "info_type": info_type,
                    },
                )

            elif action == "VERIFY":
                condition = params["condition"]
                # Verify the condition using the current page state
                is_verified = self._verify_condition(condition)
                return StepExecutionResult(
                    success=is_verified,
                    action=action,
                    data={"condition": condition, "verified": is_verified},
                )

            else:
                raise ValueError(f"Unknown action: {action}")

        except Exception as e:
            if self.verbose:
                print(f"❌ Step failed: {e}")
            return StepExecutionResult(success=False, action=action, error=str(e))

    def _extract_information(
        self, snap: Snapshot, info_type: str
    ) -> ExtractionResult | dict[str, Any]:
        """
        Extract specific information from a snapshot using the LLM

        Args:
            snap: Snapshot object
            info_type: Type of info to extract (e.g., "product link", "price")

        Returns:
            Extracted information as an ExtractionResult model or plain dict
        """
        # Build context from the snapshot
        elements_text = "\n".join(
            [
                f"[{el.id}] {el.role}: {el.text} (importance: {el.importance})"
                for el in snap.elements[:30]  # Top 30 elements
            ]
        )

        system_prompt = f"""Extract {info_type} from the following page elements.

ELEMENTS:
{elements_text}

Return JSON with extracted information:
{{
  "found": true/false,
  "data": {{
    // extracted information fields
  }},
  "summary": "brief description of what was found"
}}"""

        user_prompt = f"Extract {info_type} from the elements above."

        try:
            response = self.llm.generate(
                system_prompt, user_prompt, json_mode=self.llm.supports_json_mode()
            )
            return json.loads(response.content)
        except Exception:
            return {
                "found": False,
                "data": {},
                "summary": "Failed to extract information",
            }

    def _verify_condition(self, condition: str) -> bool:
        """
        Verify that a condition is met on the current page

        Args:
            condition: Natural language condition to verify

        Returns:
            True if the condition is met, False otherwise
        """
        try:
            snap = snapshot(self.browser, SnapshotOptions(limit=30))

            # Build context
            elements_text = "\n".join([f"{el.role}: {el.text}" for el in snap.elements[:20]])

            system_prompt = f"""Verify if the following condition is met based on page elements.

CONDITION: {condition}

PAGE ELEMENTS:
{elements_text}

Return JSON:
{{
  "verified": true/false,
  "reasoning": "explanation"
}}"""

            response = self.llm.generate(system_prompt, "", json_mode=self.llm.supports_json_mode())
            result = json.loads(response.content)
            return result.get("verified", False)
        except Exception:
            return False

    def _synthesize_response(
        self,
        user_input: str,
        plan: dict[str, Any],
        execution_results: list[StepExecutionResult | dict[str, Any]],
    ) -> str:
        """
        Synthesize a natural language response from execution results

        Args:
            user_input: Original user input
            plan: Execution plan
            execution_results: List of step execution results

        Returns:
            Human-readable response string
        """
        # Build a summary of what happened
        successful_steps = [
            r
            for r in execution_results
            if (isinstance(r, StepExecutionResult) and r.success)
            or (isinstance(r, dict) and r.get("success", False))
        ]
        failed_steps = [
            r
            for r in execution_results
            if (isinstance(r, StepExecutionResult) and not r.success)
            or (isinstance(r, dict) and not r.get("success", False))
        ]

        # Extract key data
        extracted_data = []
        for result in execution_results:
            if isinstance(result, StepExecutionResult):
                action = result.action
                data = result.data
            else:
                action = result.get("action")
                data = result.get("data", {})

            if action == "EXTRACT_INFO":
                extracted = data.get("extracted", {})
                if isinstance(extracted, dict):
                    extracted_data.append(extracted)
                else:
                    # If it's an ExtractionResult model, convert to dict
                    extracted_data.append(
                        extracted.model_dump() if hasattr(extracted, "model_dump") else extracted
                    )

        # Use the LLM to create a natural response
        system_prompt = """You are a helpful assistant that summarizes web automation results
in natural, conversational language.

Your job is to take technical execution results and convert them into a friendly,
human-readable response that answers the user's original request.

Be concise but informative. Include key findings or data discovered.
If the task failed, explain what went wrong in simple terms.

IMPORTANT: Return only the natural language response, no JSON, no markdown."""

        results_summary = {
            "user_request": user_input,
            "plan_intent": plan.get("intent"),
            "total_steps": len(execution_results),
            "successful_steps": len(successful_steps),
            "failed_steps": len(failed_steps),
            "extracted_data": extracted_data,
            "final_url": self.browser.page.url if self.browser.page else None,
        }

        user_prompt = f"""Summarize these automation results in 1-3 natural sentences:

{json.dumps(results_summary, indent=2)}

Respond as if you're talking to a user, not listing technical details."""

        try:
            response = self.llm.generate(system_prompt, user_prompt, temperature=0.3)
            return response.content.strip()
        except Exception:
            # Fallback response
            if failed_steps:
                return f"I attempted to {user_input}, but encountered an error during execution."
            else:
                return f"I completed your request: {user_input}"

    def chat(self, message: str) -> str:
        """
        Conversational interface with context awareness

        Args:
            message: User message (can reference previous context)

        Returns:
            Agent response

        Example:
            >>> agent.chat("Go to google.com")
            "I've navigated to google.com"
            >>> agent.chat("Search for magic mouse")  # Contextual
            "I searched for 'magic mouse' and found 10 results"
        """
        return self.execute(message)

    def get_summary(self) -> str:
        """
        Get a summary of the entire conversation/session

        Returns:
            Natural language summary of all actions taken
        """
        if not self.conversation_history:
            return "No actions have been performed yet."

        system_prompt = """Summarize this web automation session in a brief, natural paragraph.
Focus on what was accomplished and key findings."""

        session_data = {
            "total_interactions": len(self.conversation_history),
            "actions": [
                {"request": h["user_input"], "outcome": h["response"]}
                for h in self.conversation_history
            ],
        }

        user_prompt = f"Summarize this session:\n{json.dumps(session_data, indent=2)}"

        try:
            summary = self.llm.generate(system_prompt, user_prompt)
            return summary.content.strip()
        except Exception as ex:
            return f"Session with {len(self.conversation_history)} interactions completed with exception: {ex}"

    def clear_history(self):
        """Clear conversation history"""
        self.conversation_history.clear()
        self.technical_agent.clear_history()
        self.execution_context = {
            "current_url": None,
            "last_action": None,
            "discovered_elements": [],
            "session_data": {},
        }
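The docstrings above imply the following end-to-end usage. This is a minimal sketch based only on the signatures shown in this diff; the SentienceBrowser construction and the concrete LLMProvider subclass (a hypothetical OpenAIProvider with a model argument) are assumptions, not part of this release.

# Minimal usage sketch for ConversationalAgent (Layer 4).
# Only ConversationalAgent's own API is taken from the diff above;
# the browser and provider setup below are assumptions.
from sentience.browser import SentienceBrowser
from sentience.conversational_agent import ConversationalAgent
from sentience.llm_provider import OpenAIProvider  # hypothetical provider class, not confirmed by this diff

browser = SentienceBrowser()            # assumed default construction
llm = OpenAIProvider(model="gpt-4o")    # assumed constructor signature

agent = ConversationalAgent(browser, llm, verbose=True)

# One-shot natural language command: plan -> execute steps -> synthesize a reply
print(agent.execute("Go to google.com and search for magic mouse"))

# chat() is a context-aware alias that delegates to execute()
print(agent.chat("Open the first result"))

# Session summary and cleanup
print(agent.get_summary())
agent.clear_history()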
sentience/element_filter.py
ADDED
@@ -0,0 +1,136 @@
"""
Element filtering utilities for agent-based element selection.

This module provides centralized element filtering logic to reduce duplication
across agent implementations.
"""

from .models import Element, Snapshot


class ElementFilter:
    """
    Centralized element filtering logic for agent-based element selection.

    Provides static methods for filtering elements based on:
    - Importance scores
    - Goal-based keyword matching
    - Role and visual properties
    """

    # Common stopwords for keyword extraction
    STOPWORDS = {
        "the", "a", "an", "and", "or", "but", "in", "on", "at",
        "to", "for", "of", "with", "by", "from", "as", "is", "was",
    }

    @staticmethod
    def filter_by_importance(
        snapshot: Snapshot,
        max_elements: int = 50,
    ) -> list[Element]:
        """
        Filter elements by importance score (simple top-N selection).

        Args:
            snapshot: Current page snapshot
            max_elements: Maximum number of elements to return

        Returns:
            Top N elements sorted by importance score
        """
        # Filter out REMOVED elements - they're not actionable and shouldn't be in LLM context
        elements = [el for el in snapshot.elements if el.diff_status != "REMOVED"]
        # Elements are already sorted by importance in the snapshot
        return elements[:max_elements]

    @staticmethod
    def filter_by_goal(
        snapshot: Snapshot,
        goal: str | None,
        max_elements: int = 100,
    ) -> list[Element]:
        """
        Filter elements from a snapshot based on goal context.

        Applies goal-based keyword matching to boost relevant elements
        and filter out irrelevant ones.

        Args:
            snapshot: Current page snapshot
            goal: User's goal (can inform filtering)
            max_elements: Maximum number of elements to return

        Returns:
            Filtered list of elements sorted by boosted importance score
        """
        # Filter out REMOVED elements - they're not actionable and shouldn't be in LLM context
        elements = [el for el in snapshot.elements if el.diff_status != "REMOVED"]

        # If no goal provided, return all elements (up to the limit)
        if not goal:
            return elements[:max_elements]

        goal_lower = goal.lower()

        # Extract keywords from the goal
        keywords = ElementFilter._extract_keywords(goal_lower)

        # Boost elements matching goal keywords
        scored_elements = []
        for el in elements:
            score = el.importance

            # Boost if element text matches the goal
            if el.text and any(kw in el.text.lower() for kw in keywords):
                score += 0.3

            # Boost if role matches the goal intent
            if "click" in goal_lower and el.visual_cues.is_clickable:
                score += 0.2
            if "type" in goal_lower and el.role in ["textbox", "searchbox"]:
                score += 0.2
            if "search" in goal_lower:
                # Penalize non-interactive elements for search tasks
                if el.role in ["link", "img"] and not el.visual_cues.is_primary:
                    score -= 0.5

            scored_elements.append((score, el))

        # Re-sort by boosted score
        scored_elements.sort(key=lambda x: x[0], reverse=True)
        elements = [el for _, el in scored_elements]

        return elements[:max_elements]

    @staticmethod
    def _extract_keywords(text: str) -> list[str]:
        """
        Extract meaningful keywords from goal text.

        Args:
            text: Text to extract keywords from

        Returns:
            List of keywords (non-stopwords, length > 2)
        """
        words = text.split()
        return [w for w in words if w not in ElementFilter.STOPWORDS and len(w) > 2]
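A short sketch of how ElementFilter.filter_by_goal might feed an LLM prompt. The snapshot call and browser construction are assumed to work as in conversational_agent.py above; only the ElementFilter API itself is taken from this file.

# Sketch: goal-based filtering before building an LLM prompt.
# `SentienceBrowser()` default construction is an assumption.
from sentience.browser import SentienceBrowser
from sentience.element_filter import ElementFilter
from sentience.models import SnapshotOptions
from sentience.snapshot import snapshot

browser = SentienceBrowser()  # assumed default construction
snap = snapshot(browser, SnapshotOptions(limit=100))

# Keep only the elements most relevant to the user's goal
candidates = ElementFilter.filter_by_goal(snap, goal="search for magic mouse", max_elements=30)

# Same element rendering used by ConversationalAgent._extract_information above
elements_text = "\n".join(f"[{el.id}] {el.role}: {el.text}" for el in candidates)
# elements_text can now be embedded in a planning or extraction prompt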