sentienceapi 0.90.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sentienceapi might be problematic. Click here for more details.

Files changed (50) hide show
  1. sentience/__init__.py +153 -0
  2. sentience/_extension_loader.py +40 -0
  3. sentience/actions.py +837 -0
  4. sentience/agent.py +1246 -0
  5. sentience/agent_config.py +43 -0
  6. sentience/async_api.py +101 -0
  7. sentience/base_agent.py +194 -0
  8. sentience/browser.py +1037 -0
  9. sentience/cli.py +130 -0
  10. sentience/cloud_tracing.py +382 -0
  11. sentience/conversational_agent.py +509 -0
  12. sentience/expect.py +188 -0
  13. sentience/extension/background.js +233 -0
  14. sentience/extension/content.js +298 -0
  15. sentience/extension/injected_api.js +1473 -0
  16. sentience/extension/manifest.json +36 -0
  17. sentience/extension/pkg/sentience_core.d.ts +51 -0
  18. sentience/extension/pkg/sentience_core.js +529 -0
  19. sentience/extension/pkg/sentience_core_bg.wasm +0 -0
  20. sentience/extension/pkg/sentience_core_bg.wasm.d.ts +10 -0
  21. sentience/extension/release.json +115 -0
  22. sentience/extension/test-content.js +4 -0
  23. sentience/formatting.py +59 -0
  24. sentience/generator.py +202 -0
  25. sentience/inspector.py +365 -0
  26. sentience/llm_provider.py +637 -0
  27. sentience/models.py +412 -0
  28. sentience/overlay.py +222 -0
  29. sentience/query.py +303 -0
  30. sentience/read.py +185 -0
  31. sentience/recorder.py +589 -0
  32. sentience/schemas/trace_v1.json +216 -0
  33. sentience/screenshot.py +100 -0
  34. sentience/snapshot.py +516 -0
  35. sentience/text_search.py +290 -0
  36. sentience/trace_indexing/__init__.py +27 -0
  37. sentience/trace_indexing/index_schema.py +111 -0
  38. sentience/trace_indexing/indexer.py +357 -0
  39. sentience/tracer_factory.py +211 -0
  40. sentience/tracing.py +285 -0
  41. sentience/utils.py +296 -0
  42. sentience/wait.py +137 -0
  43. sentienceapi-0.90.17.dist-info/METADATA +917 -0
  44. sentienceapi-0.90.17.dist-info/RECORD +50 -0
  45. sentienceapi-0.90.17.dist-info/WHEEL +5 -0
  46. sentienceapi-0.90.17.dist-info/entry_points.txt +2 -0
  47. sentienceapi-0.90.17.dist-info/licenses/LICENSE +24 -0
  48. sentienceapi-0.90.17.dist-info/licenses/LICENSE-APACHE +201 -0
  49. sentienceapi-0.90.17.dist-info/licenses/LICENSE-MIT +21 -0
  50. sentienceapi-0.90.17.dist-info/top_level.txt +1 -0
sentience/tracing.py ADDED
@@ -0,0 +1,285 @@
1
+ """
2
+ Trace event writer for Sentience agents.
3
+
4
+ Provides abstract interface and JSONL implementation for emitting trace events.
5
+ """
6
+
7
+ import json
8
+ import time
9
+ from abc import ABC, abstractmethod
10
+ from dataclasses import dataclass, field
11
+ from pathlib import Path
12
+ from typing import Any
13
+
14
+
15
+ @dataclass
16
+ class TraceEvent:
17
+ """
18
+ Trace event data structure.
19
+
20
+ Represents a single event in the agent execution trace.
21
+ """
22
+
23
+ v: int # Schema version
24
+ type: str # Event type
25
+ ts: str # ISO 8601 timestamp
26
+ run_id: str # UUID for the run
27
+ seq: int # Sequence number
28
+ data: dict[str, Any] # Event payload
29
+ step_id: str | None = None # UUID for the step (if step-scoped)
30
+ ts_ms: int | None = None # Unix timestamp in milliseconds
31
+
32
+ def to_dict(self) -> dict[str, Any]:
33
+ """Convert to dictionary for JSON serialization."""
34
+ result = {
35
+ "v": self.v,
36
+ "type": self.type,
37
+ "ts": self.ts,
38
+ "run_id": self.run_id,
39
+ "seq": self.seq,
40
+ "data": self.data,
41
+ }
42
+
43
+ if self.step_id is not None:
44
+ result["step_id"] = self.step_id
45
+
46
+ if self.ts_ms is not None:
47
+ result["ts_ms"] = self.ts_ms
48
+
49
+ return result
50
+
51
+
52
class TraceSink(ABC):
    """
    Abstract destination for trace events.

    Concrete sinks may persist events to files, databases, or remote services.
    """

    @abstractmethod
    def emit(self, event: dict[str, Any]) -> None:
        """
        Write a single trace event.

        Args:
            event: Event dictionary (from TraceEvent.to_dict())
        """

    @abstractmethod
    def close(self) -> None:
        """Flush any buffered data and release resources held by the sink."""
73
+
74
+
75
class JsonlTraceSink(TraceSink):
    """
    Trace sink that appends events to a JSONL file (one JSON object per line).
    """

    def __init__(self, path: str | Path):
        """
        Initialize JSONL sink.

        Args:
            path: File path to write traces to
        """
        self.path = Path(path)
        self.path.parent.mkdir(parents=True, exist_ok=True)
        # Append mode with line buffering so each event reaches disk promptly.
        self._file = open(self.path, "a", encoding="utf-8", buffering=1)

    def emit(self, event: dict[str, Any]) -> None:
        """
        Serialize one event and append it as a single JSONL line.

        Args:
            event: Event dictionary
        """
        self._file.write(json.dumps(event, ensure_ascii=False) + "\n")

    def close(self) -> None:
        """Close the file, then build the companion trace index."""
        file_handle = getattr(self, "_file", None)
        if file_handle is not None and not file_handle.closed:
            file_handle.close()
        # Index generation happens after the file is flushed and closed.
        self._generate_index()

    def _generate_index(self) -> None:
        """Generate the trace index file (best-effort; failures are non-fatal)."""
        try:
            from .trace_indexing import write_trace_index

            write_trace_index(str(self.path))
        except Exception as e:
            # Non-fatal: report but keep going.
            print(f"⚠️ Failed to generate trace index: {e}")

    def __enter__(self):
        """Context manager support."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Close the sink on context exit; never suppress exceptions."""
        self.close()
        return False
131
+
132
+
133
@dataclass
class Tracer:
    """
    Trace event builder and emitter.

    Manages sequence numbers and provides convenient methods for emitting events.
    """

    run_id: str
    sink: TraceSink
    seq: int = field(default=0, init=False)  # Monotonic per-run sequence counter

    def emit(
        self,
        event_type: str,
        data: dict[str, Any],
        step_id: str | None = None,
    ) -> None:
        """
        Emit a trace event.

        Args:
            event_type: Type of event (e.g., 'run_start', 'step_end')
            data: Event-specific payload
            step_id: Step UUID (if step-scoped event)
        """
        self.seq += 1

        # Sample the clock ONCE so `ts` and `ts_ms` describe the same instant.
        # Previously `ts` hardcoded the fractional part to ".000" and read the
        # clock separately from `ts_ms`; now the real milliseconds are emitted.
        now = time.time()
        ts_ms = int(now * 1000)
        ts = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(now)) + f".{ts_ms % 1000:03d}Z"

        event = TraceEvent(
            v=1,
            type=event_type,
            ts=ts,
            ts_ms=ts_ms,
            run_id=self.run_id,
            seq=self.seq,
            step_id=step_id,
            data=data,
        )

        self.sink.emit(event.to_dict())

    def emit_run_start(
        self,
        agent: str,
        llm_model: str | None = None,
        config: dict[str, Any] | None = None,
    ) -> None:
        """
        Emit run_start event.

        Args:
            agent: Agent name (e.g., 'SentienceAgent')
            llm_model: LLM model name
            config: Agent configuration
        """
        data: dict[str, Any] = {"agent": agent}
        if llm_model is not None:
            data["llm_model"] = llm_model
        if config is not None:
            data["config"] = config

        self.emit("run_start", data)

    def emit_step_start(
        self,
        step_id: str,
        step_index: int,
        goal: str,
        attempt: int = 0,
        pre_url: str | None = None,
    ) -> None:
        """
        Emit step_start event.

        Args:
            step_id: Step UUID
            step_index: Step number (1-indexed)
            goal: Step goal description
            attempt: Attempt number (0-indexed)
            pre_url: URL before step
        """
        data = {
            "step_id": step_id,
            "step_index": step_index,
            "goal": goal,
            "attempt": attempt,
        }
        if pre_url is not None:
            data["pre_url"] = pre_url

        self.emit("step_start", data, step_id=step_id)

    def emit_run_end(self, steps: int) -> None:
        """
        Emit run_end event.

        Args:
            steps: Total number of steps executed
        """
        self.emit("run_end", {"steps": steps})

    def emit_error(
        self,
        step_id: str,
        error: str,
        attempt: int = 0,
    ) -> None:
        """
        Emit error event.

        Args:
            step_id: Step UUID
            error: Error message
            attempt: Attempt number when error occurred
        """
        data = {
            "step_id": step_id,
            "error": error,
            "attempt": attempt,
        }
        self.emit("error", data, step_id=step_id)

    def close(self, **kwargs) -> None:
        """
        Close the underlying sink.

        Args:
            **kwargs: Passed through to sink.close() (e.g., blocking=True for CloudTraceSink)
        """
        # Check if sink.close() accepts kwargs (CloudTraceSink does, JsonlTraceSink doesn't)
        import inspect

        sig = inspect.signature(self.sink.close)
        if any(
            p.kind in (inspect.Parameter.VAR_KEYWORD, inspect.Parameter.KEYWORD_ONLY)
            for p in sig.parameters.values()
        ):
            self.sink.close(**kwargs)
        else:
            self.sink.close()

    def __enter__(self):
        """Context manager support."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager cleanup: close the sink; never suppress exceptions."""
        self.close()
        return False
sentience/utils.py ADDED
@@ -0,0 +1,296 @@
1
+ """
2
+ Digest utilities for snapshot canonicalization and hashing.
3
+
4
+ Provides functions to compute stable digests of snapshots for determinism diff.
5
+ Two digest strategies:
6
+ - strict: includes structure + normalized text
7
+ - loose: structure only (no text) - detects layout changes vs content changes
8
+ """
9
+
10
+ import hashlib
11
+ import json
12
+ import re
13
+ from dataclasses import dataclass
14
+ from pathlib import Path
15
+ from typing import Any
16
+
17
+ from playwright.sync_api import BrowserContext
18
+
19
+
20
@dataclass
class BBox:
    """Bounding box with integer pixel coordinates."""

    x: int
    y: int
    width: int
    height: int

    @classmethod
    def from_dict(cls, bbox_dict: dict[str, Any]) -> "BBox":
        """Build a BBox from a dict, defaulting any missing field to 0."""
        read = bbox_dict.get
        return cls(
            x=int(read("x", 0)),
            y=int(read("y", 0)),
            width=int(read("width", 0)),
            height=int(read("height", 0)),
        )

    def to_normalized(self, bucket_size: int = 2) -> list[int]:
        """
        Snap each coordinate to fixed-size buckets to ignore minor jitter.

        Args:
            bucket_size: Pixel bucket size (default 2px)

        Returns:
            [x, y, width, height] with each value rounded to the nearest bucket
        """
        return [
            round(value / bucket_size) * bucket_size
            for value in (self.x, self.y, self.width, self.height)
        ]
55
+
56
+
57
@dataclass
class ElementFingerprint:
    """Normalized element data used when computing snapshot digests."""

    id: int
    role: str
    bbox: list[int]  # Normalized [x, y, width, height]
    clickable: int  # 0 or 1
    primary: int  # 0 or 1
    text: str = ""  # Empty for the loose (structure-only) digest

    def to_dict(self) -> dict[str, Any]:
        """Return a JSON-serializable dict; 'text' appears only when non-empty."""
        fingerprint: dict[str, Any] = {
            "id": self.id,
            "role": self.role,
            "bbox": self.bbox,
            "clickable": self.clickable,
            "primary": self.primary,
        }
        if self.text:
            fingerprint["text"] = self.text
        return fingerprint
80
+
81
+
82
+ def normalize_text_strict(text: str | None, max_length: int = 80) -> str:
83
+ """
84
+ Normalize text for strict digest (structure + content).
85
+
86
+ Rules:
87
+ - Lowercase
88
+ - Trim and collapse whitespace
89
+ - Cap length at max_length
90
+ - Replace digit runs with '#'
91
+ - Normalize currency: $79.99 -> $#
92
+ - Normalize time patterns: 12:34 -> #:#
93
+
94
+ Args:
95
+ text: Input text
96
+ max_length: Maximum text length (default 80)
97
+
98
+ Returns:
99
+ Normalized text string
100
+ """
101
+ if not text:
102
+ return ""
103
+
104
+ # Lowercase and trim
105
+ text = text.strip().lower()
106
+
107
+ # Collapse whitespace
108
+ text = " ".join(text.split())
109
+
110
+ # Cap length
111
+ text = text[:max_length]
112
+
113
+ # Replace digit runs with #
114
+ text = re.sub(r"\d+", "#", text)
115
+
116
+ # Normalize currency
117
+ text = re.sub(r"\$\s*#", "$#", text)
118
+
119
+ # Normalize time patterns (HH:MM or similar)
120
+ text = re.sub(r"#:#", "#:#", text)
121
+
122
+ # Normalize date patterns (YYYY-MM-DD or similar)
123
+ text = re.sub(r"#-#-#", "#-#-#", text)
124
+
125
+ return text
126
+
127
+
128
def normalize_bbox(bbox: dict[str, Any] | BBox, bucket_size: int = 2) -> list[int]:
    """
    Round a bounding box to fixed-size buckets to ignore pixel jitter.

    Args:
        bbox: BBox object or dict with x, y, width, height
        bucket_size: Pixel bucket size (default 2px)

    Returns:
        [x, y, width, height] rounded to buckets
    """
    # Coerce dict input to a BBox, then delegate to its normalizer.
    if not isinstance(bbox, BBox):
        bbox = BBox.from_dict(bbox)
    return bbox.to_normalized(bucket_size)
144
+
145
+
146
def extract_element_fingerprint(
    element: dict[str, Any],
    include_text: bool = True,
) -> ElementFingerprint:
    """
    Build a normalized fingerprint from a snapshot element dict.

    Args:
        element: Element dict from snapshot
        include_text: Whether to include normalized text (False for loose digest)

    Returns:
        ElementFingerprint with normalized data
    """
    cues = element.get("visual_cues", {})

    # Text is normalized only for the strict digest; loose stays structure-only.
    normalized_text = ""
    if include_text:
        normalized_text = normalize_text_strict(element.get("text", ""))

    return ElementFingerprint(
        id=element.get("id", 0),
        role=element.get("role", "unknown"),
        bbox=normalize_bbox(element.get("bbox", {})),
        clickable=1 if cues.get("is_clickable", False) else 0,
        primary=1 if cues.get("is_primary", False) else 0,
        text=normalized_text,
    )
187
+
188
+
189
def canonical_snapshot_strict(elements: list[dict[str, Any]]) -> str:
    """
    Serialize a snapshot for the strict digest (structure + normalized text).

    Args:
        elements: List of element dicts from snapshot

    Returns:
        Canonical JSON string for hashing
    """
    # Sort by element id so the digest is order-independent.
    ordered = sorted(elements, key=lambda el: el.get("id", 0))
    fingerprints = [
        extract_element_fingerprint(el, include_text=True).to_dict() for el in ordered
    ]
    return json.dumps(fingerprints, sort_keys=True, ensure_ascii=False)
206
+
207
+
208
def canonical_snapshot_loose(elements: list[dict[str, Any]]) -> str:
    """
    Serialize a snapshot for the loose digest (structure only, no text).

    More resistant to content churn (prices, ads, timestamps) than the strict
    form; use it to distinguish structural changes from content changes.

    Args:
        elements: List of element dicts from snapshot

    Returns:
        Canonical JSON string for hashing
    """
    # Sort by element id so the digest is order-independent.
    ordered = sorted(elements, key=lambda el: el.get("id", 0))
    fingerprints = [
        extract_element_fingerprint(el, include_text=False).to_dict() for el in ordered
    ]
    return json.dumps(fingerprints, sort_keys=True, ensure_ascii=False)
228
+
229
+
230
def sha256_digest(canonical_str: str) -> str:
    """
    Hash a canonical string with SHA-256.

    Args:
        canonical_str: Canonical string to hash

    Returns:
        Hash string with format: "sha256:<hex>"
    """
    hex_digest = hashlib.sha256(canonical_str.encode("utf-8")).hexdigest()
    return f"sha256:{hex_digest}"
242
+
243
+
244
def compute_snapshot_digests(elements: list[dict[str, Any]]) -> dict[str, str]:
    """
    Compute both strict and loose digests for a snapshot.

    Args:
        elements: List of element dicts from snapshot

    Returns:
        Dict with 'strict' and 'loose' digest strings
    """
    return {
        "strict": sha256_digest(canonical_snapshot_strict(elements)),
        "loose": sha256_digest(canonical_snapshot_loose(elements)),
    }
261
+
262
+
263
def save_storage_state(context: BrowserContext, file_path: str | Path) -> None:
    """
    Save current browser storage state (cookies + localStorage) to a file.

    This is useful for capturing a logged-in session to reuse later.

    Args:
        context: Playwright BrowserContext
        file_path: Path to save the storage state JSON file

    Example:
        ```python
        from sentience import SentienceBrowser, save_storage_state

        browser = SentienceBrowser()
        browser.start()

        # User logs in manually or via agent
        browser.goto("https://example.com")
        # ... login happens ...

        # Save session for later
        save_storage_state(browser.context, "auth.json")
        ```

    Raises:
        IOError: If file cannot be written
    """
    storage_state = context.storage_state()
    file_path_obj = Path(file_path)
    file_path_obj.parent.mkdir(parents=True, exist_ok=True)
    # Explicit UTF-8 encoding: the previous open() used the platform default
    # encoding, which varies across systems (e.g. cp1252 on Windows).
    with open(file_path_obj, "w", encoding="utf-8") as f:
        json.dump(storage_state, f, indent=2)
    print(f"✅ [Sentience] Saved storage state to {file_path_obj}")
sentience/wait.py ADDED
@@ -0,0 +1,137 @@
1
+ """
2
+ Wait functionality - wait_for element matching selector
3
+ """
4
+
5
+ import asyncio
6
+ import time
7
+
8
+ from .browser import AsyncSentienceBrowser, SentienceBrowser
9
+ from .models import SnapshotOptions, WaitResult
10
+ from .query import find
11
+ from .snapshot import snapshot, snapshot_async
12
+
13
+
14
def wait_for(
    browser: SentienceBrowser,
    selector: str | dict,
    timeout: float = 10.0,
    interval: float | None = None,
    use_api: bool | None = None,
) -> WaitResult:
    """
    Wait for element matching selector to appear

    Args:
        browser: SentienceBrowser instance
        selector: String DSL or dict query
        timeout: Maximum time to wait (seconds)
        interval: Polling interval (seconds). If None, auto-detects:
                  - 0.25s for local extension (use_api=False, fast)
                  - 1.5s for remote API (use_api=True or default, network latency)
        use_api: Force use of server-side API if True, local extension if False.
                 If None, uses API if api_key is set, otherwise uses local extension.

    Returns:
        WaitResult
    """
    # Auto-detect optimal interval based on API usage
    if interval is None:
        will_use_api = use_api if use_api is not None else (browser.api_key is not None)
        # Longer interval for API calls (network latency), shorter for local extension.
        interval = 1.5 if will_use_api else 0.25

    # time.monotonic() is immune to wall-clock adjustments (NTP, DST),
    # which previously could stretch or cut the wait arbitrarily.
    start_time = time.monotonic()
    deadline = start_time + timeout

    while time.monotonic() < deadline:
        # Take snapshot (may be local extension or remote API)
        snap = snapshot(browser, SnapshotOptions(use_api=use_api))

        # Try to find element
        element = find(snap, selector)

        if element:
            duration_ms = int((time.monotonic() - start_time) * 1000)
            return WaitResult(
                found=True,
                element=element,
                duration_ms=duration_ms,
                timeout=False,
            )

        # Sleep only up to the deadline so we never overshoot the timeout.
        remaining = deadline - time.monotonic()
        if remaining <= 0:
            break
        time.sleep(min(interval, remaining))

    # Timeout
    duration_ms = int((time.monotonic() - start_time) * 1000)
    return WaitResult(
        found=False,
        element=None,
        duration_ms=duration_ms,
        timeout=True,
    )
75
+
76
+
77
async def wait_for_async(
    browser: AsyncSentienceBrowser,
    selector: str | dict,
    timeout: float = 10.0,
    interval: float | None = None,
    use_api: bool | None = None,
) -> WaitResult:
    """
    Wait for element matching selector to appear (async)

    Args:
        browser: AsyncSentienceBrowser instance
        selector: String DSL or dict query
        timeout: Maximum time to wait (seconds)
        interval: Polling interval (seconds). If None, auto-detects:
                  - 0.25s for local extension (use_api=False, fast)
                  - 1.5s for remote API (use_api=True or default, network latency)
        use_api: Force use of server-side API if True, local extension if False.
                 If None, uses API if api_key is set, otherwise uses local extension.

    Returns:
        WaitResult
    """
    # Auto-detect optimal interval based on API usage
    if interval is None:
        will_use_api = use_api if use_api is not None else (browser.api_key is not None)
        # Longer interval for API calls (network latency), shorter for local extension.
        interval = 1.5 if will_use_api else 0.25

    # time.monotonic() is immune to wall-clock adjustments (NTP, DST),
    # which previously could stretch or cut the wait arbitrarily.
    start_time = time.monotonic()
    deadline = start_time + timeout

    while time.monotonic() < deadline:
        # Take snapshot (may be local extension or remote API)
        snap = await snapshot_async(browser, SnapshotOptions(use_api=use_api))

        # Try to find element
        element = find(snap, selector)

        if element:
            duration_ms = int((time.monotonic() - start_time) * 1000)
            return WaitResult(
                found=True,
                element=element,
                duration_ms=duration_ms,
                timeout=False,
            )

        # Sleep only up to the deadline so we never overshoot the timeout.
        remaining = deadline - time.monotonic()
        if remaining <= 0:
            break
        await asyncio.sleep(min(interval, remaining))

    # Timeout
    duration_ms = int((time.monotonic() - start_time) * 1000)
    return WaitResult(
        found=False,
        element=None,
        duration_ms=duration_ms,
        timeout=True,
    )