sentienceapi 0.92.2__py3-none-any.whl → 0.98.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sentienceapi might be problematic.

Files changed (64)
  1. sentience/__init__.py +107 -2
  2. sentience/_extension_loader.py +156 -1
  3. sentience/action_executor.py +2 -0
  4. sentience/actions.py +354 -9
  5. sentience/agent.py +4 -0
  6. sentience/agent_runtime.py +840 -0
  7. sentience/asserts/__init__.py +70 -0
  8. sentience/asserts/expect.py +621 -0
  9. sentience/asserts/query.py +383 -0
  10. sentience/async_api.py +8 -1
  11. sentience/backends/__init__.py +137 -0
  12. sentience/backends/actions.py +372 -0
  13. sentience/backends/browser_use_adapter.py +241 -0
  14. sentience/backends/cdp_backend.py +393 -0
  15. sentience/backends/exceptions.py +211 -0
  16. sentience/backends/playwright_backend.py +194 -0
  17. sentience/backends/protocol.py +216 -0
  18. sentience/backends/sentience_context.py +469 -0
  19. sentience/backends/snapshot.py +483 -0
  20. sentience/browser.py +230 -74
  21. sentience/canonicalization.py +207 -0
  22. sentience/cloud_tracing.py +65 -24
  23. sentience/constants.py +6 -0
  24. sentience/cursor_policy.py +142 -0
  25. sentience/extension/content.js +35 -0
  26. sentience/extension/injected_api.js +310 -15
  27. sentience/extension/manifest.json +1 -1
  28. sentience/extension/pkg/sentience_core.d.ts +22 -22
  29. sentience/extension/pkg/sentience_core.js +192 -144
  30. sentience/extension/pkg/sentience_core_bg.wasm +0 -0
  31. sentience/extension/release.json +29 -29
  32. sentience/failure_artifacts.py +241 -0
  33. sentience/integrations/__init__.py +6 -0
  34. sentience/integrations/langchain/__init__.py +12 -0
  35. sentience/integrations/langchain/context.py +18 -0
  36. sentience/integrations/langchain/core.py +326 -0
  37. sentience/integrations/langchain/tools.py +180 -0
  38. sentience/integrations/models.py +46 -0
  39. sentience/integrations/pydanticai/__init__.py +15 -0
  40. sentience/integrations/pydanticai/deps.py +20 -0
  41. sentience/integrations/pydanticai/toolset.py +468 -0
  42. sentience/llm_provider.py +695 -18
  43. sentience/models.py +536 -3
  44. sentience/ordinal.py +280 -0
  45. sentience/query.py +66 -4
  46. sentience/schemas/trace_v1.json +27 -1
  47. sentience/snapshot.py +384 -93
  48. sentience/snapshot_diff.py +39 -54
  49. sentience/text_search.py +1 -0
  50. sentience/trace_event_builder.py +20 -1
  51. sentience/trace_indexing/indexer.py +3 -49
  52. sentience/tracer_factory.py +1 -3
  53. sentience/verification.py +618 -0
  54. sentience/visual_agent.py +3 -1
  55. {sentienceapi-0.92.2.dist-info → sentienceapi-0.98.0.dist-info}/METADATA +198 -40
  56. sentienceapi-0.98.0.dist-info/RECORD +92 -0
  57. sentience/utils.py +0 -296
  58. sentienceapi-0.92.2.dist-info/RECORD +0 -65
  59. {sentienceapi-0.92.2.dist-info → sentienceapi-0.98.0.dist-info}/WHEEL +0 -0
  60. {sentienceapi-0.92.2.dist-info → sentienceapi-0.98.0.dist-info}/entry_points.txt +0 -0
  61. {sentienceapi-0.92.2.dist-info → sentienceapi-0.98.0.dist-info}/licenses/LICENSE +0 -0
  62. {sentienceapi-0.92.2.dist-info → sentienceapi-0.98.0.dist-info}/licenses/LICENSE-APACHE +0 -0
  63. {sentienceapi-0.92.2.dist-info → sentienceapi-0.98.0.dist-info}/licenses/LICENSE-MIT +0 -0
  64. {sentienceapi-0.92.2.dist-info → sentienceapi-0.98.0.dist-info}/top_level.txt +0 -0
sentience/failure_artifacts.py
@@ -0,0 +1,241 @@
+ from __future__ import annotations
+
+ import json
+ import shutil
+ import tempfile
+ import time
+ from collections.abc import Callable
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Any, Literal
+
+
+ @dataclass
+ class FailureArtifactsOptions:
+     buffer_seconds: float = 15.0
+     capture_on_action: bool = True
+     fps: float = 0.0
+     persist_mode: Literal["onFail", "always"] = "onFail"
+     output_dir: str = ".sentience/artifacts"
+     on_before_persist: Callable[[RedactionContext], RedactionResult] | None = None
+     redact_snapshot_values: bool = True
+
+
+ @dataclass
+ class RedactionContext:
+     run_id: str
+     reason: str | None
+     status: Literal["failure", "success"]
+     snapshot: Any | None
+     diagnostics: Any | None
+     frame_paths: list[str]
+     metadata: dict[str, Any]
+
+
+ @dataclass
+ class RedactionResult:
+     snapshot: Any | None = None
+     diagnostics: Any | None = None
+     frame_paths: list[str] | None = None
+     drop_frames: bool = False
+
+
+ @dataclass
+ class _FrameRecord:
+     ts: float
+     file_name: str
+     path: Path
+
+
+ class FailureArtifactBuffer:
+     """
+     Ring buffer of screenshots with minimal persistence on failure.
+     """
+
+     def __init__(
+         self,
+         *,
+         run_id: str,
+         options: FailureArtifactsOptions,
+         time_fn: Callable[[], float] = time.time,
+     ) -> None:
+         self.run_id = run_id
+         self.options = options
+         self._time_fn = time_fn
+         self._temp_dir = Path(tempfile.mkdtemp(prefix="sentience-artifacts-"))
+         self._frames_dir = self._temp_dir / "frames"
+         self._frames_dir.mkdir(parents=True, exist_ok=True)
+         self._frames: list[_FrameRecord] = []
+         self._steps: list[dict] = []
+         self._persisted = False
+
+     @property
+     def temp_dir(self) -> Path:
+         return self._temp_dir
+
+     def record_step(
+         self,
+         *,
+         action: str,
+         step_id: str | None,
+         step_index: int | None,
+         url: str | None,
+     ) -> None:
+         self._steps.append(
+             {
+                 "ts": self._time_fn(),
+                 "action": action,
+                 "step_id": step_id,
+                 "step_index": step_index,
+                 "url": url,
+             }
+         )
+
+     def add_frame(self, image_bytes: bytes, *, fmt: str = "png") -> None:
+         ts = self._time_fn()
+         file_name = f"frame_{int(ts * 1000)}.{fmt}"
+         path = self._frames_dir / file_name
+         path.write_bytes(image_bytes)
+         self._frames.append(_FrameRecord(ts=ts, file_name=file_name, path=path))
+         self._prune()
+
+     def frame_count(self) -> int:
+         return len(self._frames)
+
+     def _prune(self) -> None:
+         cutoff = self._time_fn() - max(0.0, self.options.buffer_seconds)
+         keep: list[_FrameRecord] = []
+         for frame in self._frames:
+             if frame.ts >= cutoff:
+                 keep.append(frame)
+             else:
+                 try:
+                     frame.path.unlink(missing_ok=True)
+                 except Exception:
+                     pass
+         self._frames = keep
+
+     def _write_json_atomic(self, path: Path, data: Any) -> None:
+         tmp_path = path.with_suffix(path.suffix + ".tmp")
+         tmp_path.write_text(json.dumps(data, indent=2))
+         tmp_path.replace(path)
+
+     def _redact_snapshot_defaults(self, payload: Any) -> Any:
+         if not isinstance(payload, dict):
+             return payload
+         elements = payload.get("elements")
+         if not isinstance(elements, list):
+             return payload
+         redacted = []
+         for el in elements:
+             if not isinstance(el, dict):
+                 redacted.append(el)
+                 continue
+             input_type = (el.get("input_type") or "").lower()
+             if input_type in {"password", "email", "tel"} and "value" in el:
+                 el = dict(el)
+                 el["value"] = None
+                 el["value_redacted"] = True
+             redacted.append(el)
+         payload = dict(payload)
+         payload["elements"] = redacted
+         return payload
+
+     def persist(
+         self,
+         *,
+         reason: str | None,
+         status: Literal["failure", "success"],
+         snapshot: Any | None = None,
+         diagnostics: Any | None = None,
+         metadata: dict[str, Any] | None = None,
+     ) -> Path | None:
+         if self._persisted:
+             return None
+
+         output_dir = Path(self.options.output_dir)
+         output_dir.mkdir(parents=True, exist_ok=True)
+         ts = int(self._time_fn() * 1000)
+         run_dir = output_dir / f"{self.run_id}-{ts}"
+         frames_out = run_dir / "frames"
+         frames_out.mkdir(parents=True, exist_ok=True)
+
+         snapshot_payload = None
+         if snapshot is not None:
+             if hasattr(snapshot, "model_dump"):
+                 snapshot_payload = snapshot.model_dump()
+             else:
+                 snapshot_payload = snapshot
+             if self.options.redact_snapshot_values:
+                 snapshot_payload = self._redact_snapshot_defaults(snapshot_payload)
+
+         diagnostics_payload = None
+         if diagnostics is not None:
+             if hasattr(diagnostics, "model_dump"):
+                 diagnostics_payload = diagnostics.model_dump()
+             else:
+                 diagnostics_payload = diagnostics
+
+         frame_paths = [str(frame.path) for frame in self._frames]
+         drop_frames = False
+
+         if self.options.on_before_persist is not None:
+             try:
+                 result = self.options.on_before_persist(
+                     RedactionContext(
+                         run_id=self.run_id,
+                         reason=reason,
+                         status=status,
+                         snapshot=snapshot_payload,
+                         diagnostics=diagnostics_payload,
+                         frame_paths=frame_paths,
+                         metadata=metadata or {},
+                     )
+                 )
+                 if result.snapshot is not None:
+                     snapshot_payload = result.snapshot
+                 if result.diagnostics is not None:
+                     diagnostics_payload = result.diagnostics
+                 if result.frame_paths is not None:
+                     frame_paths = result.frame_paths
+                 drop_frames = result.drop_frames
+             except Exception:
+                 drop_frames = True
+
+         if not drop_frames:
+             for frame_path in frame_paths:
+                 src = Path(frame_path)
+                 if not src.exists():
+                     continue
+                 shutil.copy2(src, frames_out / src.name)
+
+         self._write_json_atomic(run_dir / "steps.json", self._steps)
+         if snapshot_payload is not None:
+             self._write_json_atomic(run_dir / "snapshot.json", snapshot_payload)
+         if diagnostics_payload is not None:
+             self._write_json_atomic(run_dir / "diagnostics.json", diagnostics_payload)
+
+         manifest = {
+             "run_id": self.run_id,
+             "created_at_ms": ts,
+             "status": status,
+             "reason": reason,
+             "buffer_seconds": self.options.buffer_seconds,
+             "frame_count": 0 if drop_frames else len(frame_paths),
+             "frames": (
+                 [] if drop_frames else [{"file": Path(p).name, "ts": None} for p in frame_paths]
+             ),
+             "snapshot": "snapshot.json" if snapshot_payload is not None else None,
+             "diagnostics": "diagnostics.json" if diagnostics_payload is not None else None,
+             "metadata": metadata or {},
+             "frames_redacted": not drop_frames and self.options.on_before_persist is not None,
+             "frames_dropped": drop_frames,
+         }
+         self._write_json_atomic(run_dir / "manifest.json", manifest)
+
+         self._persisted = True
+         return run_dir
+
+     def cleanup(self) -> None:
+         if self._temp_dir.exists():
+             shutil.rmtree(self._temp_dir, ignore_errors=True)
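
The new failure_artifacts module keeps a time-bounded ring buffer of screenshots and only writes artifacts (frames, steps.json, snapshot.json, diagnostics.json, manifest.json) to the output directory when persist() is called, typically on failure. The following is a minimal usage sketch, not code from this diff; the run loop, run_id value, and screenshot bytes are illustrative assumptions.

# Illustrative sketch (not part of the diff): buffer screenshots during a run and
# persist artifacts only when the run fails.
from sentience.failure_artifacts import (
    FailureArtifactBuffer,
    FailureArtifactsOptions,
    RedactionResult,
)

buffer = FailureArtifactBuffer(
    run_id="example-run",
    options=FailureArtifactsOptions(
        buffer_seconds=15.0,
        output_dir=".sentience/artifacts",
        # Optional hook: drop all frames before anything is written to disk.
        on_before_persist=lambda ctx: RedactionResult(drop_frames=True),
    ),
)

try:
    buffer.record_step(action="click", step_id="step-1", step_index=1, url="https://example.com")
    buffer.add_frame(b"<png bytes>", fmt="png")  # normally real screenshot bytes
    # ... run the agent ...
except Exception as exc:
    # On failure, copy buffered frames plus steps/manifest JSON into output_dir.
    run_dir = buffer.persist(reason=str(exc), status="failure")
    print(f"artifacts written to {run_dir}")
finally:
    buffer.cleanup()  # remove the temporary ring-buffer directory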
sentience/integrations/__init__.py
@@ -0,0 +1,6 @@
+ """
+ Integrations package (internal).
+
+ This package is intended for framework integrations (e.g., PydanticAI, LangChain/LangGraph).
+ Public APIs should be introduced deliberately once the integration surface is stable.
+ """
sentience/integrations/langchain/__init__.py
@@ -0,0 +1,12 @@
+ """
+ LangChain / LangGraph integration helpers (optional).
+
+ This package is designed so the base SDK can be imported without LangChain installed.
+ All LangChain imports are done lazily inside tool-builder functions.
+ """
+
+ from .context import SentienceLangChainContext
+ from .core import SentienceLangChainCore
+ from .tools import build_sentience_langchain_tools
+
+ __all__ = ["SentienceLangChainContext", "SentienceLangChainCore", "build_sentience_langchain_tools"]
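
The docstring above states that LangChain is imported lazily inside the tool-builder functions, so the base SDK imports cleanly even when LangChain is not installed. The sketch below shows the general shape of that guard; the helper name, the imported class, and the error message are hypothetical, not code from this diff.

# Hypothetical sketch of the lazy-import guard described above; not from this diff.
def _require_langchain():
    try:
        # Imported only when tools are actually built, so `import sentience`
        # alone never needs LangChain installed.
        from langchain_core.tools import StructuredTool  # optional dependency
    except ImportError as exc:
        raise ImportError(
            "The LangChain integration requires the optional 'langchain-core' package."
        ) from exc
    return StructuredTool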
sentience/integrations/langchain/context.py
@@ -0,0 +1,18 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+
+ from sentience.browser import AsyncSentienceBrowser
+ from sentience.tracing import Tracer
+
+
+ @dataclass
+ class SentienceLangChainContext:
+     """
+     Context for LangChain/LangGraph integrations.
+
+     We keep this small and explicit; it mirrors the PydanticAI deps object.
+     """
+
+     browser: AsyncSentienceBrowser
+     tracer: Tracer | None = None
sentience/integrations/langchain/core.py
@@ -0,0 +1,326 @@
+ from __future__ import annotations
+
+ import asyncio
+ import re
+ import time
+ from typing import Any, Literal
+
+ from sentience.actions import (
+     click_async,
+     click_rect_async,
+     press_async,
+     scroll_to_async,
+     type_text_async,
+ )
+ from sentience.integrations.models import AssertionResult, BrowserState, ElementSummary
+ from sentience.models import ReadResult, SnapshotOptions, TextRectSearchResult
+ from sentience.read import read_async
+ from sentience.snapshot import snapshot_async
+ from sentience.text_search import find_text_rect_async
+ from sentience.trace_event_builder import TraceEventBuilder
+
+ from .context import SentienceLangChainContext
+
+
+ class SentienceLangChainCore:
+     """
+     Framework-agnostic (LangChain-friendly) async wrappers around Sentience SDK.
+
+     - No LangChain imports
+     - Optional Sentience tracing (local/cloud) if ctx.tracer is provided
+     """
+
+     def __init__(self, ctx: SentienceLangChainContext):
+         self.ctx = ctx
+         self._step_counter = 0
+
+     def _safe_tracer_call(self, method_name: str, *args, **kwargs) -> None:
+         tracer = self.ctx.tracer
+         if not tracer:
+             return
+         try:
+             getattr(tracer, method_name)(*args, **kwargs)
+         except Exception:
+             # Tracing must be non-fatal
+             pass
+
+     async def _trace(self, tool_name: str, exec_coro, exec_meta: dict[str, Any]):
+         tracer = self.ctx.tracer
+         browser = self.ctx.browser
+
+         pre_url = getattr(getattr(browser, "page", None), "url", None)
+
+         # Emit run_start once (best-effort)
+         if tracer and getattr(tracer, "started_at", None) is None:
+             self._safe_tracer_call(
+                 "emit_run_start",
+                 agent="LangChain+SentienceTools",
+                 llm_model=None,
+                 config={"integration": "langchain"},
+             )
+
+         step_id = None
+         step_index = None
+         start = time.time()
+         if tracer:
+             self._step_counter += 1
+             step_index = self._step_counter
+             step_id = f"tool-{step_index}:{tool_name}"
+             self._safe_tracer_call(
+                 "emit_step_start",
+                 step_id=step_id,
+                 step_index=step_index,
+                 goal=f"tool:{tool_name}",
+                 attempt=0,
+                 pre_url=pre_url,
+             )
+
+         try:
+             result = await exec_coro()
+
+             if tracer and step_id and step_index:
+                 post_url = getattr(getattr(browser, "page", None), "url", pre_url)
+                 duration_ms = int((time.time() - start) * 1000)
+
+                 success: bool | None = None
+                 if hasattr(result, "success"):
+                     success = bool(getattr(result, "success"))
+                 elif hasattr(result, "status"):
+                     success = getattr(result, "status") == "success"
+                 elif isinstance(result, dict):
+                     if "success" in result:
+                         try:
+                             success = bool(result.get("success"))
+                         except Exception:
+                             success = None
+                     elif "status" in result:
+                         success = result.get("status") == "success"
+
+                 exec_data = {"tool": tool_name, "duration_ms": duration_ms, **exec_meta}
+                 if success is not None:
+                     exec_data["success"] = success
+
+                 verify_data = {
+                     "passed": bool(success) if success is not None else True,
+                     "signals": {},
+                 }
+
+                 step_end_data = TraceEventBuilder.build_step_end_event(
+                     step_id=step_id,
+                     step_index=step_index,
+                     goal=f"tool:{tool_name}",
+                     attempt=0,
+                     pre_url=pre_url or "",
+                     post_url=post_url or "",
+                     snapshot_digest=None,
+                     llm_data={},
+                     exec_data=exec_data,
+                     verify_data=verify_data,
+                 )
+                 self._safe_tracer_call("emit", "step_end", step_end_data, step_id=step_id)
+
+             return result
+         except Exception as e:
+             if tracer and step_id:
+                 self._safe_tracer_call("emit_error", step_id=step_id, error=str(e), attempt=0)
+             raise
+
+     # ===== Observe =====
+     async def snapshot_state(
+         self, limit: int = 50, include_screenshot: bool = False
+     ) -> BrowserState:
+         async def _run():
+             opts = SnapshotOptions(limit=limit, screenshot=include_screenshot)
+             snap = await snapshot_async(self.ctx.browser, opts)
+             if getattr(snap, "status", "success") != "success":
+                 raise RuntimeError(getattr(snap, "error", None) or "snapshot failed")
+             elements = [
+                 ElementSummary(
+                     id=e.id,
+                     role=e.role,
+                     text=e.text,
+                     importance=e.importance,
+                     bbox=e.bbox,
+                 )
+                 for e in snap.elements
+             ]
+             return BrowserState(url=snap.url, elements=elements)
+
+         return await self._trace(
+             "snapshot_state",
+             _run,
+             {"limit": limit, "include_screenshot": include_screenshot},
+         )
+
+     async def read_page(
+         self,
+         format: Literal["raw", "text", "markdown"] = "text",
+         enhance_markdown: bool = True,
+     ) -> ReadResult:
+         async def _run():
+             return await read_async(
+                 self.ctx.browser, output_format=format, enhance_markdown=enhance_markdown
+             )
+
+         return await self._trace(
+             "read_page",
+             _run,
+             {"format": format, "enhance_markdown": enhance_markdown},
+         )
+
+     # ===== Act =====
+     async def click(self, element_id: int):
+         return await self._trace(
+             "click",
+             lambda: click_async(self.ctx.browser, element_id),
+             {"element_id": element_id},
+         )
+
+     async def type_text(self, element_id: int, text: str):
+         # avoid tracing text (PII)
+         return await self._trace(
+             "type_text",
+             lambda: type_text_async(self.ctx.browser, element_id, text),
+             {"element_id": element_id},
+         )
+
+     async def press_key(self, key: str):
+         return await self._trace(
+             "press_key", lambda: press_async(self.ctx.browser, key), {"key": key}
+         )
+
+     async def scroll_to(
+         self,
+         element_id: int,
+         behavior: Literal["smooth", "instant", "auto"] = "smooth",
+         block: Literal["start", "center", "end", "nearest"] = "center",
+     ):
+         return await self._trace(
+             "scroll_to",
+             lambda: scroll_to_async(self.ctx.browser, element_id, behavior=behavior, block=block),
+             {"element_id": element_id, "behavior": behavior, "block": block},
+         )
+
+     async def navigate(self, url: str) -> dict[str, Any]:
+         async def _run():
+             await self.ctx.browser.goto(url)
+             post_url = getattr(getattr(self.ctx.browser, "page", None), "url", None)
+             return {"success": True, "url": post_url or url}
+
+         return await self._trace("navigate", _run, {"url": url})
+
+     async def click_rect(
+         self,
+         *,
+         x: float,
+         y: float,
+         width: float,
+         height: float,
+         button: Literal["left", "right", "middle"] = "left",
+         click_count: int = 1,
+     ):
+         async def _run():
+             return await click_rect_async(
+                 self.ctx.browser,
+                 {"x": x, "y": y, "w": width, "h": height},
+                 button=button,
+                 click_count=click_count,
+             )
+
+         return await self._trace(
+             "click_rect",
+             _run,
+             {
+                 "x": x,
+                 "y": y,
+                 "width": width,
+                 "height": height,
+                 "button": button,
+                 "click_count": click_count,
+             },
+         )
+
+     async def find_text_rect(
+         self,
+         text: str,
+         case_sensitive: bool = False,
+         whole_word: bool = False,
+         max_results: int = 10,
+     ) -> TextRectSearchResult:
+         async def _run():
+             return await find_text_rect_async(
+                 self.ctx.browser,
+                 text,
+                 case_sensitive=case_sensitive,
+                 whole_word=whole_word,
+                 max_results=max_results,
+             )
+
+         return await self._trace(
+             "find_text_rect",
+             _run,
+             {
+                 "query": text,
+                 "case_sensitive": case_sensitive,
+                 "whole_word": whole_word,
+                 "max_results": max_results,
+             },
+         )
+
+     # ===== Verify / guard =====
+     async def verify_url_matches(self, pattern: str, flags: int = 0) -> AssertionResult:
+         async def _run():
+             page = getattr(self.ctx.browser, "page", None)
+             if not page:
+                 return AssertionResult(passed=False, reason="Browser not started (page is None)")
+             url = page.url
+             ok = re.search(pattern, url, flags) is not None
+             return AssertionResult(
+                 passed=ok,
+                 reason="" if ok else f"URL did not match pattern. url={url!r} pattern={pattern!r}",
+                 details={"url": url, "pattern": pattern},
+             )
+
+         return await self._trace("verify_url_matches", _run, {"pattern": pattern})
+
+     async def verify_text_present(
+         self,
+         text: str,
+         *,
+         format: Literal["text", "markdown", "raw"] = "text",
+         case_sensitive: bool = False,
+     ) -> AssertionResult:
+         async def _run():
+             result = await read_async(self.ctx.browser, output_format=format, enhance_markdown=True)
+             if result.status != "success":
+                 return AssertionResult(
+                     passed=False, reason=f"read failed: {result.error}", details={}
+                 )
+
+             haystack = result.content if case_sensitive else result.content.lower()
+             needle = text if case_sensitive else text.lower()
+             ok = needle in haystack
+             return AssertionResult(
+                 passed=ok,
+                 reason="" if ok else f"Text not present: {text!r}",
+                 details={"format": format, "query": text, "length": result.length},
+             )
+
+         return await self._trace("verify_text_present", _run, {"query": text, "format": format})
+
+     async def assert_eventually_url_matches(
+         self,
+         pattern: str,
+         *,
+         timeout_s: float = 10.0,
+         poll_s: float = 0.25,
+         flags: int = 0,
+     ) -> AssertionResult:
+         deadline = time.monotonic() + timeout_s
+         last: AssertionResult | None = None
+         while time.monotonic() <= deadline:
+             last = await self.verify_url_matches(pattern, flags)
+             if last.passed:
+                 return last
+             await asyncio.sleep(poll_s)
+         return last or AssertionResult(passed=False, reason="No attempts executed", details={})
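
Because these methods are plain async wrappers with optional tracing, SentienceLangChainCore can also be exercised directly, without any LangChain agent. A short sketch follows, assuming an AsyncSentienceBrowser instance is available; its construction and start/stop lifecycle are not shown in this diff and are assumptions here.

# Illustrative sketch, not from the diff: drive the async wrappers directly.
import asyncio

from sentience.browser import AsyncSentienceBrowser
from sentience.integrations.langchain import (
    SentienceLangChainContext,
    SentienceLangChainCore,
)


async def main() -> None:
    browser = AsyncSentienceBrowser()  # assumed constructor; start/stop handling omitted
    core = SentienceLangChainCore(SentienceLangChainContext(browser=browser))

    await core.navigate("https://example.com")
    state = await core.snapshot_state(limit=20)
    check = await core.assert_eventually_url_matches(r"example\.com", timeout_s=5.0)
    print(len(state.elements), check.passed)


asyncio.run(main())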