sentienceapi-0.95.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of sentienceapi might be problematic.

Files changed (82)
  1. sentience/__init__.py +253 -0
  2. sentience/_extension_loader.py +195 -0
  3. sentience/action_executor.py +215 -0
  4. sentience/actions.py +1020 -0
  5. sentience/agent.py +1181 -0
  6. sentience/agent_config.py +46 -0
  7. sentience/agent_runtime.py +424 -0
  8. sentience/asserts/__init__.py +70 -0
  9. sentience/asserts/expect.py +621 -0
  10. sentience/asserts/query.py +383 -0
  11. sentience/async_api.py +108 -0
  12. sentience/backends/__init__.py +137 -0
  13. sentience/backends/actions.py +343 -0
  14. sentience/backends/browser_use_adapter.py +241 -0
  15. sentience/backends/cdp_backend.py +393 -0
  16. sentience/backends/exceptions.py +211 -0
  17. sentience/backends/playwright_backend.py +194 -0
  18. sentience/backends/protocol.py +216 -0
  19. sentience/backends/sentience_context.py +469 -0
  20. sentience/backends/snapshot.py +427 -0
  21. sentience/base_agent.py +196 -0
  22. sentience/browser.py +1215 -0
  23. sentience/browser_evaluator.py +299 -0
  24. sentience/canonicalization.py +207 -0
  25. sentience/cli.py +130 -0
  26. sentience/cloud_tracing.py +807 -0
  27. sentience/constants.py +6 -0
  28. sentience/conversational_agent.py +543 -0
  29. sentience/element_filter.py +136 -0
  30. sentience/expect.py +188 -0
  31. sentience/extension/background.js +104 -0
  32. sentience/extension/content.js +161 -0
  33. sentience/extension/injected_api.js +914 -0
  34. sentience/extension/manifest.json +36 -0
  35. sentience/extension/pkg/sentience_core.d.ts +51 -0
  36. sentience/extension/pkg/sentience_core.js +323 -0
  37. sentience/extension/pkg/sentience_core_bg.wasm +0 -0
  38. sentience/extension/pkg/sentience_core_bg.wasm.d.ts +10 -0
  39. sentience/extension/release.json +115 -0
  40. sentience/formatting.py +15 -0
  41. sentience/generator.py +202 -0
  42. sentience/inspector.py +367 -0
  43. sentience/llm_interaction_handler.py +191 -0
  44. sentience/llm_provider.py +875 -0
  45. sentience/llm_provider_utils.py +120 -0
  46. sentience/llm_response_builder.py +153 -0
  47. sentience/models.py +846 -0
  48. sentience/ordinal.py +280 -0
  49. sentience/overlay.py +222 -0
  50. sentience/protocols.py +228 -0
  51. sentience/query.py +303 -0
  52. sentience/read.py +188 -0
  53. sentience/recorder.py +589 -0
  54. sentience/schemas/trace_v1.json +335 -0
  55. sentience/screenshot.py +100 -0
  56. sentience/sentience_methods.py +86 -0
  57. sentience/snapshot.py +706 -0
  58. sentience/snapshot_diff.py +126 -0
  59. sentience/text_search.py +262 -0
  60. sentience/trace_event_builder.py +148 -0
  61. sentience/trace_file_manager.py +197 -0
  62. sentience/trace_indexing/__init__.py +27 -0
  63. sentience/trace_indexing/index_schema.py +199 -0
  64. sentience/trace_indexing/indexer.py +414 -0
  65. sentience/tracer_factory.py +322 -0
  66. sentience/tracing.py +449 -0
  67. sentience/utils/__init__.py +40 -0
  68. sentience/utils/browser.py +46 -0
  69. sentience/utils/element.py +257 -0
  70. sentience/utils/formatting.py +59 -0
  71. sentience/utils.py +296 -0
  72. sentience/verification.py +380 -0
  73. sentience/visual_agent.py +2058 -0
  74. sentience/wait.py +139 -0
  75. sentienceapi-0.95.0.dist-info/METADATA +984 -0
  76. sentienceapi-0.95.0.dist-info/RECORD +82 -0
  77. sentienceapi-0.95.0.dist-info/WHEEL +5 -0
  78. sentienceapi-0.95.0.dist-info/entry_points.txt +2 -0
  79. sentienceapi-0.95.0.dist-info/licenses/LICENSE +24 -0
  80. sentienceapi-0.95.0.dist-info/licenses/LICENSE-APACHE +201 -0
  81. sentienceapi-0.95.0.dist-info/licenses/LICENSE-MIT +21 -0
  82. sentienceapi-0.95.0.dist-info/top_level.txt +1 -0
sentience/llm_provider_utils.py
@@ -0,0 +1,120 @@
+ """
+ LLM Provider utility functions for common initialization and error handling.
+
+ This module provides helper functions to reduce duplication across LLM provider implementations.
+ """
+
+ import os
+ from collections.abc import Callable
+ from typing import Any, Optional, TypeVar
+
+ T = TypeVar("T")
+
+
+ def require_package(
+     package_name: str,
+     module_name: str,
+     class_name: str | None = None,
+     install_command: str | None = None,
+ ) -> Any:
+     """
+     Import a package with consistent error handling.
+
+     Args:
+         package_name: Name of the package (for error messages)
+         module_name: Module name to import (e.g., "openai", "google.generativeai")
+         class_name: Optional class name to import from module (e.g., "OpenAI")
+         install_command: Installation command (defaults to "pip install {package_name}")
+
+     Returns:
+         Imported module or class
+
+     Raises:
+         ImportError: If package is not installed, with helpful message
+
+     Example:
+         >>> OpenAI = require_package("openai", "openai", "OpenAI", "pip install openai")
+         >>> genai = require_package("google-generativeai", "google.generativeai", install_command="pip install google-generativeai")
+     """
+     if install_command is None:
+         install_command = f"pip install {package_name}"
+
+     try:
+         if class_name:
+             # Import specific class: from module import class
+             module = __import__(module_name, fromlist=[class_name])
+             return getattr(module, class_name)
+         else:
+             # Import entire module (a non-empty fromlist makes dotted paths return the leaf module)
+             return __import__(module_name, fromlist=[module_name.rsplit(".", 1)[-1]])
+     except ImportError:
+         raise ImportError(f"{package_name} package not installed. Install with: {install_command}")
+
+
+ def get_api_key_from_env(
+     env_vars: list[str],
+     api_key: str | None = None,
+ ) -> str | None:
+     """
+     Get API key from parameter or environment variables.
+
+     Args:
+         env_vars: List of environment variable names to check (in order)
+         api_key: Optional API key parameter (takes precedence)
+
+     Returns:
+         API key string or None if not found
+
+     Example:
+         >>> key = get_api_key_from_env(["OPENAI_API_KEY"], api_key="sk-...")
+         >>> # Returns "sk-..." if provided, otherwise checks OPENAI_API_KEY env var
+     """
+     if api_key:
+         return api_key
+
+     for env_var in env_vars:
+         value = os.getenv(env_var)
+         if value:
+             return value
+
+     return None
+
+
+ def handle_provider_error(
+     error: Exception,
+     provider_name: str,
+     operation: str = "operation",
+ ) -> None:
+     """
+     Standardize error handling for LLM provider operations.
+
+     Args:
+         error: Exception that occurred
+         provider_name: Name of the provider (e.g., "OpenAI", "Anthropic")
+         operation: Description of the operation that failed
+
+     Raises:
+         RuntimeError: With standardized error message
+
+     Example:
+         >>> try:
+         ...     response = client.chat.completions.create(...)
+         ... except Exception as e:
+         ...     handle_provider_error(e, "OpenAI", "generate response")
+     """
+     error_msg = str(error)
+     if "api key" in error_msg.lower() or "authentication" in error_msg.lower():
+         raise RuntimeError(
+             f"{provider_name} API key is invalid or missing. "
+             f"Please check your API key configuration."
+         ) from error
+     elif "rate limit" in error_msg.lower() or "429" in error_msg:
+         raise RuntimeError(
+             f"{provider_name} rate limit exceeded. Please try again later."
+         ) from error
+     elif "model" in error_msg.lower() and "not found" in error_msg.lower():
+         raise RuntimeError(
+             f"{provider_name} model not found. Please check the model name."
+         ) from error
+     else:
+         raise RuntimeError(f"{provider_name} {operation} failed: {error_msg}") from error
sentience/llm_response_builder.py
@@ -0,0 +1,153 @@
+ """
+ LLM Response building utilities for consistent response construction.
+
+ This module provides helper functions for building LLMResponse objects
+ from various provider API responses.
+ """
+
+ from typing import Any, Optional
+
+ # Import LLMResponse here to avoid circular dependency
+ # We import it inside functions to break the cycle
+
+
+ class LLMResponseBuilder:
+     """
+     Helper for building LLMResponse objects with consistent structure.
+
+     Provides static methods for building responses from different provider formats.
+     """
+
+     @staticmethod
+     def from_openai_format(
+         content: str,
+         prompt_tokens: int | None = None,
+         completion_tokens: int | None = None,
+         total_tokens: int | None = None,
+         model_name: str | None = None,
+         finish_reason: str | None = None,
+     ) -> "LLMResponse":
+         """
+         Build LLMResponse from OpenAI-style API response.
+
+         Args:
+             content: Response text content
+             prompt_tokens: Number of prompt tokens
+             completion_tokens: Number of completion tokens
+             total_tokens: Total tokens (or sum of prompt + completion)
+             model_name: Model identifier
+             finish_reason: Finish reason (stop, length, etc.)
+
+         Returns:
+             LLMResponse object
+         """
+         from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+         return LLMResponse(
+             content=content,
+             prompt_tokens=prompt_tokens,
+             completion_tokens=completion_tokens,
+             total_tokens=total_tokens
+             or (
+                 (prompt_tokens + completion_tokens) if prompt_tokens and completion_tokens else None
+             ),
+             model_name=model_name,
+             finish_reason=finish_reason,
+         )
+
+     @staticmethod
+     def from_anthropic_format(
+         content: str,
+         input_tokens: int | None = None,
+         output_tokens: int | None = None,
+         model_name: str | None = None,
+         stop_reason: str | None = None,
+     ) -> "LLMResponse":
+         """
+         Build LLMResponse from Anthropic-style API response.
+
+         Args:
+             content: Response text content
+             input_tokens: Number of input tokens
+             output_tokens: Number of output tokens
+             model_name: Model identifier
+             stop_reason: Stop reason (end_turn, max_tokens, etc.)
+
+         Returns:
+             LLMResponse object
+         """
+         from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+         return LLMResponse(
+             content=content,
+             prompt_tokens=input_tokens,
+             completion_tokens=output_tokens,
+             total_tokens=(input_tokens + output_tokens) if input_tokens and output_tokens else None,
+             model_name=model_name,
+             finish_reason=stop_reason,
+         )
+
+     @staticmethod
+     def from_gemini_format(
+         content: str,
+         prompt_tokens: int | None = None,
+         completion_tokens: int | None = None,
+         total_tokens: int | None = None,
+         model_name: str | None = None,
+     ) -> "LLMResponse":
+         """
+         Build LLMResponse from Gemini-style API response.
+
+         Args:
+             content: Response text content
+             prompt_tokens: Number of prompt tokens
+             completion_tokens: Number of completion tokens
+             total_tokens: Total tokens
+             model_name: Model identifier
+
+         Returns:
+             LLMResponse object
+         """
+         from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+         return LLMResponse(
+             content=content,
+             prompt_tokens=prompt_tokens,
+             completion_tokens=completion_tokens,
+             total_tokens=total_tokens
+             or (
+                 (prompt_tokens + completion_tokens) if prompt_tokens and completion_tokens else None
+             ),
+             model_name=model_name,
+             finish_reason=None,  # Gemini uses different finish reason format
+         )
+
+     @staticmethod
+     def from_local_format(
+         content: str,
+         prompt_tokens: int,
+         completion_tokens: int,
+         model_name: str,
+     ) -> "LLMResponse":
+         """
+         Build LLMResponse from local model generation.
+
+         Args:
+             content: Response text content
+             prompt_tokens: Number of prompt tokens
+             completion_tokens: Number of completion tokens
+             model_name: Model identifier
+
+         Returns:
+             LLMResponse object
+         """
+         from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+         return LLMResponse(
+             content=content,
+             prompt_tokens=prompt_tokens,
+             completion_tokens=completion_tokens,
+             total_tokens=prompt_tokens + completion_tokens,
+             model_name=model_name,
+             finish_reason=None,
+         )
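
The builder pairs naturally with the raw SDK objects each provider receives. A hedged sketch of converting an openai-python chat completion into an LLMResponse (to_llm_response is a hypothetical adapter, not part of this package; the attribute names follow the public openai SDK):

from sentience.llm_response_builder import LLMResponseBuilder


def to_llm_response(response):  # hypothetical adapter, not shipped in the wheel
    # `response` is an openai-python ChatCompletion; `usage` may be None,
    # in which case the token counts are simply left unset.
    usage = response.usage
    return LLMResponseBuilder.from_openai_format(
        content=response.choices[0].message.content or "",
        prompt_tokens=usage.prompt_tokens if usage else None,
        completion_tokens=usage.completion_tokens if usage else None,
        total_tokens=usage.total_tokens if usage else None,
        model_name=response.model,
        finish_reason=response.choices[0].finish_reason,
    )

Because from_openai_format falls back to prompt_tokens + completion_tokens when total_tokens is missing, callers get a consistent total even from providers that omit it.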