sentienceapi-0.95.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sentienceapi might be problematic.
- sentience/__init__.py +253 -0
- sentience/_extension_loader.py +195 -0
- sentience/action_executor.py +215 -0
- sentience/actions.py +1020 -0
- sentience/agent.py +1181 -0
- sentience/agent_config.py +46 -0
- sentience/agent_runtime.py +424 -0
- sentience/asserts/__init__.py +70 -0
- sentience/asserts/expect.py +621 -0
- sentience/asserts/query.py +383 -0
- sentience/async_api.py +108 -0
- sentience/backends/__init__.py +137 -0
- sentience/backends/actions.py +343 -0
- sentience/backends/browser_use_adapter.py +241 -0
- sentience/backends/cdp_backend.py +393 -0
- sentience/backends/exceptions.py +211 -0
- sentience/backends/playwright_backend.py +194 -0
- sentience/backends/protocol.py +216 -0
- sentience/backends/sentience_context.py +469 -0
- sentience/backends/snapshot.py +427 -0
- sentience/base_agent.py +196 -0
- sentience/browser.py +1215 -0
- sentience/browser_evaluator.py +299 -0
- sentience/canonicalization.py +207 -0
- sentience/cli.py +130 -0
- sentience/cloud_tracing.py +807 -0
- sentience/constants.py +6 -0
- sentience/conversational_agent.py +543 -0
- sentience/element_filter.py +136 -0
- sentience/expect.py +188 -0
- sentience/extension/background.js +104 -0
- sentience/extension/content.js +161 -0
- sentience/extension/injected_api.js +914 -0
- sentience/extension/manifest.json +36 -0
- sentience/extension/pkg/sentience_core.d.ts +51 -0
- sentience/extension/pkg/sentience_core.js +323 -0
- sentience/extension/pkg/sentience_core_bg.wasm +0 -0
- sentience/extension/pkg/sentience_core_bg.wasm.d.ts +10 -0
- sentience/extension/release.json +115 -0
- sentience/formatting.py +15 -0
- sentience/generator.py +202 -0
- sentience/inspector.py +367 -0
- sentience/llm_interaction_handler.py +191 -0
- sentience/llm_provider.py +875 -0
- sentience/llm_provider_utils.py +120 -0
- sentience/llm_response_builder.py +153 -0
- sentience/models.py +846 -0
- sentience/ordinal.py +280 -0
- sentience/overlay.py +222 -0
- sentience/protocols.py +228 -0
- sentience/query.py +303 -0
- sentience/read.py +188 -0
- sentience/recorder.py +589 -0
- sentience/schemas/trace_v1.json +335 -0
- sentience/screenshot.py +100 -0
- sentience/sentience_methods.py +86 -0
- sentience/snapshot.py +706 -0
- sentience/snapshot_diff.py +126 -0
- sentience/text_search.py +262 -0
- sentience/trace_event_builder.py +148 -0
- sentience/trace_file_manager.py +197 -0
- sentience/trace_indexing/__init__.py +27 -0
- sentience/trace_indexing/index_schema.py +199 -0
- sentience/trace_indexing/indexer.py +414 -0
- sentience/tracer_factory.py +322 -0
- sentience/tracing.py +449 -0
- sentience/utils/__init__.py +40 -0
- sentience/utils/browser.py +46 -0
- sentience/utils/element.py +257 -0
- sentience/utils/formatting.py +59 -0
- sentience/utils.py +296 -0
- sentience/verification.py +380 -0
- sentience/visual_agent.py +2058 -0
- sentience/wait.py +139 -0
- sentienceapi-0.95.0.dist-info/METADATA +984 -0
- sentienceapi-0.95.0.dist-info/RECORD +82 -0
- sentienceapi-0.95.0.dist-info/WHEEL +5 -0
- sentienceapi-0.95.0.dist-info/entry_points.txt +2 -0
- sentienceapi-0.95.0.dist-info/licenses/LICENSE +24 -0
- sentienceapi-0.95.0.dist-info/licenses/LICENSE-APACHE +201 -0
- sentienceapi-0.95.0.dist-info/licenses/LICENSE-MIT +21 -0
- sentienceapi-0.95.0.dist-info/top_level.txt +1 -0
sentience/llm_provider_utils.py
@@ -0,0 +1,120 @@
+"""
+LLM Provider utility functions for common initialization and error handling.
+
+This module provides helper functions to reduce duplication across LLM provider implementations.
+"""
+
+import os
+from collections.abc import Callable
+from typing import Any, Optional, TypeVar
+
+T = TypeVar("T")
+
+
+def require_package(
+    package_name: str,
+    module_name: str,
+    class_name: str | None = None,
+    install_command: str | None = None,
+) -> Any:
+    """
+    Import a package with consistent error handling.
+
+    Args:
+        package_name: Name of the package (for error messages)
+        module_name: Module name to import (e.g., "openai", "google.generativeai")
+        class_name: Optional class name to import from module (e.g., "OpenAI")
+        install_command: Installation command (defaults to "pip install {package_name}")
+
+    Returns:
+        Imported module or class
+
+    Raises:
+        ImportError: If package is not installed, with helpful message
+
+    Example:
+        >>> OpenAI = require_package("openai", "openai", "OpenAI", "pip install openai")
+        >>> genai = require_package("google-generativeai", "google.generativeai", install_command="pip install google-generativeai")
+    """
+    if install_command is None:
+        install_command = f"pip install {package_name}"
+
+    try:
+        if class_name:
+            # Import specific class: from module import class
+            module = __import__(module_name, fromlist=[class_name])
+            return getattr(module, class_name)
+        else:
+            # Import entire module
+            return __import__(module_name)
+    except ImportError:
+        raise ImportError(f"{package_name} package not installed. Install with: {install_command}")
+
+
+def get_api_key_from_env(
+    env_vars: list[str],
+    api_key: str | None = None,
+) -> str | None:
+    """
+    Get API key from parameter or environment variables.
+
+    Args:
+        env_vars: List of environment variable names to check (in order)
+        api_key: Optional API key parameter (takes precedence)
+
+    Returns:
+        API key string or None if not found
+
+    Example:
+        >>> key = get_api_key_from_env(["OPENAI_API_KEY"], api_key="sk-...")
+        >>> # Returns "sk-..." if provided, otherwise checks OPENAI_API_KEY env var
+    """
+    if api_key:
+        return api_key
+
+    for env_var in env_vars:
+        value = os.getenv(env_var)
+        if value:
+            return value
+
+    return None
+
+
+def handle_provider_error(
+    error: Exception,
+    provider_name: str,
+    operation: str = "operation",
+) -> None:
+    """
+    Standardize error handling for LLM provider operations.
+
+    Args:
+        error: Exception that occurred
+        provider_name: Name of the provider (e.g., "OpenAI", "Anthropic")
+        operation: Description of the operation that failed
+
+    Raises:
+        RuntimeError: With standardized error message
+
+    Example:
+        >>> try:
+        ...     response = client.chat.completions.create(...)
+        ... except Exception as e:
+        ...     handle_provider_error(e, "OpenAI", "generate response")
+    """
+    error_msg = str(error)
+    if "api key" in error_msg.lower() or "authentication" in error_msg.lower():
+        raise RuntimeError(
+            f"{provider_name} API key is invalid or missing. "
+            f"Please check your API key configuration."
+        ) from error
+    elif "rate limit" in error_msg.lower() or "429" in error_msg:
+        raise RuntimeError(
+            f"{provider_name} rate limit exceeded. Please try again later."
+        ) from error
+    elif "model" in error_msg.lower() and "not found" in error_msg.lower():
+        raise RuntimeError(
+            f"{provider_name} model not found. Please check the model name."
+        ) from error
+    else:
+        raise RuntimeError(f"{provider_name} {operation} failed: {error_msg}") from error
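A minimal sketch of how these three helpers are intended to compose in a provider implementation. The MinimalOpenAIProvider class below is illustrative, not part of the package, and the default model name is a placeholder; it assumes the openai SDK's chat.completions API:

    from sentience.llm_provider_utils import (
        get_api_key_from_env,
        handle_provider_error,
        require_package,
    )

    class MinimalOpenAIProvider:
        """Illustrative provider wrapper built on the helpers above."""

        def __init__(self, api_key: str | None = None, model: str = "gpt-4o-mini"):
            # Fails fast with an install hint if the SDK is missing
            OpenAI = require_package("openai", "openai", "OpenAI")
            # Explicit argument wins; otherwise fall back to the environment
            key = get_api_key_from_env(["OPENAI_API_KEY"], api_key)
            if key is None:
                raise RuntimeError("No OpenAI API key provided or found in OPENAI_API_KEY")
            self._client = OpenAI(api_key=key)
            self._model = model

        def generate(self, prompt: str) -> str:
            try:
                response = self._client.chat.completions.create(
                    model=self._model,
                    messages=[{"role": "user", "content": prompt}],
                )
                return response.choices[0].message.content
            except Exception as e:
                # Maps auth, rate-limit, and model errors to standardized RuntimeErrors
                handle_provider_error(e, "OpenAI", "generate response")
                raise  # unreachable; handle_provider_error always raises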
sentience/llm_response_builder.py
@@ -0,0 +1,153 @@
+"""
+LLM Response building utilities for consistent response construction.
+
+This module provides helper functions for building LLMResponse objects
+from various provider API responses.
+"""
+
+from typing import Any, Optional
+
+# Import LLMResponse here to avoid circular dependency
+# We import it inside functions to break the cycle
+
+
+class LLMResponseBuilder:
+    """
+    Helper for building LLMResponse objects with consistent structure.
+
+    Provides static methods for building responses from different provider formats.
+    """
+
+    @staticmethod
+    def from_openai_format(
+        content: str,
+        prompt_tokens: int | None = None,
+        completion_tokens: int | None = None,
+        total_tokens: int | None = None,
+        model_name: str | None = None,
+        finish_reason: str | None = None,
+    ) -> "LLMResponse":
+        """
+        Build LLMResponse from OpenAI-style API response.
+
+        Args:
+            content: Response text content
+            prompt_tokens: Number of prompt tokens
+            completion_tokens: Number of completion tokens
+            total_tokens: Total tokens (or sum of prompt + completion)
+            model_name: Model identifier
+            finish_reason: Finish reason (stop, length, etc.)
+
+        Returns:
+            LLMResponse object
+        """
+        from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+        return LLMResponse(
+            content=content,
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=total_tokens
+            or (
+                (prompt_tokens + completion_tokens) if prompt_tokens and completion_tokens else None
+            ),
+            model_name=model_name,
+            finish_reason=finish_reason,
+        )
+
+    @staticmethod
+    def from_anthropic_format(
+        content: str,
+        input_tokens: int | None = None,
+        output_tokens: int | None = None,
+        model_name: str | None = None,
+        stop_reason: str | None = None,
+    ) -> "LLMResponse":
+        """
+        Build LLMResponse from Anthropic-style API response.
+
+        Args:
+            content: Response text content
+            input_tokens: Number of input tokens
+            output_tokens: Number of output tokens
+            model_name: Model identifier
+            stop_reason: Stop reason (end_turn, max_tokens, etc.)
+
+        Returns:
+            LLMResponse object
+        """
+        from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+        return LLMResponse(
+            content=content,
+            prompt_tokens=input_tokens,
+            completion_tokens=output_tokens,
+            total_tokens=(input_tokens + output_tokens) if input_tokens and output_tokens else None,
+            model_name=model_name,
+            finish_reason=stop_reason,
+        )
+
+    @staticmethod
+    def from_gemini_format(
+        content: str,
+        prompt_tokens: int | None = None,
+        completion_tokens: int | None = None,
+        total_tokens: int | None = None,
+        model_name: str | None = None,
+    ) -> "LLMResponse":
+        """
+        Build LLMResponse from Gemini-style API response.
+
+        Args:
+            content: Response text content
+            prompt_tokens: Number of prompt tokens
+            completion_tokens: Number of completion tokens
+            total_tokens: Total tokens
+            model_name: Model identifier
+
+        Returns:
+            LLMResponse object
+        """
+        from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+        return LLMResponse(
+            content=content,
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=total_tokens
+            or (
+                (prompt_tokens + completion_tokens) if prompt_tokens and completion_tokens else None
+            ),
+            model_name=model_name,
+            finish_reason=None,  # Gemini uses different finish reason format
+        )
+
+    @staticmethod
+    def from_local_format(
+        content: str,
+        prompt_tokens: int,
+        completion_tokens: int,
+        model_name: str,
+    ) -> "LLMResponse":
+        """
+        Build LLMResponse from local model generation.
+
+        Args:
+            content: Response text content
+            prompt_tokens: Number of prompt tokens
+            completion_tokens: Number of completion tokens
+            model_name: Model identifier
+
+        Returns:
+            LLMResponse object
+        """
+        from .llm_provider import LLMResponse  # Import here to avoid circular dependency
+
+        return LLMResponse(
+            content=content,
+            prompt_tokens=prompt_tokens,
+            completion_tokens=completion_tokens,
+            total_tokens=prompt_tokens + completion_tokens,
+            model_name=model_name,
+            finish_reason=None,
+        )
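For context, a minimal sketch (illustrative glue code, not part of this diff) of feeding a raw OpenAI chat completion through the builder; the to_llm_response helper is hypothetical, and completion is assumed to be an openai ChatCompletion object:

    from sentience.llm_response_builder import LLMResponseBuilder

    def to_llm_response(completion):
        """Map an openai ChatCompletion (assumed shape) onto LLMResponse."""
        usage = completion.usage
        return LLMResponseBuilder.from_openai_format(
            content=completion.choices[0].message.content,
            prompt_tokens=usage.prompt_tokens if usage else None,
            completion_tokens=usage.completion_tokens if usage else None,
            total_tokens=usage.total_tokens if usage else None,
            model_name=completion.model,
            finish_reason=completion.choices[0].finish_reason,
        )

Note the two design choices visible in the builders: total_tokens falls back to prompt_tokens + completion_tokens when the provider omits it, and every method defers the LLMResponse import to call time to break the circular dependency with llm_provider.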