optexity-browser-use 0.9.5 (optexity_browser_use-0.9.5-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. browser_use/__init__.py +157 -0
  2. browser_use/actor/__init__.py +11 -0
  3. browser_use/actor/element.py +1175 -0
  4. browser_use/actor/mouse.py +134 -0
  5. browser_use/actor/page.py +561 -0
  6. browser_use/actor/playground/flights.py +41 -0
  7. browser_use/actor/playground/mixed_automation.py +54 -0
  8. browser_use/actor/playground/playground.py +236 -0
  9. browser_use/actor/utils.py +176 -0
  10. browser_use/agent/cloud_events.py +282 -0
  11. browser_use/agent/gif.py +424 -0
  12. browser_use/agent/judge.py +170 -0
  13. browser_use/agent/message_manager/service.py +473 -0
  14. browser_use/agent/message_manager/utils.py +52 -0
  15. browser_use/agent/message_manager/views.py +98 -0
  16. browser_use/agent/prompts.py +413 -0
  17. browser_use/agent/service.py +2316 -0
  18. browser_use/agent/system_prompt.md +185 -0
  19. browser_use/agent/system_prompt_flash.md +10 -0
  20. browser_use/agent/system_prompt_no_thinking.md +183 -0
  21. browser_use/agent/views.py +743 -0
  22. browser_use/browser/__init__.py +41 -0
  23. browser_use/browser/cloud/cloud.py +203 -0
  24. browser_use/browser/cloud/views.py +89 -0
  25. browser_use/browser/events.py +578 -0
  26. browser_use/browser/profile.py +1158 -0
  27. browser_use/browser/python_highlights.py +548 -0
  28. browser_use/browser/session.py +3225 -0
  29. browser_use/browser/session_manager.py +399 -0
  30. browser_use/browser/video_recorder.py +162 -0
  31. browser_use/browser/views.py +200 -0
  32. browser_use/browser/watchdog_base.py +260 -0
  33. browser_use/browser/watchdogs/__init__.py +0 -0
  34. browser_use/browser/watchdogs/aboutblank_watchdog.py +253 -0
  35. browser_use/browser/watchdogs/crash_watchdog.py +335 -0
  36. browser_use/browser/watchdogs/default_action_watchdog.py +2729 -0
  37. browser_use/browser/watchdogs/dom_watchdog.py +817 -0
  38. browser_use/browser/watchdogs/downloads_watchdog.py +1277 -0
  39. browser_use/browser/watchdogs/local_browser_watchdog.py +461 -0
  40. browser_use/browser/watchdogs/permissions_watchdog.py +43 -0
  41. browser_use/browser/watchdogs/popups_watchdog.py +143 -0
  42. browser_use/browser/watchdogs/recording_watchdog.py +126 -0
  43. browser_use/browser/watchdogs/screenshot_watchdog.py +62 -0
  44. browser_use/browser/watchdogs/security_watchdog.py +280 -0
  45. browser_use/browser/watchdogs/storage_state_watchdog.py +335 -0
  46. browser_use/cli.py +2359 -0
  47. browser_use/code_use/__init__.py +16 -0
  48. browser_use/code_use/formatting.py +192 -0
  49. browser_use/code_use/namespace.py +665 -0
  50. browser_use/code_use/notebook_export.py +276 -0
  51. browser_use/code_use/service.py +1340 -0
  52. browser_use/code_use/system_prompt.md +574 -0
  53. browser_use/code_use/utils.py +150 -0
  54. browser_use/code_use/views.py +171 -0
  55. browser_use/config.py +505 -0
  56. browser_use/controller/__init__.py +3 -0
  57. browser_use/dom/enhanced_snapshot.py +161 -0
  58. browser_use/dom/markdown_extractor.py +169 -0
  59. browser_use/dom/playground/extraction.py +312 -0
  60. browser_use/dom/playground/multi_act.py +32 -0
  61. browser_use/dom/serializer/clickable_elements.py +200 -0
  62. browser_use/dom/serializer/code_use_serializer.py +287 -0
  63. browser_use/dom/serializer/eval_serializer.py +478 -0
  64. browser_use/dom/serializer/html_serializer.py +212 -0
  65. browser_use/dom/serializer/paint_order.py +197 -0
  66. browser_use/dom/serializer/serializer.py +1170 -0
  67. browser_use/dom/service.py +825 -0
  68. browser_use/dom/utils.py +129 -0
  69. browser_use/dom/views.py +906 -0
  70. browser_use/exceptions.py +5 -0
  71. browser_use/filesystem/__init__.py +0 -0
  72. browser_use/filesystem/file_system.py +619 -0
  73. browser_use/init_cmd.py +376 -0
  74. browser_use/integrations/gmail/__init__.py +24 -0
  75. browser_use/integrations/gmail/actions.py +115 -0
  76. browser_use/integrations/gmail/service.py +225 -0
  77. browser_use/llm/__init__.py +155 -0
  78. browser_use/llm/anthropic/chat.py +242 -0
  79. browser_use/llm/anthropic/serializer.py +312 -0
  80. browser_use/llm/aws/__init__.py +36 -0
  81. browser_use/llm/aws/chat_anthropic.py +242 -0
  82. browser_use/llm/aws/chat_bedrock.py +289 -0
  83. browser_use/llm/aws/serializer.py +257 -0
  84. browser_use/llm/azure/chat.py +91 -0
  85. browser_use/llm/base.py +57 -0
  86. browser_use/llm/browser_use/__init__.py +3 -0
  87. browser_use/llm/browser_use/chat.py +201 -0
  88. browser_use/llm/cerebras/chat.py +193 -0
  89. browser_use/llm/cerebras/serializer.py +109 -0
  90. browser_use/llm/deepseek/chat.py +212 -0
  91. browser_use/llm/deepseek/serializer.py +109 -0
  92. browser_use/llm/exceptions.py +29 -0
  93. browser_use/llm/google/__init__.py +3 -0
  94. browser_use/llm/google/chat.py +542 -0
  95. browser_use/llm/google/serializer.py +120 -0
  96. browser_use/llm/groq/chat.py +229 -0
  97. browser_use/llm/groq/parser.py +158 -0
  98. browser_use/llm/groq/serializer.py +159 -0
  99. browser_use/llm/messages.py +238 -0
  100. browser_use/llm/models.py +271 -0
  101. browser_use/llm/oci_raw/__init__.py +10 -0
  102. browser_use/llm/oci_raw/chat.py +443 -0
  103. browser_use/llm/oci_raw/serializer.py +229 -0
  104. browser_use/llm/ollama/chat.py +97 -0
  105. browser_use/llm/ollama/serializer.py +143 -0
  106. browser_use/llm/openai/chat.py +264 -0
  107. browser_use/llm/openai/like.py +15 -0
  108. browser_use/llm/openai/serializer.py +165 -0
  109. browser_use/llm/openrouter/chat.py +211 -0
  110. browser_use/llm/openrouter/serializer.py +26 -0
  111. browser_use/llm/schema.py +176 -0
  112. browser_use/llm/views.py +48 -0
  113. browser_use/logging_config.py +330 -0
  114. browser_use/mcp/__init__.py +18 -0
  115. browser_use/mcp/__main__.py +12 -0
  116. browser_use/mcp/client.py +544 -0
  117. browser_use/mcp/controller.py +264 -0
  118. browser_use/mcp/server.py +1114 -0
  119. browser_use/observability.py +204 -0
  120. browser_use/py.typed +0 -0
  121. browser_use/sandbox/__init__.py +41 -0
  122. browser_use/sandbox/sandbox.py +637 -0
  123. browser_use/sandbox/views.py +132 -0
  124. browser_use/screenshots/__init__.py +1 -0
  125. browser_use/screenshots/service.py +52 -0
  126. browser_use/sync/__init__.py +6 -0
  127. browser_use/sync/auth.py +357 -0
  128. browser_use/sync/service.py +161 -0
  129. browser_use/telemetry/__init__.py +51 -0
  130. browser_use/telemetry/service.py +112 -0
  131. browser_use/telemetry/views.py +101 -0
  132. browser_use/tokens/__init__.py +0 -0
  133. browser_use/tokens/custom_pricing.py +24 -0
  134. browser_use/tokens/mappings.py +4 -0
  135. browser_use/tokens/service.py +580 -0
  136. browser_use/tokens/views.py +108 -0
  137. browser_use/tools/registry/service.py +572 -0
  138. browser_use/tools/registry/views.py +174 -0
  139. browser_use/tools/service.py +1675 -0
  140. browser_use/tools/utils.py +82 -0
  141. browser_use/tools/views.py +100 -0
  142. browser_use/utils.py +670 -0
  143. optexity_browser_use-0.9.5.dist-info/METADATA +344 -0
  144. optexity_browser_use-0.9.5.dist-info/RECORD +147 -0
  145. optexity_browser_use-0.9.5.dist-info/WHEEL +4 -0
  146. optexity_browser_use-0.9.5.dist-info/entry_points.txt +3 -0
  147. optexity_browser_use-0.9.5.dist-info/licenses/LICENSE +21 -0
browser_use/llm/openrouter/chat.py
@@ -0,0 +1,211 @@
+from collections.abc import Mapping
+from dataclasses import dataclass
+from typing import Any, TypeVar, overload
+
+import httpx
+from openai import APIConnectionError, APIStatusError, AsyncOpenAI, RateLimitError
+from openai.types.chat.chat_completion import ChatCompletion
+from openai.types.shared_params.response_format_json_schema import (
+    JSONSchema,
+    ResponseFormatJSONSchema,
+)
+from pydantic import BaseModel
+
+from browser_use.llm.base import BaseChatModel
+from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
+from browser_use.llm.messages import BaseMessage
+from browser_use.llm.openrouter.serializer import OpenRouterMessageSerializer
+from browser_use.llm.schema import SchemaOptimizer
+from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
+
+T = TypeVar('T', bound=BaseModel)
+
+
+@dataclass
+class ChatOpenRouter(BaseChatModel):
+    """
+    A wrapper around OpenRouter's chat API, which provides access to various LLM models
+    through a unified OpenAI-compatible interface.
+
+    This class implements the BaseChatModel protocol for OpenRouter's API.
+    """
+
+    # Model configuration
+    model: str
+
+    # Model params
+    temperature: float | None = None
+    top_p: float | None = None
+    seed: int | None = None
+
+    # Client initialization parameters
+    api_key: str | None = None
+    http_referer: str | None = None  # OpenRouter specific parameter for tracking
+    base_url: str | httpx.URL = 'https://openrouter.ai/api/v1'
+    timeout: float | httpx.Timeout | None = None
+    max_retries: int = 10
+    default_headers: Mapping[str, str] | None = None
+    default_query: Mapping[str, object] | None = None
+    http_client: httpx.AsyncClient | None = None
+    _strict_response_validation: bool = False
+    extra_body: dict[str, Any] | None = None
+
+    # Static
+    @property
+    def provider(self) -> str:
+        return 'openrouter'
+
+    def _get_client_params(self) -> dict[str, Any]:
+        """Prepare client parameters dictionary."""
+        # Define base client params
+        base_params = {
+            'api_key': self.api_key,
+            'base_url': self.base_url,
+            'timeout': self.timeout,
+            'max_retries': self.max_retries,
+            'default_headers': self.default_headers,
+            'default_query': self.default_query,
+            '_strict_response_validation': self._strict_response_validation,
+            'top_p': self.top_p,
+            'seed': self.seed,
+        }
+
+        # Create client_params dict with non-None values
+        client_params = {k: v for k, v in base_params.items() if v is not None}
+
+        # Add http_client if provided
+        if self.http_client is not None:
+            client_params['http_client'] = self.http_client
+
+        return client_params
+
+    def get_client(self) -> AsyncOpenAI:
+        """
+        Returns an AsyncOpenAI client configured for OpenRouter.
+
+        Returns:
+            AsyncOpenAI: An instance of the AsyncOpenAI client with OpenRouter base URL.
+        """
+        if not hasattr(self, '_client'):
+            client_params = self._get_client_params()
+            self._client = AsyncOpenAI(**client_params)
+        return self._client
+
+    @property
+    def name(self) -> str:
+        return str(self.model)
+
+    def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
+        """Extract usage information from the OpenRouter response."""
+        if response.usage is None:
+            return None
+
+        prompt_details = getattr(response.usage, 'prompt_tokens_details', None)
+        cached_tokens = prompt_details.cached_tokens if prompt_details else None
+
+        return ChatInvokeUsage(
+            prompt_tokens=response.usage.prompt_tokens,
+            prompt_cached_tokens=cached_tokens,
+            prompt_cache_creation_tokens=None,
+            prompt_image_tokens=None,
+            # Completion
+            completion_tokens=response.usage.completion_tokens,
+            total_tokens=response.usage.total_tokens,
+        )
+
+    @overload
+    async def ainvoke(self, messages: list[BaseMessage], output_format: None = None) -> ChatInvokeCompletion[str]: ...
+
+    @overload
+    async def ainvoke(self, messages: list[BaseMessage], output_format: type[T]) -> ChatInvokeCompletion[T]: ...
+
+    async def ainvoke(
+        self, messages: list[BaseMessage], output_format: type[T] | None = None
+    ) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
+        """
+        Invoke the model with the given messages through OpenRouter.
+
+        Args:
+            messages: List of chat messages
+            output_format: Optional Pydantic model class for structured output
+
+        Returns:
+            Either a string response or an instance of output_format
+        """
+        openrouter_messages = OpenRouterMessageSerializer.serialize_messages(messages)
+
+        # Set up extra headers for OpenRouter
+        extra_headers = {}
+        if self.http_referer:
+            extra_headers['HTTP-Referer'] = self.http_referer
+
+        try:
+            if output_format is None:
+                # Return string response
+                response = await self.get_client().chat.completions.create(
+                    model=self.model,
+                    messages=openrouter_messages,
+                    temperature=self.temperature,
+                    top_p=self.top_p,
+                    seed=self.seed,
+                    extra_headers=extra_headers,
+                    **(self.extra_body or {}),
+                )

+                usage = self._get_usage(response)
+                return ChatInvokeCompletion(
+                    completion=response.choices[0].message.content or '',
+                    usage=usage,
+                )
+
+            else:
+                # Create a JSON schema for structured output
+                schema = SchemaOptimizer.create_optimized_json_schema(output_format)
+
+                response_format_schema: JSONSchema = {
+                    'name': 'agent_output',
+                    'strict': True,
+                    'schema': schema,
+                }
+
+                # Return structured response
+                response = await self.get_client().chat.completions.create(
+                    model=self.model,
+                    messages=openrouter_messages,
+                    temperature=self.temperature,
+                    top_p=self.top_p,
+                    seed=self.seed,
+                    response_format=ResponseFormatJSONSchema(
+                        json_schema=response_format_schema,
+                        type='json_schema',
+                    ),
+                    extra_headers=extra_headers,
+                    **(self.extra_body or {}),
+                )
+
+                if response.choices[0].message.content is None:
+                    raise ModelProviderError(
+                        message='Failed to parse structured output from model response',
+                        status_code=500,
+                        model=self.name,
+                    )
+                usage = self._get_usage(response)
+
+                parsed = output_format.model_validate_json(response.choices[0].message.content)
+
+                return ChatInvokeCompletion(
+                    completion=parsed,
+                    usage=usage,
+                )
+
+        except RateLimitError as e:
+            raise ModelRateLimitError(message=e.message, model=self.name) from e
+
+        except APIConnectionError as e:
+            raise ModelProviderError(message=str(e), model=self.name) from e
+
+        except APIStatusError as e:
+            raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
+
+        except Exception as e:
+            raise ModelProviderError(message=str(e), model=self.name) from e
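
The hunk above is the whole OpenRouter integration: construct ChatOpenRouter, then call one of the two ainvoke overloads. Without output_format it yields ChatInvokeCompletion[str]; with a Pydantic class it yields a validated instance. A minimal usage sketch, assuming an OPENROUTER_API_KEY environment variable and a UserMessage class in browser_use/llm/messages.py that accepts a plain-string content (both are assumptions, not shown in this hunk):

    import asyncio
    import os

    from pydantic import BaseModel

    from browser_use.llm.messages import UserMessage  # assumed message type from this package
    from browser_use.llm.openrouter.chat import ChatOpenRouter


    class CityInfo(BaseModel):  # hypothetical structured-output model
        name: str
        country: str


    async def main() -> None:
        llm = ChatOpenRouter(
            model='openai/gpt-4o-mini',          # any OpenRouter model slug
            api_key=os.environ['OPENROUTER_API_KEY'],
            http_referer='https://example.com',  # optional OpenRouter attribution header
        )

        # Plain-text completion: output_format=None -> ChatInvokeCompletion[str]
        text = await llm.ainvoke([UserMessage(content='Name one large city in Japan.')])
        print(text.completion, text.usage)

        # Structured completion: output_format=CityInfo -> ChatInvokeCompletion[CityInfo]
        structured = await llm.ainvoke(
            [UserMessage(content='Give me a large city in Japan.')],
            output_format=CityInfo,
        )
        print(structured.completion.name, structured.completion.country)


    asyncio.run(main())
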
browser_use/llm/openrouter/serializer.py
@@ -0,0 +1,26 @@
+from openai.types.chat import ChatCompletionMessageParam
+
+from browser_use.llm.messages import BaseMessage
+from browser_use.llm.openai.serializer import OpenAIMessageSerializer
+
+
+class OpenRouterMessageSerializer:
+    """
+    Serializer for converting between custom message types and OpenRouter message formats.
+
+    OpenRouter uses the OpenAI-compatible API, so we can reuse the OpenAI serializer.
+    """
+
+    @staticmethod
+    def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
+        """
+        Serialize a list of browser_use messages to OpenRouter-compatible messages.
+
+        Args:
+            messages: List of browser_use messages
+
+        Returns:
+            List of OpenRouter-compatible messages (identical to OpenAI format)
+        """
+        # OpenRouter uses the same message format as OpenAI
+        return OpenAIMessageSerializer.serialize_messages(messages)
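
Because this serializer is a pure pass-through, the payload sent to OpenRouter is exactly the OpenAI chat format produced by OpenAIMessageSerializer (whose implementation is not part of this diff). A small sketch, again assuming the UserMessage class mentioned above:

    from browser_use.llm.messages import UserMessage  # assumed message type
    from browser_use.llm.openrouter.serializer import OpenRouterMessageSerializer

    messages = [UserMessage(content='hello')]
    payload = OpenRouterMessageSerializer.serialize_messages(messages)
    # Expected shape: OpenAI ChatCompletionMessageParam dicts, e.g.
    # [{'role': 'user', 'content': 'hello'}]
    print(payload)
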
browser_use/llm/schema.py
@@ -0,0 +1,176 @@
+"""
+Utilities for creating optimized Pydantic schemas for LLM usage.
+"""
+
+from typing import Any
+
+from pydantic import BaseModel
+
+
+class SchemaOptimizer:
+    @staticmethod
+    def create_optimized_json_schema(model: type[BaseModel]) -> dict[str, Any]:
+        """
+        Create the most optimized schema by flattening all $ref/$defs while preserving
+        FULL descriptions and ALL action definitions. Also ensures OpenAI strict mode compatibility.
+
+        Args:
+            model: The Pydantic model to optimize
+
+        Returns:
+            Optimized schema with all $refs resolved and strict mode compatibility
+        """
+        # Generate original schema
+        original_schema = model.model_json_schema()
+
+        # Extract $defs for reference resolution, then flatten everything
+        defs_lookup = original_schema.get('$defs', {})
+
+        def optimize_schema(
+            obj: Any,
+            defs_lookup: dict[str, Any] | None = None,
+            *,
+            in_properties: bool = False,  # NEW: track context
+        ) -> Any:
+            """Apply all optimization techniques including flattening all $ref/$defs"""
+            if isinstance(obj, dict):
+                optimized: dict[str, Any] = {}
+                flattened_ref: dict[str, Any] | None = None
+
+                # Skip unnecessary fields AND $defs (we'll inline everything)
+                skip_fields = ['additionalProperties', '$defs']
+
+                for key, value in obj.items():
+                    if key in skip_fields:
+                        continue
+
+                    # Skip metadata "title" unless we're iterating inside an actual `properties` map
+                    if key == 'title' and not in_properties:
+                        continue
+
+                    # Preserve FULL descriptions without truncation, skip empty ones
+                    elif key == 'description':
+                        if value:  # Only include non-empty descriptions
+                            optimized[key] = value
+
+                    # Handle type field
+                    elif key == 'type':
+                        optimized[key] = value
+
+                    # FLATTEN: Resolve $ref by inlining the actual definition
+                    elif key == '$ref' and defs_lookup:
+                        ref_path = value.split('/')[-1]  # Get the definition name from "#/$defs/SomeName"
+                        if ref_path in defs_lookup:
+                            # Get the referenced definition and flatten it
+                            referenced_def = defs_lookup[ref_path]
+                            flattened_ref = optimize_schema(referenced_def, defs_lookup)
+
+                    # Keep all anyOf structures (action unions) and resolve any $refs within
+                    elif key == 'anyOf' and isinstance(value, list):
+                        optimized[key] = [optimize_schema(item, defs_lookup) for item in value]
+
+                    # Recursively optimize nested structures
+                    elif key in ['properties', 'items']:
+                        optimized[key] = optimize_schema(
+                            value,
+                            defs_lookup,
+                            in_properties=(key == 'properties'),
+                        )
+
+                    # Keep essential validation fields
+                    elif key in ['type', 'required', 'minimum', 'maximum', 'minItems', 'maxItems', 'pattern', 'default']:
+                        optimized[key] = value if not isinstance(value, (dict, list)) else optimize_schema(value, defs_lookup)
+
+                    # Recursively process all other fields
+                    else:
+                        optimized[key] = optimize_schema(value, defs_lookup) if isinstance(value, (dict, list)) else value
+
+                # If we have a flattened reference, merge it with the optimized properties
+                if flattened_ref is not None and isinstance(flattened_ref, dict):
+                    # Start with the flattened reference as the base
+                    result = flattened_ref.copy()
+
+                    # Merge in any sibling properties that were processed
+                    for key, value in optimized.items():
+                        # Preserve descriptions from the original object if they exist
+                        if key == 'description' and 'description' not in result:
+                            result[key] = value
+                        elif key != 'description':  # Don't overwrite description from flattened ref
+                            result[key] = value
+
+                    return result
+                else:
+                    # No $ref, just return the optimized object
+                    # CRITICAL: Add additionalProperties: false to ALL objects for OpenAI strict mode
+                    if optimized.get('type') == 'object':
+                        optimized['additionalProperties'] = False
+
+                    return optimized
+
+            elif isinstance(obj, list):
+                return [optimize_schema(item, defs_lookup, in_properties=in_properties) for item in obj]
+            return obj
+
+        # Create optimized schema with flattening
+        optimized_result = optimize_schema(original_schema, defs_lookup)
+
+        # Ensure we have a dictionary (should always be the case for schema root)
+        if not isinstance(optimized_result, dict):
+            raise ValueError('Optimized schema result is not a dictionary')
+
+        optimized_schema: dict[str, Any] = optimized_result
+
+        # Additional pass to ensure ALL objects have additionalProperties: false
+        def ensure_additional_properties_false(obj: Any) -> None:
+            """Ensure all objects have additionalProperties: false"""
+            if isinstance(obj, dict):
+                # If it's an object type, ensure additionalProperties is false
+                if obj.get('type') == 'object':
+                    obj['additionalProperties'] = False
+
+                # Recursively apply to all values
+                for value in obj.values():
+                    if isinstance(value, (dict, list)):
+                        ensure_additional_properties_false(value)
+            elif isinstance(obj, list):
+                for item in obj:
+                    if isinstance(item, (dict, list)):
+                        ensure_additional_properties_false(item)
+
+        ensure_additional_properties_false(optimized_schema)
+        SchemaOptimizer._make_strict_compatible(optimized_schema)
+
+        return optimized_schema
+
+    @staticmethod
+    def _make_strict_compatible(schema: dict[str, Any] | list[Any]) -> None:
+        """Ensure all properties are required for OpenAI strict mode"""
+        if isinstance(schema, dict):
+            # First recursively apply to nested objects
+            for key, value in schema.items():
+                if isinstance(value, (dict, list)) and key != 'required':
+                    SchemaOptimizer._make_strict_compatible(value)
+
+            # Then update required for this level
+            if 'properties' in schema and 'type' in schema and schema['type'] == 'object':
+                # Add all properties to required array
+                all_props = list(schema['properties'].keys())
+                schema['required'] = all_props  # Set all properties as required
+
+        elif isinstance(schema, list):
+            for item in schema:
+                SchemaOptimizer._make_strict_compatible(item)
+
+    @staticmethod
+    def create_gemini_optimized_schema(model: type[BaseModel]) -> dict[str, Any]:
+        """
+        Create Gemini-optimized schema, preserving explicit `required` arrays so Gemini
+        respects mandatory fields defined by the caller.
+
+        Args:
+            model: The Pydantic model to optimize
+
+        Returns:
+            Optimized schema suitable for Gemini structured output
+        """
+        return SchemaOptimizer.create_optimized_json_schema(model)
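
To see what the optimizer in this hunk actually does, a small nested Pydantic model is enough: the $defs entry for the nested model is inlined in place of its $ref, every object gains additionalProperties: false, and every property ends up in required. The model names below are hypothetical, and the assertions express invariants that should hold given the code above:

    from pydantic import BaseModel, Field

    from browser_use.llm.schema import SchemaOptimizer


    class Address(BaseModel):  # hypothetical nested model
        street: str
        city: str = Field(description='City name')


    class Person(BaseModel):  # hypothetical top-level model
        name: str
        address: Address


    schema = SchemaOptimizer.create_optimized_json_schema(Person)

    # No $defs/$ref remain: the Address definition is inlined under properties['address'].
    assert '$defs' not in schema
    assert 'properties' in schema['properties']['address']

    # Strict-mode guarantees: objects forbid extra keys and list every property as required.
    assert schema['additionalProperties'] is False
    assert set(schema['required']) == {'name', 'address'}
    assert schema['properties']['address']['additionalProperties'] is False
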
browser_use/llm/views.py
@@ -0,0 +1,48 @@
+from typing import Generic, TypeVar, Union
+
+from pydantic import BaseModel
+
+T = TypeVar('T', bound=Union[BaseModel, str])
+
+
+class ChatInvokeUsage(BaseModel):
+    """
+    Usage information for a chat model invocation.
+    """
+
+    prompt_tokens: int
+    """The number of tokens in the prompt (this includes the cached tokens as well. When calculating the cost, subtract the cached tokens from the prompt tokens)"""
+
+    prompt_cached_tokens: int | None
+    """The number of cached tokens."""
+
+    prompt_cache_creation_tokens: int | None
+    """Anthropic only: The number of tokens used to create the cache."""
+
+    prompt_image_tokens: int | None
+    """Google only: The number of tokens in the image (prompt tokens is the text tokens + image tokens in that case)"""
+
+    completion_tokens: int
+    """The number of tokens in the completion."""
+
+    total_tokens: int
+    """The total number of tokens in the response."""
+
+
+class ChatInvokeCompletion(BaseModel, Generic[T]):
+    """
+    Response from a chat model invocation.
+    """
+
+    completion: T
+    """The completion of the response."""
+
+    # Thinking stuff
+    thinking: str | None = None
+    redacted_thinking: str | None = None
+
+    usage: ChatInvokeUsage | None
+    """The usage of the response."""
+
+    stop_reason: str | None = None
+    """The reason the model stopped generating. Common values: 'end_turn', 'max_tokens', 'stop_sequence'."""