optexity-browser-use 0.9.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147) hide show
  1. browser_use/__init__.py +157 -0
  2. browser_use/actor/__init__.py +11 -0
  3. browser_use/actor/element.py +1175 -0
  4. browser_use/actor/mouse.py +134 -0
  5. browser_use/actor/page.py +561 -0
  6. browser_use/actor/playground/flights.py +41 -0
  7. browser_use/actor/playground/mixed_automation.py +54 -0
  8. browser_use/actor/playground/playground.py +236 -0
  9. browser_use/actor/utils.py +176 -0
  10. browser_use/agent/cloud_events.py +282 -0
  11. browser_use/agent/gif.py +424 -0
  12. browser_use/agent/judge.py +170 -0
  13. browser_use/agent/message_manager/service.py +473 -0
  14. browser_use/agent/message_manager/utils.py +52 -0
  15. browser_use/agent/message_manager/views.py +98 -0
  16. browser_use/agent/prompts.py +413 -0
  17. browser_use/agent/service.py +2316 -0
  18. browser_use/agent/system_prompt.md +185 -0
  19. browser_use/agent/system_prompt_flash.md +10 -0
  20. browser_use/agent/system_prompt_no_thinking.md +183 -0
  21. browser_use/agent/views.py +743 -0
  22. browser_use/browser/__init__.py +41 -0
  23. browser_use/browser/cloud/cloud.py +203 -0
  24. browser_use/browser/cloud/views.py +89 -0
  25. browser_use/browser/events.py +578 -0
  26. browser_use/browser/profile.py +1158 -0
  27. browser_use/browser/python_highlights.py +548 -0
  28. browser_use/browser/session.py +3225 -0
  29. browser_use/browser/session_manager.py +399 -0
  30. browser_use/browser/video_recorder.py +162 -0
  31. browser_use/browser/views.py +200 -0
  32. browser_use/browser/watchdog_base.py +260 -0
  33. browser_use/browser/watchdogs/__init__.py +0 -0
  34. browser_use/browser/watchdogs/aboutblank_watchdog.py +253 -0
  35. browser_use/browser/watchdogs/crash_watchdog.py +335 -0
  36. browser_use/browser/watchdogs/default_action_watchdog.py +2729 -0
  37. browser_use/browser/watchdogs/dom_watchdog.py +817 -0
  38. browser_use/browser/watchdogs/downloads_watchdog.py +1277 -0
  39. browser_use/browser/watchdogs/local_browser_watchdog.py +461 -0
  40. browser_use/browser/watchdogs/permissions_watchdog.py +43 -0
  41. browser_use/browser/watchdogs/popups_watchdog.py +143 -0
  42. browser_use/browser/watchdogs/recording_watchdog.py +126 -0
  43. browser_use/browser/watchdogs/screenshot_watchdog.py +62 -0
  44. browser_use/browser/watchdogs/security_watchdog.py +280 -0
  45. browser_use/browser/watchdogs/storage_state_watchdog.py +335 -0
  46. browser_use/cli.py +2359 -0
  47. browser_use/code_use/__init__.py +16 -0
  48. browser_use/code_use/formatting.py +192 -0
  49. browser_use/code_use/namespace.py +665 -0
  50. browser_use/code_use/notebook_export.py +276 -0
  51. browser_use/code_use/service.py +1340 -0
  52. browser_use/code_use/system_prompt.md +574 -0
  53. browser_use/code_use/utils.py +150 -0
  54. browser_use/code_use/views.py +171 -0
  55. browser_use/config.py +505 -0
  56. browser_use/controller/__init__.py +3 -0
  57. browser_use/dom/enhanced_snapshot.py +161 -0
  58. browser_use/dom/markdown_extractor.py +169 -0
  59. browser_use/dom/playground/extraction.py +312 -0
  60. browser_use/dom/playground/multi_act.py +32 -0
  61. browser_use/dom/serializer/clickable_elements.py +200 -0
  62. browser_use/dom/serializer/code_use_serializer.py +287 -0
  63. browser_use/dom/serializer/eval_serializer.py +478 -0
  64. browser_use/dom/serializer/html_serializer.py +212 -0
  65. browser_use/dom/serializer/paint_order.py +197 -0
  66. browser_use/dom/serializer/serializer.py +1170 -0
  67. browser_use/dom/service.py +825 -0
  68. browser_use/dom/utils.py +129 -0
  69. browser_use/dom/views.py +906 -0
  70. browser_use/exceptions.py +5 -0
  71. browser_use/filesystem/__init__.py +0 -0
  72. browser_use/filesystem/file_system.py +619 -0
  73. browser_use/init_cmd.py +376 -0
  74. browser_use/integrations/gmail/__init__.py +24 -0
  75. browser_use/integrations/gmail/actions.py +115 -0
  76. browser_use/integrations/gmail/service.py +225 -0
  77. browser_use/llm/__init__.py +155 -0
  78. browser_use/llm/anthropic/chat.py +242 -0
  79. browser_use/llm/anthropic/serializer.py +312 -0
  80. browser_use/llm/aws/__init__.py +36 -0
  81. browser_use/llm/aws/chat_anthropic.py +242 -0
  82. browser_use/llm/aws/chat_bedrock.py +289 -0
  83. browser_use/llm/aws/serializer.py +257 -0
  84. browser_use/llm/azure/chat.py +91 -0
  85. browser_use/llm/base.py +57 -0
  86. browser_use/llm/browser_use/__init__.py +3 -0
  87. browser_use/llm/browser_use/chat.py +201 -0
  88. browser_use/llm/cerebras/chat.py +193 -0
  89. browser_use/llm/cerebras/serializer.py +109 -0
  90. browser_use/llm/deepseek/chat.py +212 -0
  91. browser_use/llm/deepseek/serializer.py +109 -0
  92. browser_use/llm/exceptions.py +29 -0
  93. browser_use/llm/google/__init__.py +3 -0
  94. browser_use/llm/google/chat.py +542 -0
  95. browser_use/llm/google/serializer.py +120 -0
  96. browser_use/llm/groq/chat.py +229 -0
  97. browser_use/llm/groq/parser.py +158 -0
  98. browser_use/llm/groq/serializer.py +159 -0
  99. browser_use/llm/messages.py +238 -0
  100. browser_use/llm/models.py +271 -0
  101. browser_use/llm/oci_raw/__init__.py +10 -0
  102. browser_use/llm/oci_raw/chat.py +443 -0
  103. browser_use/llm/oci_raw/serializer.py +229 -0
  104. browser_use/llm/ollama/chat.py +97 -0
  105. browser_use/llm/ollama/serializer.py +143 -0
  106. browser_use/llm/openai/chat.py +264 -0
  107. browser_use/llm/openai/like.py +15 -0
  108. browser_use/llm/openai/serializer.py +165 -0
  109. browser_use/llm/openrouter/chat.py +211 -0
  110. browser_use/llm/openrouter/serializer.py +26 -0
  111. browser_use/llm/schema.py +176 -0
  112. browser_use/llm/views.py +48 -0
  113. browser_use/logging_config.py +330 -0
  114. browser_use/mcp/__init__.py +18 -0
  115. browser_use/mcp/__main__.py +12 -0
  116. browser_use/mcp/client.py +544 -0
  117. browser_use/mcp/controller.py +264 -0
  118. browser_use/mcp/server.py +1114 -0
  119. browser_use/observability.py +204 -0
  120. browser_use/py.typed +0 -0
  121. browser_use/sandbox/__init__.py +41 -0
  122. browser_use/sandbox/sandbox.py +637 -0
  123. browser_use/sandbox/views.py +132 -0
  124. browser_use/screenshots/__init__.py +1 -0
  125. browser_use/screenshots/service.py +52 -0
  126. browser_use/sync/__init__.py +6 -0
  127. browser_use/sync/auth.py +357 -0
  128. browser_use/sync/service.py +161 -0
  129. browser_use/telemetry/__init__.py +51 -0
  130. browser_use/telemetry/service.py +112 -0
  131. browser_use/telemetry/views.py +101 -0
  132. browser_use/tokens/__init__.py +0 -0
  133. browser_use/tokens/custom_pricing.py +24 -0
  134. browser_use/tokens/mappings.py +4 -0
  135. browser_use/tokens/service.py +580 -0
  136. browser_use/tokens/views.py +108 -0
  137. browser_use/tools/registry/service.py +572 -0
  138. browser_use/tools/registry/views.py +174 -0
  139. browser_use/tools/service.py +1675 -0
  140. browser_use/tools/utils.py +82 -0
  141. browser_use/tools/views.py +100 -0
  142. browser_use/utils.py +670 -0
  143. optexity_browser_use-0.9.5.dist-info/METADATA +344 -0
  144. optexity_browser_use-0.9.5.dist-info/RECORD +147 -0
  145. optexity_browser_use-0.9.5.dist-info/WHEEL +4 -0
  146. optexity_browser_use-0.9.5.dist-info/entry_points.txt +3 -0
  147. optexity_browser_use-0.9.5.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,238 @@
1
+ """
2
+ This implementation is based on the OpenAI types, while removing all the parts that are not needed for Browser Use.
3
+ """
4
+
5
+ # region - Content parts
6
+ from typing import Literal, Union
7
+
8
+ from openai import BaseModel
9
+
10
+
11
+ def _truncate(text: str, max_length: int = 50) -> str:
12
+ """Truncate text to max_length characters, adding ellipsis if truncated."""
13
+ if len(text) <= max_length:
14
+ return text
15
+ return text[: max_length - 3] + '...'
16
+
17
+
18
+ def _format_image_url(url: str, max_length: int = 50) -> str:
19
+ """Format image URL for display, truncating if necessary."""
20
+ if url.startswith('data:'):
21
+ # Base64 image
22
+ media_type = url.split(';')[0].split(':')[1] if ';' in url else 'image'
23
+ return f'<base64 {media_type}>'
24
+ else:
25
+ # Regular URL
26
+ return _truncate(url, max_length)
27
+
28
+
29
class ContentPartTextParam(BaseModel):
    """A plain-text fragment inside a message's content list."""

    text: str
    type: Literal['text'] = 'text'

    def __str__(self) -> str:
        # Truncated so large prompts stay readable in logs.
        return f'Text: {_truncate(self.text)}'

    def __repr__(self) -> str:
        return f'ContentPartTextParam(text={_truncate(self.text)})'
38
+
39
+
40
class ContentPartRefusalParam(BaseModel):
    """A refusal fragment the model emitted instead of normal content."""

    refusal: str
    type: Literal['refusal'] = 'refusal'

    def __str__(self) -> str:
        return f'Refusal: {_truncate(self.refusal)}'

    def __repr__(self) -> str:
        # repr() the payload first so escapes/newlines are visible in logs.
        return f'ContentPartRefusalParam(refusal={_truncate(repr(self.refusal), 50)})'
49
+
50
+
51
# Image media types accepted by the vision-capable providers this library targets.
SupportedImageMediaType = Literal['image/jpeg', 'image/png', 'image/gif', 'image/webp']
52
+
53
+
54
class ImageURL(BaseModel):
    """An image reference: either a remote URL or inline base64 data."""

    url: str
    """Either a URL of the image or the base64 encoded image data."""

    detail: Literal['auto', 'low', 'high'] = 'auto'
    """Specifies the detail level of the image.

    Learn more in the
    [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
    """

    # needed for Anthropic
    media_type: SupportedImageMediaType = 'image/jpeg'

    def __str__(self) -> str:
        return f'🖼️ Image[{self.media_type}, detail={self.detail}]: {_format_image_url(self.url)}'

    def __repr__(self) -> str:
        shown_url = _format_image_url(self.url, 30)
        return f'ImageURL(url={shown_url!r}, detail={self.detail!r}, media_type={self.media_type!r})'
73
+
74
+
75
class ContentPartImageParam(BaseModel):
    """Wraps an `ImageURL` as a content part of a user message."""

    image_url: ImageURL
    type: Literal['image_url'] = 'image_url'

    def __str__(self) -> str:
        # Delegate display entirely to the wrapped ImageURL.
        return str(self.image_url)

    def __repr__(self) -> str:
        return f'ContentPartImageParam(image_url={self.image_url!r})'
84
+
85
+
86
class Function(BaseModel):
    """The function invocation requested by a model tool call."""

    arguments: str
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: str
    """The name of the function to call."""

    def __str__(self) -> str:
        # Looks like a call site, e.g. search({"query": "..."}).
        return f'{self.name}({_truncate(self.arguments, 80)})'

    def __repr__(self) -> str:
        return f'Function(name={self.name!r}, arguments={_truncate(repr(self.arguments), 50)})'
104
+
105
+
106
class ToolCall(BaseModel):
    """A single tool invocation emitted by the assistant."""

    id: str
    """The ID of the tool call."""

    function: Function
    """The function that the model called."""

    type: Literal['function'] = 'function'
    """The type of the tool. Currently, only `function` is supported."""

    def __str__(self) -> str:
        return f'ToolCall[{self.id}]: {self.function}'

    def __repr__(self) -> str:
        return f'ToolCall(id={self.id!r}, function={self.function!r})'
119
+
120
+
121
+ # endregion
122
+
123
+
124
+ # region - Message types
125
class _MessageBase(BaseModel):
    """Base class for all message types"""

    # Discriminator; each concrete subclass narrows this to its own literal.
    role: Literal['user', 'system', 'assistant']

    cache: bool = False
    """Whether to cache this message. This is only applicable when using Anthropic models.
    """
133
+
134
+
135
class UserMessage(_MessageBase):
    """A message authored by the end user; may mix text and image parts."""

    role: Literal['user'] = 'user'
    """The role of the messages author, in this case `user`."""

    content: str | list[ContentPartTextParam | ContentPartImageParam]
    """The contents of the user message."""

    name: str | None = None
    """An optional name for the participant.

    Provides the model information to differentiate between participants of the same
    role.
    """

    @property
    def text(self) -> str:
        """
        Automatically parse the text inside content, whether it's a string or a list of content parts.
        """
        if isinstance(self.content, str):
            return self.content
        if isinstance(self.content, list):
            # Image parts are skipped; text parts are joined with newlines.
            return '\n'.join(part.text for part in self.content if part.type == 'text')
        return ''

    def __str__(self) -> str:
        return f'UserMessage(content={self.text})'

    def __repr__(self) -> str:
        return f'UserMessage(content={self.text!r})'
166
+
167
+
168
class SystemMessage(_MessageBase):
    """A message carrying the model's instructions/persona."""

    role: Literal['system'] = 'system'
    """The role of the messages author, in this case `system`."""

    content: str | list[ContentPartTextParam]
    """The contents of the system message."""

    name: str | None = None

    @property
    def text(self) -> str:
        """
        Automatically parse the text inside content, whether it's a string or a list of content parts.
        """
        if isinstance(self.content, str):
            return self.content
        if isinstance(self.content, list):
            return '\n'.join(part.text for part in self.content if part.type == 'text')
        return ''

    def __str__(self) -> str:
        return f'SystemMessage(content={self.text})'

    def __repr__(self) -> str:
        return f'SystemMessage(content={self.text!r})'
194
+
195
+
196
class AssistantMessage(_MessageBase):
    """A message produced by the model; may carry text, refusals, and tool calls."""

    role: Literal['assistant'] = 'assistant'
    """The role of the messages author, in this case `assistant`."""

    content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None
    """The contents of the assistant message."""

    name: str | None = None

    refusal: str | None = None
    """The refusal message by the assistant."""

    # NOTE(review): a `[]` default is only safe because pydantic-style models
    # copy field defaults per instance — confirm BaseModel here does so.
    tool_calls: list[ToolCall] = []
    """The tool calls generated by the model, such as function calls."""

    @property
    def text(self) -> str:
        """
        Automatically parse the text inside content, whether it's a string or a list of content parts.
        """
        if isinstance(self.content, str):
            return self.content
        if isinstance(self.content, list):
            chunks: list[str] = []
            for part in self.content:
                if part.type == 'text':
                    chunks.append(part.text)
                elif part.type == 'refusal':
                    # Refusals are surfaced inline, clearly marked.
                    chunks.append(f'[Refusal] {part.refusal}')
            return ''.join(chunks)
        return ''

    def __str__(self) -> str:
        return f'AssistantMessage(content={self.text})'

    def __repr__(self) -> str:
        return f'AssistantMessage(content={self.text!r})'
234
+
235
+
236
# Discriminated union of every message kind; `role` is the discriminator.
BaseMessage = Union[UserMessage, SystemMessage, AssistantMessage]
237
+
238
+ # endregion
@@ -0,0 +1,271 @@
1
+ """
2
+ Convenient access to LLM models.
3
+
4
+ Usage:
5
+ from browser_use import llm
6
+
7
+ # Simple model access
8
+ model = llm.azure_gpt_4_1_mini
9
+ model = llm.openai_gpt_4o
10
+ model = llm.google_gemini_2_5_pro
11
+ model = llm.bu_latest
12
+ """
13
+
14
+ import os
15
+ from typing import TYPE_CHECKING
16
+
17
+ from browser_use.llm.azure.chat import ChatAzureOpenAI
18
+ from browser_use.llm.browser_use.chat import ChatBrowserUse
19
+ from browser_use.llm.cerebras.chat import ChatCerebras
20
+ from browser_use.llm.google.chat import ChatGoogle
21
+ from browser_use.llm.openai.chat import ChatOpenAI
22
+
23
+ # Optional OCI import
24
+ try:
25
+ from browser_use.llm.oci_raw.chat import ChatOCIRaw
26
+
27
+ OCI_AVAILABLE = True
28
+ except ImportError:
29
+ ChatOCIRaw = None
30
+ OCI_AVAILABLE = False
31
+
32
+ if TYPE_CHECKING:
33
+ from browser_use.llm.base import BaseChatModel
34
+
35
+ # Type stubs for IDE autocomplete
36
+ openai_gpt_4o: 'BaseChatModel'
37
+ openai_gpt_4o_mini: 'BaseChatModel'
38
+ openai_gpt_4_1_mini: 'BaseChatModel'
39
+ openai_o1: 'BaseChatModel'
40
+ openai_o1_mini: 'BaseChatModel'
41
+ openai_o1_pro: 'BaseChatModel'
42
+ openai_o3: 'BaseChatModel'
43
+ openai_o3_mini: 'BaseChatModel'
44
+ openai_o3_pro: 'BaseChatModel'
45
+ openai_o4_mini: 'BaseChatModel'
46
+ openai_gpt_5: 'BaseChatModel'
47
+ openai_gpt_5_mini: 'BaseChatModel'
48
+ openai_gpt_5_nano: 'BaseChatModel'
49
+
50
+ azure_gpt_4o: 'BaseChatModel'
51
+ azure_gpt_4o_mini: 'BaseChatModel'
52
+ azure_gpt_4_1_mini: 'BaseChatModel'
53
+ azure_o1: 'BaseChatModel'
54
+ azure_o1_mini: 'BaseChatModel'
55
+ azure_o1_pro: 'BaseChatModel'
56
+ azure_o3: 'BaseChatModel'
57
+ azure_o3_mini: 'BaseChatModel'
58
+ azure_o3_pro: 'BaseChatModel'
59
+ azure_gpt_5: 'BaseChatModel'
60
+ azure_gpt_5_mini: 'BaseChatModel'
61
+
62
+ google_gemini_2_0_flash: 'BaseChatModel'
63
+ google_gemini_2_0_pro: 'BaseChatModel'
64
+ google_gemini_2_5_pro: 'BaseChatModel'
65
+ google_gemini_2_5_flash: 'BaseChatModel'
66
+ google_gemini_2_5_flash_lite: 'BaseChatModel'
67
+
68
+ cerebras_llama3_1_8b: 'BaseChatModel'
69
+ cerebras_llama3_3_70b: 'BaseChatModel'
70
+ cerebras_gpt_oss_120b: 'BaseChatModel'
71
+ cerebras_llama_4_scout_17b_16e_instruct: 'BaseChatModel'
72
+ cerebras_llama_4_maverick_17b_128e_instruct: 'BaseChatModel'
73
+ cerebras_qwen_3_32b: 'BaseChatModel'
74
+ cerebras_qwen_3_235b_a22b_instruct_2507: 'BaseChatModel'
75
+ cerebras_qwen_3_235b_a22b_thinking_2507: 'BaseChatModel'
76
+ cerebras_qwen_3_coder_480b: 'BaseChatModel'
77
+
78
+ bu_latest: 'BaseChatModel'
79
+ bu_1_0: 'BaseChatModel'
80
+
81
+
82
+ def get_llm_by_name(model_name: str):
83
+ """
84
+ Factory function to create LLM instances from string names with API keys from environment.
85
+
86
+ Args:
87
+ model_name: String name like 'azure_gpt_4_1_mini', 'openai_gpt_4o', etc.
88
+
89
+ Returns:
90
+ LLM instance with API keys from environment variables
91
+
92
+ Raises:
93
+ ValueError: If model_name is not recognized
94
+ """
95
+ if not model_name:
96
+ raise ValueError('Model name cannot be empty')
97
+
98
+ # Parse model name
99
+ parts = model_name.split('_', 1)
100
+ if len(parts) < 2:
101
+ raise ValueError(f"Invalid model name format: '{model_name}'. Expected format: 'provider_model_name'")
102
+
103
+ provider = parts[0]
104
+ model_part = parts[1]
105
+
106
+ # Convert underscores back to dots/dashes for actual model names
107
+ if 'gpt_4_1_mini' in model_part:
108
+ model = model_part.replace('gpt_4_1_mini', 'gpt-4.1-mini')
109
+ elif 'gpt_4o_mini' in model_part:
110
+ model = model_part.replace('gpt_4o_mini', 'gpt-4o-mini')
111
+ elif 'gpt_4o' in model_part:
112
+ model = model_part.replace('gpt_4o', 'gpt-4o')
113
+ elif 'gemini_2_0' in model_part:
114
+ model = model_part.replace('gemini_2_0', 'gemini-2.0').replace('_', '-')
115
+ elif 'gemini_2_5' in model_part:
116
+ model = model_part.replace('gemini_2_5', 'gemini-2.5').replace('_', '-')
117
+ elif 'llama3_1' in model_part:
118
+ model = model_part.replace('llama3_1', 'llama3.1').replace('_', '-')
119
+ elif 'llama3_3' in model_part:
120
+ model = model_part.replace('llama3_3', 'llama-3.3').replace('_', '-')
121
+ elif 'llama_4_scout' in model_part:
122
+ model = model_part.replace('llama_4_scout', 'llama-4-scout').replace('_', '-')
123
+ elif 'llama_4_maverick' in model_part:
124
+ model = model_part.replace('llama_4_maverick', 'llama-4-maverick').replace('_', '-')
125
+ elif 'gpt_oss_120b' in model_part:
126
+ model = model_part.replace('gpt_oss_120b', 'gpt-oss-120b')
127
+ elif 'qwen_3_32b' in model_part:
128
+ model = model_part.replace('qwen_3_32b', 'qwen-3-32b')
129
+ elif 'qwen_3_235b_a22b_instruct' in model_part:
130
+ if model_part.endswith('_2507'):
131
+ model = model_part.replace('qwen_3_235b_a22b_instruct_2507', 'qwen-3-235b-a22b-instruct-2507')
132
+ else:
133
+ model = model_part.replace('qwen_3_235b_a22b_instruct', 'qwen-3-235b-a22b-instruct-2507')
134
+ elif 'qwen_3_235b_a22b_thinking' in model_part:
135
+ if model_part.endswith('_2507'):
136
+ model = model_part.replace('qwen_3_235b_a22b_thinking_2507', 'qwen-3-235b-a22b-thinking-2507')
137
+ else:
138
+ model = model_part.replace('qwen_3_235b_a22b_thinking', 'qwen-3-235b-a22b-thinking-2507')
139
+ elif 'qwen_3_coder_480b' in model_part:
140
+ model = model_part.replace('qwen_3_coder_480b', 'qwen-3-coder-480b')
141
+ else:
142
+ model = model_part.replace('_', '-')
143
+
144
+ # OpenAI Models
145
+ if provider == 'openai':
146
+ api_key = os.getenv('OPENAI_API_KEY')
147
+ return ChatOpenAI(model=model, api_key=api_key)
148
+
149
+ # Azure OpenAI Models
150
+ elif provider == 'azure':
151
+ api_key = os.getenv('AZURE_OPENAI_KEY') or os.getenv('AZURE_OPENAI_API_KEY')
152
+ azure_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
153
+ return ChatAzureOpenAI(model=model, api_key=api_key, azure_endpoint=azure_endpoint)
154
+
155
+ # Google Models
156
+ elif provider == 'google':
157
+ api_key = os.getenv('GOOGLE_API_KEY')
158
+ return ChatGoogle(model=model, api_key=api_key)
159
+
160
+ # OCI Models
161
+ elif provider == 'oci':
162
+ # OCI requires more complex configuration that can't be easily inferred from env vars
163
+ # Users should use ChatOCIRaw directly with proper configuration
164
+ raise ValueError('OCI models require manual configuration. Use ChatOCIRaw directly with your OCI credentials.')
165
+
166
+ # Cerebras Models
167
+ elif provider == 'cerebras':
168
+ api_key = os.getenv('CEREBRAS_API_KEY')
169
+ return ChatCerebras(model=model, api_key=api_key)
170
+
171
+ # Browser Use Models
172
+ elif provider == 'bu':
173
+ # Handle bu_latest -> bu-latest conversion (need to prepend 'bu-' back)
174
+ model = f'bu-{model_part.replace("_", "-")}'
175
+ api_key = os.getenv('BROWSER_USE_API_KEY')
176
+ return ChatBrowserUse(model=model, api_key=api_key)
177
+
178
+ else:
179
+ available_providers = ['openai', 'azure', 'google', 'oci', 'cerebras', 'bu']
180
+ raise ValueError(f"Unknown provider: '{provider}'. Available providers: {', '.join(available_providers)}")
181
+
182
+
183
+ # Pre-configured model instances (lazy loaded via __getattr__)
184
def __getattr__(name: str) -> 'BaseChatModel':
    """Create model instances on demand with API keys from environment."""
    # ChatOCIRaw is special-cased: it is only importable with the oci extra.
    if name == 'ChatOCIRaw':
        if not OCI_AVAILABLE:
            raise ImportError('OCI integration not available. Install with: pip install "browser-use[oci]"')
        return ChatOCIRaw  # type: ignore

    # The remaining chat classes are always importable; dispatch via a table.
    chat_classes = {
        'ChatOpenAI': ChatOpenAI,
        'ChatAzureOpenAI': ChatAzureOpenAI,
        'ChatGoogle': ChatGoogle,
        'ChatCerebras': ChatCerebras,
        'ChatBrowserUse': ChatBrowserUse,
    }
    if name in chat_classes:
        return chat_classes[name]  # type: ignore

    # Anything else is treated as a model shorthand like 'openai_gpt_4o'.
    try:
        return get_llm_by_name(name)
    except ValueError:
        raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
207
+
208
+
209
+ # Export all classes and preconfigured instances, conditionally including ChatOCIRaw
210
# Chat classes that are always importable; ChatOCIRaw is appended below only
# when the optional OCI extra is installed.
__all__ = [
    'ChatOpenAI',
    'ChatAzureOpenAI',
    'ChatGoogle',
    'ChatCerebras',
    'ChatBrowserUse',
]

if OCI_AVAILABLE:
    __all__.append('ChatOCIRaw')

# Model shorthands below are materialized lazily by __getattr__ above.
__all__ += [
    'get_llm_by_name',
    # OpenAI instances - created on demand
    'openai_gpt_4o',
    'openai_gpt_4o_mini',
    'openai_gpt_4_1_mini',
    'openai_o1',
    'openai_o1_mini',
    'openai_o1_pro',
    'openai_o3',
    'openai_o3_mini',
    'openai_o3_pro',
    'openai_o4_mini',
    'openai_gpt_5',
    'openai_gpt_5_mini',
    'openai_gpt_5_nano',
    # Azure instances - created on demand
    'azure_gpt_4o',
    'azure_gpt_4o_mini',
    'azure_gpt_4_1_mini',
    'azure_o1',
    'azure_o1_mini',
    'azure_o1_pro',
    'azure_o3',
    'azure_o3_mini',
    'azure_o3_pro',
    'azure_gpt_5',
    'azure_gpt_5_mini',
    # Google instances - created on demand
    'google_gemini_2_0_flash',
    'google_gemini_2_0_pro',
    'google_gemini_2_5_pro',
    'google_gemini_2_5_flash',
    'google_gemini_2_5_flash_lite',
    # Cerebras instances - created on demand
    'cerebras_llama3_1_8b',
    'cerebras_llama3_3_70b',
    'cerebras_gpt_oss_120b',
    'cerebras_llama_4_scout_17b_16e_instruct',
    'cerebras_llama_4_maverick_17b_128e_instruct',
    'cerebras_qwen_3_32b',
    'cerebras_qwen_3_235b_a22b_instruct_2507',
    'cerebras_qwen_3_235b_a22b_thinking_2507',
    'cerebras_qwen_3_coder_480b',
    # Browser Use instances - created on demand
    'bu_latest',
    'bu_1_0',
]

# NOTE: OCI backend is optional. The try/except ImportError and conditional __all__ are required
# so this module can be imported without browser-use[oci] installed.
@@ -0,0 +1,10 @@
1
"""
OCI Raw API integration for browser-use.

This module provides direct integration with Oracle Cloud Infrastructure's
Generative AI service using the raw API endpoints, without Langchain dependencies.
"""

# Re-export the chat client so callers can do
# `from browser_use.llm.oci_raw import ChatOCIRaw`.
from .chat import ChatOCIRaw

__all__ = ['ChatOCIRaw']