optexity-browser-use 0.9.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- browser_use/__init__.py +157 -0
- browser_use/actor/__init__.py +11 -0
- browser_use/actor/element.py +1175 -0
- browser_use/actor/mouse.py +134 -0
- browser_use/actor/page.py +561 -0
- browser_use/actor/playground/flights.py +41 -0
- browser_use/actor/playground/mixed_automation.py +54 -0
- browser_use/actor/playground/playground.py +236 -0
- browser_use/actor/utils.py +176 -0
- browser_use/agent/cloud_events.py +282 -0
- browser_use/agent/gif.py +424 -0
- browser_use/agent/judge.py +170 -0
- browser_use/agent/message_manager/service.py +473 -0
- browser_use/agent/message_manager/utils.py +52 -0
- browser_use/agent/message_manager/views.py +98 -0
- browser_use/agent/prompts.py +413 -0
- browser_use/agent/service.py +2316 -0
- browser_use/agent/system_prompt.md +185 -0
- browser_use/agent/system_prompt_flash.md +10 -0
- browser_use/agent/system_prompt_no_thinking.md +183 -0
- browser_use/agent/views.py +743 -0
- browser_use/browser/__init__.py +41 -0
- browser_use/browser/cloud/cloud.py +203 -0
- browser_use/browser/cloud/views.py +89 -0
- browser_use/browser/events.py +578 -0
- browser_use/browser/profile.py +1158 -0
- browser_use/browser/python_highlights.py +548 -0
- browser_use/browser/session.py +3225 -0
- browser_use/browser/session_manager.py +399 -0
- browser_use/browser/video_recorder.py +162 -0
- browser_use/browser/views.py +200 -0
- browser_use/browser/watchdog_base.py +260 -0
- browser_use/browser/watchdogs/__init__.py +0 -0
- browser_use/browser/watchdogs/aboutblank_watchdog.py +253 -0
- browser_use/browser/watchdogs/crash_watchdog.py +335 -0
- browser_use/browser/watchdogs/default_action_watchdog.py +2729 -0
- browser_use/browser/watchdogs/dom_watchdog.py +817 -0
- browser_use/browser/watchdogs/downloads_watchdog.py +1277 -0
- browser_use/browser/watchdogs/local_browser_watchdog.py +461 -0
- browser_use/browser/watchdogs/permissions_watchdog.py +43 -0
- browser_use/browser/watchdogs/popups_watchdog.py +143 -0
- browser_use/browser/watchdogs/recording_watchdog.py +126 -0
- browser_use/browser/watchdogs/screenshot_watchdog.py +62 -0
- browser_use/browser/watchdogs/security_watchdog.py +280 -0
- browser_use/browser/watchdogs/storage_state_watchdog.py +335 -0
- browser_use/cli.py +2359 -0
- browser_use/code_use/__init__.py +16 -0
- browser_use/code_use/formatting.py +192 -0
- browser_use/code_use/namespace.py +665 -0
- browser_use/code_use/notebook_export.py +276 -0
- browser_use/code_use/service.py +1340 -0
- browser_use/code_use/system_prompt.md +574 -0
- browser_use/code_use/utils.py +150 -0
- browser_use/code_use/views.py +171 -0
- browser_use/config.py +505 -0
- browser_use/controller/__init__.py +3 -0
- browser_use/dom/enhanced_snapshot.py +161 -0
- browser_use/dom/markdown_extractor.py +169 -0
- browser_use/dom/playground/extraction.py +312 -0
- browser_use/dom/playground/multi_act.py +32 -0
- browser_use/dom/serializer/clickable_elements.py +200 -0
- browser_use/dom/serializer/code_use_serializer.py +287 -0
- browser_use/dom/serializer/eval_serializer.py +478 -0
- browser_use/dom/serializer/html_serializer.py +212 -0
- browser_use/dom/serializer/paint_order.py +197 -0
- browser_use/dom/serializer/serializer.py +1170 -0
- browser_use/dom/service.py +825 -0
- browser_use/dom/utils.py +129 -0
- browser_use/dom/views.py +906 -0
- browser_use/exceptions.py +5 -0
- browser_use/filesystem/__init__.py +0 -0
- browser_use/filesystem/file_system.py +619 -0
- browser_use/init_cmd.py +376 -0
- browser_use/integrations/gmail/__init__.py +24 -0
- browser_use/integrations/gmail/actions.py +115 -0
- browser_use/integrations/gmail/service.py +225 -0
- browser_use/llm/__init__.py +155 -0
- browser_use/llm/anthropic/chat.py +242 -0
- browser_use/llm/anthropic/serializer.py +312 -0
- browser_use/llm/aws/__init__.py +36 -0
- browser_use/llm/aws/chat_anthropic.py +242 -0
- browser_use/llm/aws/chat_bedrock.py +289 -0
- browser_use/llm/aws/serializer.py +257 -0
- browser_use/llm/azure/chat.py +91 -0
- browser_use/llm/base.py +57 -0
- browser_use/llm/browser_use/__init__.py +3 -0
- browser_use/llm/browser_use/chat.py +201 -0
- browser_use/llm/cerebras/chat.py +193 -0
- browser_use/llm/cerebras/serializer.py +109 -0
- browser_use/llm/deepseek/chat.py +212 -0
- browser_use/llm/deepseek/serializer.py +109 -0
- browser_use/llm/exceptions.py +29 -0
- browser_use/llm/google/__init__.py +3 -0
- browser_use/llm/google/chat.py +542 -0
- browser_use/llm/google/serializer.py +120 -0
- browser_use/llm/groq/chat.py +229 -0
- browser_use/llm/groq/parser.py +158 -0
- browser_use/llm/groq/serializer.py +159 -0
- browser_use/llm/messages.py +238 -0
- browser_use/llm/models.py +271 -0
- browser_use/llm/oci_raw/__init__.py +10 -0
- browser_use/llm/oci_raw/chat.py +443 -0
- browser_use/llm/oci_raw/serializer.py +229 -0
- browser_use/llm/ollama/chat.py +97 -0
- browser_use/llm/ollama/serializer.py +143 -0
- browser_use/llm/openai/chat.py +264 -0
- browser_use/llm/openai/like.py +15 -0
- browser_use/llm/openai/serializer.py +165 -0
- browser_use/llm/openrouter/chat.py +211 -0
- browser_use/llm/openrouter/serializer.py +26 -0
- browser_use/llm/schema.py +176 -0
- browser_use/llm/views.py +48 -0
- browser_use/logging_config.py +330 -0
- browser_use/mcp/__init__.py +18 -0
- browser_use/mcp/__main__.py +12 -0
- browser_use/mcp/client.py +544 -0
- browser_use/mcp/controller.py +264 -0
- browser_use/mcp/server.py +1114 -0
- browser_use/observability.py +204 -0
- browser_use/py.typed +0 -0
- browser_use/sandbox/__init__.py +41 -0
- browser_use/sandbox/sandbox.py +637 -0
- browser_use/sandbox/views.py +132 -0
- browser_use/screenshots/__init__.py +1 -0
- browser_use/screenshots/service.py +52 -0
- browser_use/sync/__init__.py +6 -0
- browser_use/sync/auth.py +357 -0
- browser_use/sync/service.py +161 -0
- browser_use/telemetry/__init__.py +51 -0
- browser_use/telemetry/service.py +112 -0
- browser_use/telemetry/views.py +101 -0
- browser_use/tokens/__init__.py +0 -0
- browser_use/tokens/custom_pricing.py +24 -0
- browser_use/tokens/mappings.py +4 -0
- browser_use/tokens/service.py +580 -0
- browser_use/tokens/views.py +108 -0
- browser_use/tools/registry/service.py +572 -0
- browser_use/tools/registry/views.py +174 -0
- browser_use/tools/service.py +1675 -0
- browser_use/tools/utils.py +82 -0
- browser_use/tools/views.py +100 -0
- browser_use/utils.py +670 -0
- optexity_browser_use-0.9.5.dist-info/METADATA +344 -0
- optexity_browser_use-0.9.5.dist-info/RECORD +147 -0
- optexity_browser_use-0.9.5.dist-info/WHEEL +4 -0
- optexity_browser_use-0.9.5.dist-info/entry_points.txt +3 -0
- optexity_browser_use-0.9.5.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,212 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
from typing import Any, TypeVar, overload
|
|
6
|
+
|
|
7
|
+
import httpx
|
|
8
|
+
from openai import (
|
|
9
|
+
APIConnectionError,
|
|
10
|
+
APIError,
|
|
11
|
+
APIStatusError,
|
|
12
|
+
APITimeoutError,
|
|
13
|
+
AsyncOpenAI,
|
|
14
|
+
RateLimitError,
|
|
15
|
+
)
|
|
16
|
+
from pydantic import BaseModel
|
|
17
|
+
|
|
18
|
+
from browser_use.llm.base import BaseChatModel
|
|
19
|
+
from browser_use.llm.deepseek.serializer import DeepSeekMessageSerializer
|
|
20
|
+
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
|
|
21
|
+
from browser_use.llm.messages import BaseMessage
|
|
22
|
+
from browser_use.llm.schema import SchemaOptimizer
|
|
23
|
+
from browser_use.llm.views import ChatInvokeCompletion
|
|
24
|
+
|
|
25
|
+
T = TypeVar('T', bound=BaseModel)


@dataclass
class ChatDeepSeek(BaseChatModel):
	"""DeepSeek /chat/completions wrapper (OpenAI-compatible).

	Plain text completions go straight through the chat API; structured output
	(``output_format``) and caller-supplied ``tools`` both use DeepSeek's
	function-calling API, with ``tool_choice`` forced when a schema is given.
	"""

	model: str = 'deepseek-chat'

	# Generation parameters — only forwarded to the API when explicitly set.
	max_tokens: int | None = None
	temperature: float | None = None
	top_p: float | None = None
	seed: int | None = None

	# Connection parameters.
	api_key: str | None = None
	base_url: str | httpx.URL | None = 'https://api.deepseek.com/v1'
	timeout: float | httpx.Timeout | None = None
	client_params: dict[str, Any] | None = None

	@property
	def provider(self) -> str:
		return 'deepseek'

	def _client(self) -> AsyncOpenAI:
		"""Create an AsyncOpenAI client pointed at the DeepSeek-compatible endpoint."""
		return AsyncOpenAI(
			api_key=self.api_key,
			base_url=self.base_url,
			timeout=self.timeout,
			**(self.client_params or {}),
		)

	@property
	def name(self) -> str:
		return self.model

	def _request_params(self) -> dict[str, Any]:
		"""Collect the optional sampling parameters that were explicitly configured."""
		params: dict[str, Any] = {}
		if self.temperature is not None:
			params['temperature'] = self.temperature
		if self.max_tokens is not None:
			params['max_tokens'] = self.max_tokens
		if self.top_p is not None:
			params['top_p'] = self.top_p
		if self.seed is not None:
			params['seed'] = self.seed
		return params

	def _map_exception(self, e: Exception) -> ModelProviderError:
		"""Translate SDK exceptions into browser-use model errors.

		RateLimitError becomes ModelRateLimitError; every other failure
		(APIError, APIConnectionError, APITimeoutError, APIStatusError, or
		anything unexpected) is surfaced as a generic ModelProviderError.
		"""
		if isinstance(e, RateLimitError):
			return ModelRateLimitError(str(e), model=self.name)
		return ModelProviderError(str(e), model=self.name)

	@overload
	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: None = None,
		tools: list[dict[str, Any]] | None = None,
		stop: list[str] | None = None,
	) -> ChatInvokeCompletion[str]: ...

	@overload
	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: type[T],
		tools: list[dict[str, Any]] | None = None,
		stop: list[str] | None = None,
	) -> ChatInvokeCompletion[T]: ...

	async def ainvoke(
		self,
		messages: list[BaseMessage],
		output_format: type[T] | None = None,
		tools: list[dict[str, Any]] | None = None,
		stop: list[str] | None = None,
	) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
		"""Invoke DeepSeek.

		Supports:
		1. Regular text / multi-turn conversation
		2. Function calling (caller-supplied ``tools``)
		3. Structured output: ``output_format`` is enforced via a forced tool call
		4. Conversation prefix continuation (beta endpoint: ``prefix`` + ``stop``)

		Raises:
			ModelRateLimitError: on provider rate limiting.
			ModelProviderError: on any other provider/SDK failure.
		"""
		client = self._client()
		ds_messages = DeepSeekMessageSerializer.serialize_messages(messages)
		common = self._request_params()

		# Beta conversation prefix continuation (see DeepSeek official docs):
		# the last assistant message must carry prefix=True. NOTE(review): stop
		# sequences are only forwarded in this beta mode — confirm that is intended.
		if self.base_url and str(self.base_url).endswith('/beta'):
			if ds_messages and isinstance(ds_messages[-1], dict) and ds_messages[-1].get('role') == 'assistant':
				ds_messages[-1]['prefix'] = True
			if stop:
				common['stop'] = stop

		# ① Plain text / multi-turn conversation.
		if output_format is None and not tools:
			try:
				resp = await client.chat.completions.create(  # type: ignore
					model=self.model,
					messages=ds_messages,  # type: ignore
					**common,
				)
				return ChatInvokeCompletion(
					completion=resp.choices[0].message.content or '',
					usage=None,
				)
			except Exception as e:
				raise self._map_exception(e) from e

		# ② Function calling — covers caller tools and forced structured output.
		if tools or (output_format is not None and hasattr(output_format, 'model_json_schema')):
			try:
				call_tools = tools
				tool_choice = None
				if output_format is not None and hasattr(output_format, 'model_json_schema'):
					# Force a single synthetic tool whose parameters are the
					# optimized JSON schema of the requested output model.
					tool_name = output_format.__name__
					schema = SchemaOptimizer.create_optimized_json_schema(output_format)
					schema.pop('title', None)
					call_tools = [
						{
							'type': 'function',
							'function': {
								'name': tool_name,
								'description': f'Return a JSON object of type {tool_name}',
								'parameters': schema,
							},
						}
					]
					tool_choice = {'type': 'function', 'function': {'name': tool_name}}
				resp = await client.chat.completions.create(  # type: ignore
					model=self.model,
					messages=ds_messages,  # type: ignore
					tools=call_tools,  # type: ignore
					tool_choice=tool_choice,  # type: ignore
					**common,
				)
				msg = resp.choices[0].message
				if not msg.tool_calls:
					raise ValueError('Expected tool_calls in response but got none')
				raw_args = msg.tool_calls[0].function.arguments
				parsed = json.loads(raw_args) if isinstance(raw_args, str) else raw_args
				# Only validate against the model when output_format was given.
				if output_format is not None:
					return ChatInvokeCompletion(
						completion=output_format.model_validate(parsed),
						usage=None,
					)
				# No output_format: hand the parsed tool arguments back as-is.
				return ChatInvokeCompletion(
					completion=parsed,
					usage=None,
				)
			except Exception as e:
				raise self._map_exception(e) from e

		# NOTE: a third path using response_format={'type': 'json_object'} used to
		# live here, but it was unreachable dead code: any output_format exposing
		# model_json_schema is already consumed (and returned from) by path ② above.
		raise ModelProviderError('No valid ainvoke execution path for DeepSeek LLM', model=self.name)
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from typing import Any, overload
|
|
5
|
+
|
|
6
|
+
from browser_use.llm.messages import (
|
|
7
|
+
AssistantMessage,
|
|
8
|
+
BaseMessage,
|
|
9
|
+
ContentPartImageParam,
|
|
10
|
+
ContentPartTextParam,
|
|
11
|
+
SystemMessage,
|
|
12
|
+
ToolCall,
|
|
13
|
+
UserMessage,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
MessageDict = dict[str, Any]


class DeepSeekMessageSerializer:
	"""Serializer for converting browser-use messages to DeepSeek messages."""

	# -------- content handling ----------------------------------------------
	@staticmethod
	def _serialize_text_part(part: ContentPartTextParam) -> str:
		"""Extract the raw text from a text content part."""
		return part.text

	@staticmethod
	def _serialize_image_part(part: ContentPartImageParam) -> dict[str, Any]:
		"""Serialize an image content part to the OpenAI-style image_url dict.

		Fix: the original branched on ``url.startswith('data:')`` but both
		branches returned the identical dict — data: URLs and remote URLs
		share the same wire shape, so the conditional was removed.
		"""
		return {'type': 'image_url', 'image_url': {'url': part.image_url.url}}

	@staticmethod
	def _serialize_content(content: Any) -> str | list[dict[str, Any]]:
		"""Serialize message content.

		None becomes '', plain strings pass through unchanged, and structured
		part lists are mapped to OpenAI-style content-part dicts. Refusal
		parts are rendered as text; unknown part types are silently skipped.
		"""
		if content is None:
			return ''
		if isinstance(content, str):
			return content
		serialized: list[dict[str, Any]] = []
		for part in content:
			if part.type == 'text':
				serialized.append({'type': 'text', 'text': DeepSeekMessageSerializer._serialize_text_part(part)})
			elif part.type == 'image_url':
				serialized.append(DeepSeekMessageSerializer._serialize_image_part(part))
			elif part.type == 'refusal':
				serialized.append({'type': 'text', 'text': f'[Refusal] {part.refusal}'})
		return serialized

	# -------- tool-call handling --------------------------------------------
	@staticmethod
	def _serialize_tool_calls(tool_calls: list[ToolCall]) -> list[dict[str, Any]]:
		"""Serialize assistant tool calls to DeepSeek tool-call dicts.

		NOTE(review): ``arguments`` is emitted as a parsed dict here, but the
		OpenAI-compatible wire format expects a JSON *string* — confirm that
		the DeepSeek endpoint accepts objects before relying on this.
		"""
		deepseek_tool_calls: list[dict[str, Any]] = []
		for tc in tool_calls:
			try:
				arguments = json.loads(tc.function.arguments)
			except json.JSONDecodeError:
				# Not valid JSON: wrap the raw string so nothing is lost.
				arguments = {'arguments': tc.function.arguments}
			deepseek_tool_calls.append(
				{
					'id': tc.id,
					'type': 'function',
					'function': {
						'name': tc.function.name,
						'arguments': arguments,
					},
				}
			)
		return deepseek_tool_calls

	# -------- single-message serialization ----------------------------------
	@overload
	@staticmethod
	def serialize(message: UserMessage) -> MessageDict: ...

	@overload
	@staticmethod
	def serialize(message: SystemMessage) -> MessageDict: ...

	@overload
	@staticmethod
	def serialize(message: AssistantMessage) -> MessageDict: ...

	@staticmethod
	def serialize(message: BaseMessage) -> MessageDict:
		"""Serialize one browser-use message into a DeepSeek message dict.

		Raises:
			ValueError: if the message is not a User/System/Assistant message.
		"""
		if isinstance(message, UserMessage):
			return {
				'role': 'user',
				'content': DeepSeekMessageSerializer._serialize_content(message.content),
			}
		if isinstance(message, SystemMessage):
			return {
				'role': 'system',
				'content': DeepSeekMessageSerializer._serialize_content(message.content),
			}
		if isinstance(message, AssistantMessage):
			msg: MessageDict = {
				'role': 'assistant',
				'content': DeepSeekMessageSerializer._serialize_content(message.content),
			}
			if message.tool_calls:
				msg['tool_calls'] = DeepSeekMessageSerializer._serialize_tool_calls(message.tool_calls)
			return msg
		raise ValueError(f'Unknown message type: {type(message)}')

	# -------- list serialization --------------------------------------------
	@staticmethod
	def serialize_messages(messages: list[BaseMessage]) -> list[MessageDict]:
		"""Serialize a whole conversation, one dict per message."""
		return [DeepSeekMessageSerializer.serialize(m) for m in messages]
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
class ModelError(Exception):
	"""Base class for all model-related errors."""


class ModelProviderError(ModelError):
	"""Exception raised when a model provider returns an error.

	Carries the provider's message, an HTTP-style status code (502 by
	default), and optionally the name of the model that was invoked.
	"""

	def __init__(self, message: str, status_code: int = 502, model: str | None = None):
		super().__init__(message)
		self.message = message
		self.status_code = status_code
		self.model = model


class ModelRateLimitError(ModelProviderError):
	"""Exception raised when a model provider returns a rate limit error.

	Identical to ModelProviderError except that the default status code
	is 429 (Too Many Requests).
	"""

	def __init__(self, message: str, status_code: int = 429, model: str | None = None):
		super().__init__(message=message, status_code=status_code, model=model)