camel-ai 0.2.67__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/_types.py +6 -2
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +4014 -410
- camel/agents/mcp_agent.py +30 -27
- camel/agents/repo_agent.py +2 -1
- camel/benchmarks/browsecomp.py +6 -6
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/configs/vllm_config.py +2 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datagen/self_improving_cot.py +1 -1
- camel/datasets/base_generator.py +39 -10
- camel/environments/__init__.py +12 -0
- camel/environments/rlcards_env.py +860 -0
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +4 -16
- camel/interpreters/docker_interpreter.py +3 -2
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/internal_python_interpreter.py +51 -2
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/base_loader.py +85 -0
- camel/loaders/chunkr_reader.py +9 -0
- camel/loaders/firecrawl_reader.py +4 -4
- camel/logger.py +1 -1
- camel/memories/agent_memories.py +84 -1
- camel/memories/base.py +34 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/blocks/vectordb_block.py +8 -1
- camel/memories/context_creators/score_based.py +29 -237
- camel/memories/records.py +88 -8
- camel/messages/base.py +166 -40
- camel/messages/func_message.py +32 -5
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +117 -18
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +205 -91
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +189 -24
- camel/models/cohere_model.py +5 -17
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +6 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +71 -20
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +49 -32
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/model_manager.py +24 -6
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +185 -19
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +171 -46
- camel/models/openai_model.py +205 -77
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/configs.py +11 -11
- camel/runtimes/daytona_runtime.py +15 -16
- camel/runtimes/docker_runtime.py +6 -6
- camel/runtimes/remote_http_runtime.py +5 -5
- camel/services/agent_openapi_server.py +380 -0
- camel/societies/__init__.py +2 -0
- camel/societies/role_playing.py +26 -28
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +249 -38
- camel/societies/workforce/role_playing_worker.py +82 -20
- camel/societies/workforce/single_agent_worker.py +634 -34
- camel/societies/workforce/structured_output_handler.py +512 -0
- camel/societies/workforce/task_channel.py +169 -23
- camel/societies/workforce/utils.py +176 -9
- camel/societies/workforce/worker.py +77 -23
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +3168 -478
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +203 -175
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/__init__.py +4 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/__init__.py +6 -0
- camel/storages/vectordb_storages/chroma.py +731 -0
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/surreal.py +365 -0
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +244 -27
- camel/toolkits/__init__.py +46 -8
- camel/toolkits/aci_toolkit.py +64 -19
- camel/toolkits/arxiv_toolkit.py +6 -6
- camel/toolkits/base.py +63 -5
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/craw4ai_toolkit.py +93 -0
- camel/toolkits/dappier_toolkit.py +10 -6
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
- camel/toolkits/excel_toolkit.py +901 -67
- camel/toolkits/file_toolkit.py +1402 -0
- camel/toolkits/function_tool.py +30 -6
- camel/toolkits/github_toolkit.py +107 -20
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +54 -0
- camel/toolkits/human_toolkit.py +34 -10
- camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +3749 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +32 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1815 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +590 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +130 -0
- camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1032 -0
- camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
- camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
- camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
- camel/toolkits/image_generation_toolkit.py +390 -0
- camel/toolkits/jina_reranker_toolkit.py +3 -4
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +104 -0
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +370 -45
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_agent_toolkit.py +608 -0
- camel/toolkits/message_integration.py +724 -0
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +277 -0
- camel/toolkits/notion_mcp_toolkit.py +224 -0
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +56 -0
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/pptx_toolkit.py +25 -12
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/screenshot_toolkit.py +213 -0
- camel/toolkits/search_toolkit.py +437 -142
- camel/toolkits/slack_toolkit.py +104 -50
- camel/toolkits/sympy_toolkit.py +1 -1
- camel/toolkits/task_planning_toolkit.py +3 -3
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/thinking_toolkit.py +1 -1
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +106 -26
- camel/toolkits/video_download_toolkit.py +17 -14
- camel/toolkits/web_deploy_toolkit.py +1219 -0
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/agents/tool_calling_record.py +4 -1
- camel/types/enums.py +316 -40
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +31 -4
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/mcp_client.py +45 -1
- camel/utils/message_summarizer.py +148 -0
- camel/utils/token_counting.py +43 -20
- camel/utils/tool_result.py +44 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +296 -85
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +219 -146
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/dalle_toolkit.py +0 -175
- camel/toolkits/file_write_toolkit.py +0 -444
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1037
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/models/sglang_model.py
CHANGED
@@ -21,7 +21,7 @@ from typing import Any, Dict, List, Optional, Type, Union
 from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
 from pydantic import BaseModel
 
-from camel.configs import
+from camel.configs import SGLangConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import (
@@ -72,8 +72,16 @@ class SGLangModel(BaseModelBackend):
             (default: :obj:`None`)
         max_retries (int, optional): Maximum number of retries for API calls.
             (default: :obj:`3`)
+        client (Optional[Any], optional): A custom synchronous
+            OpenAI-compatible client instance. If provided, this client will
+            be used instead of creating a new one. Note: When using custom
+            clients with SGLang, server auto-start features will be disabled.
+            (default: :obj:`None`)
+        async_client (Optional[Any], optional): A custom asynchronous
+            OpenAI-compatible client instance. If provided, this client will
+            be used instead of creating a new one. (default: :obj:`None`)
         **kwargs (Any): Additional arguments to pass to the client
-            initialization.
+            initialization. Ignored if custom clients are provided.
 
     Reference: https://sgl-project.github.io/backend/openai_api_completions.
     html
@@ -88,6 +96,8 @@ class SGLangModel(BaseModelBackend):
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
         max_retries: int = 3,
+        client: Optional[Any] = None,
+        async_client: Optional[Any] = None,
         **kwargs: Any,
     ) -> None:
         if model_config_dict is None:
@@ -111,9 +121,10 @@ class SGLangModel(BaseModelBackend):
            max_retries,
        )
 
-
-
-
+        # Use custom clients if provided, otherwise create new ones
+        if client is not None:
+            self._client = client
+        elif self._url:
             # Initialize the client if an existing URL is provided
             self._client = OpenAI(
                 timeout=self._timeout,
@@ -122,6 +133,12 @@ class SGLangModel(BaseModelBackend):
                 base_url=self._url,
                 **kwargs,
             )
+        else:
+            self._client = None
+
+        if async_client is not None:
+            self._async_client = async_client
+        elif self._url:
             self._async_client = AsyncOpenAI(
                 timeout=self._timeout,
                 max_retries=self._max_retries,
@@ -129,6 +146,8 @@ class SGLangModel(BaseModelBackend):
                 base_url=self._url,
                 **kwargs,
             )
+        else:
+            self._async_client = None
 
     def _start_server(self) -> None:
         try:
@@ -159,13 +178,24 @@ class SGLangModel(BaseModelBackend):
             )
             self._inactivity_thread.start()
             self.last_run_time = time.time()
-            # Initialize
-            self._client
-
-
-
-
+            # Initialize client after server starts if not already set
+            if self._client is None:
+                self._client = OpenAI(
+                    timeout=self._timeout,
+                    max_retries=self._max_retries,
+                    api_key="Set-but-ignored",  # required but ignored
+                    base_url=self._url,
+                )
+            if (
+                not hasattr(self, '_async_client')
+                or self._async_client is None
+            ):
+                self._async_client = AsyncOpenAI(
+                    timeout=self._timeout,
+                    max_retries=self._max_retries,
+                    api_key="Set-but-ignored",  # required but ignored
+                    base_url=self._url,
+                )
         except Exception as e:
             raise RuntimeError(f"Failed to start SGLang server: {e}") from e
@@ -209,21 +239,6 @@ class SGLangModel(BaseModelBackend):
             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter
 
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to SGLang API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to OpenAI API.
-        """
-        for param in self.model_config_dict:
-            if param not in SGLANG_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into SGLang model backend."
-                )
-
     @observe(as_type='generation')
     async def _arun(
         self,
@@ -271,15 +286,24 @@ class SGLangModel(BaseModelBackend):
         with self._lock:
             # Update last run time
             self.last_run_time = time.time()
+            async_client = self._async_client
 
-        if
+        if async_client is None:
             raise RuntimeError(
                 "Client is not initialized. Ensure the server is running."
             )
 
-
+        # Prepare additional parameters
+        extra_params: Dict[str, Any] = {}
+        if response_format is not None:
+            extra_params["response_format"] = response_format
+        if tools is not None:
+            extra_params["tools"] = tools
+
+        response = await async_client.chat.completions.create(
             messages=messages,
             model=self.model_type,
+            **extra_params,
             **self.model_config_dict,
         )
         update_current_observation(
@@ -337,15 +361,24 @@ class SGLangModel(BaseModelBackend):
         with self._lock:
             # Update last run time
             self.last_run_time = time.time()
+            client = self._client
 
-        if
+        if client is None:
             raise RuntimeError(
                 "Client is not initialized. Ensure the server is running."
            )
 
-
+        # Prepare additional parameters
+        extra_params: Dict[str, Any] = {}
+        if response_format is not None:
+            extra_params["response_format"] = response_format
+        if tools is not None:
+            extra_params["tools"] = tools
+
+        response = client.chat.completions.create(
             messages=messages,
             model=self.model_type,
+            **extra_params,
             **self.model_config_dict,
         )
         update_current_observation(
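The change above lets callers hand SGLangModel pre-built OpenAI-compatible clients instead of letting the backend construct its own, and supplying a custom client bypasses the server auto-start path. A minimal usage sketch; the other constructor arguments (model_type, url) are assumed from the usual CAMEL backend signature, and the export path from camel.models is assumed:

# Hedged sketch: reuse existing OpenAI-compatible clients with SGLangModel.
from openai import AsyncOpenAI, OpenAI

from camel.models import SGLangModel  # assumed export path

base_url = "http://127.0.0.1:30000/v1"  # an already-running SGLang server
sync_client = OpenAI(base_url=base_url, api_key="not-used")
async_client = AsyncOpenAI(base_url=base_url, api_key="not-used")

model = SGLangModel(
    model_type="meta-llama/Llama-3.1-8B-Instruct",
    url=base_url,
    client=sync_client,          # new parameter in this release
    async_client=async_client,   # new parameter in this release
)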
camel/models/siliconflow_model.py
CHANGED
@@ -17,7 +17,7 @@ from typing import Any, Dict, List, Optional, Type, Union
 from openai import AsyncStream
 from pydantic import BaseModel
 
-from camel.configs import
+from camel.configs import SiliconFlowConfig
 from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
@@ -104,18 +104,3 @@ class SiliconFlowModel(OpenAICompatibleModel):
         raise NotImplementedError(
             "SiliconFlow does not support async inference."
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to SiliconFlow API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to SiliconFlow API.
-        """
-        for param in self.model_config_dict:
-            if param not in SILICONFLOW_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into SiliconFlow model backend."
-                )
camel/models/stub_model.py
CHANGED
camel/models/togetherai_model.py
CHANGED
@@ -15,7 +15,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import
+from camel.configs import TogetherAIConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -86,18 +86,3 @@ class TogetherAIModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to TogetherAI API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to TogetherAI API.
-        """
-        for param in self.model_config_dict:
-            if param not in TOGETHERAI_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into TogetherAI model backend."
-                )
camel/models/vllm_model.py
CHANGED
@@ -15,7 +15,7 @@ import os
 import subprocess
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import
+from camel.configs import VLLMConfig
 from camel.logger import get_logger
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
@@ -103,18 +103,3 @@ class VLLMModel(OpenAICompatibleModel):
             )
         except Exception as e:
             logger.error(f"Failed to start vllm server: {e}.")
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to vLLM API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to OpenAI API.
-        """
-        for param in self.model_config_dict:
-            if param not in VLLM_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into vLLM model backend."
-                )
camel/models/volcano_model.py
CHANGED
@@ -15,7 +15,6 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import OPENAI_API_PARAMS
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -86,19 +85,3 @@ class VolcanoModel(OpenAICompatibleModel):
             max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration is valid for Volcano
-        model backends.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Volcano API.
-        """
-        # Using OpenAI API params as Volcano Engine API is OpenAI-compatible
-        for param in self.model_config_dict:
-            if param not in OPENAI_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Volcano model backend."
-                )
camel/models/watsonx_model.py
CHANGED
@@ -16,7 +16,7 @@ from typing import Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel
 
-from camel.configs import
+from camel.configs import WatsonXConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
@@ -292,21 +292,6 @@ class WatsonXModel(BaseModelBackend):
             logger.error(f"Unexpected error when calling WatsonX API: {e!s}")
             raise
 
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any unexpected
-        arguments to WatsonX API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to WatsonX API.
-        """
-        for param in self.model_config_dict:
-            if param not in WATSONX_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into WatsonX model backend."
-                )
-
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
camel/models/yi_model.py
CHANGED
@@ -15,7 +15,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import
+from camel.configs import YiConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -85,18 +85,3 @@ class YiModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Yi API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Yi API.
-        """
-        for param in self.model_config_dict:
-            if param not in YI_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Yi model backend."
-                )
camel/models/zhipuai_model.py
CHANGED
@@ -13,16 +13,26 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from
+from pydantic import BaseModel
+
+from camel.configs import ZhipuAIConfig
+from camel.logger import get_logger
+from camel.messages import OpenAIMessage
+from camel.models._utils import try_modify_message_with_format
 from camel.models.openai_compatible_model import OpenAICompatibleModel
-from camel.types import
+from camel.types import (
+    ChatCompletion,
+    ModelType,
+)
 from camel.utils import (
     BaseTokenCounter,
     api_keys_required,
 )
 
+logger = get_logger(__name__)
+
 
 class ZhipuAIModel(OpenAICompatibleModel):
     r"""ZhipuAI API in a unified OpenAICompatibleModel interface.
@@ -86,17 +96,51 @@ class ZhipuAIModel(OpenAICompatibleModel):
             **kwargs,
         )
 
-    def
-
-
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
+
+        try_modify_message_with_format(messages[-1], response_format)
+        request_config["response_format"] = {"type": "json_object"}
+        try:
+            return self._client.beta.chat.completions.parse(
+                messages=messages,
+                model=self.model_type,
+                **request_config,
+            )
+        except Exception as e:
+            logger.error(f"Fallback attempt also failed: {e}")
+            raise
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        import copy
 
-
-
-
-
-
-
-
-
-
-
+        request_config = copy.deepcopy(self.model_config_dict)
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
+        try_modify_message_with_format(messages[-1], response_format)
+        request_config["response_format"] = {"type": "json_object"}
+        try:
+            return await self._async_client.beta.chat.completions.parse(
+                messages=messages,
+                model=self.model_type,
+                **request_config,
+            )
+        except Exception as e:
+            logger.error(f"Fallback attempt also failed: {e}")
+            raise
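With the new _request_parse/_arequest_parse paths, a Pydantic response_format handed to the ZhipuAI backend is fulfilled through the client's JSON mode: the last message is rewritten to describe the expected format and response_format={"type": "json_object"} is sent. A hedged end-to-end sketch; the ModelPlatformType.ZHIPU / ModelType.GLM_4 enum values and the ChatAgent.step(..., response_format=...) signature are assumed from the public CAMEL API rather than shown in this diff:

# Hedged sketch: structured output through the ZhipuAI backend.
from pydantic import BaseModel

from camel.agents import ChatAgent
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType


class CityFacts(BaseModel):
    city: str
    country: str
    population: int


model = ModelFactory.create(
    model_platform=ModelPlatformType.ZHIPU,  # assumed enum value
    model_type=ModelType.GLM_4,              # assumed enum value
)
agent = ChatAgent("You return structured facts.", model=model)

# response_format is forwarded to ZhipuAIModel, which routes it through the
# JSON-mode parse fallback shown in the diff above.
response = agent.step("Tell me about Paris.", response_format=CityFacts)
print(response.msgs[0].parsed)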
camel/parsers/__init__.py
ADDED
@@ -0,0 +1,18 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+"""Helper parsers used across the CAMEL project."""
+
+from .mcp_tool_call_parser import extract_tool_calls_from_text
+
+__all__ = ["extract_tool_calls_from_text"]
camel/parsers/mcp_tool_call_parser.py
ADDED
@@ -0,0 +1,176 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+"""Utility functions for parsing MCP tool calls from model output."""
+
+import ast
+import json
+import logging
+import re
+from typing import Any, Dict, List, Optional
+
+try:  # pragma: no cover - optional dependency
+    import yaml
+except ImportError:  # pragma: no cover
+    yaml = None  # type: ignore[assignment]
+
+
+CODE_BLOCK_PATTERN = re.compile(
+    r"```(?:[a-z0-9_-]+)?\s*([\s\S]+?)\s*```",
+    re.IGNORECASE,
+)
+
+JSON_START_PATTERN = re.compile(r"[{\[]")
+JSON_TOKEN_PATTERN = re.compile(
+    r"""
+    (?P<double>"(?:\\.|[^"\\])*")
+    |
+    (?P<single>'(?:\\.|[^'\\])*')
+    |
+    (?P<brace>[{}\[\]])
+    """,
+    re.VERBOSE,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def extract_tool_calls_from_text(content: str) -> List[Dict[str, Any]]:
+    """Extract tool call dictionaries from raw text output."""
+
+    if not content:
+        return []
+
+    tool_calls: List[Dict[str, Any]] = []
+    seen_ranges: List[tuple[int, int]] = []
+
+    for match in CODE_BLOCK_PATTERN.finditer(content):
+        snippet = match.group(1).strip()
+        if not snippet:
+            continue
+
+        parsed = _try_parse_json_like(snippet)
+        if parsed is None:
+            logger.warning(
+                "Failed to parse JSON payload from fenced block: %s",
+                snippet,
+            )
+            continue
+
+        _collect_tool_calls(parsed, tool_calls)
+        seen_ranges.append((match.start(1), match.end(1)))
+
+    for start_match in JSON_START_PATTERN.finditer(content):
+        start_idx = start_match.start()
+
+        if any(start <= start_idx < stop for start, stop in seen_ranges):
+            continue
+
+        segment = _find_json_candidate(content, start_idx)
+        if segment is None:
+            continue
+
+        end_idx = start_idx + len(segment)
+        if any(start <= start_idx < stop for start, stop in seen_ranges):
+            continue
+
+        parsed = _try_parse_json_like(segment)
+        if parsed is None:
+            logger.debug(
+                "Unable to parse JSON-like candidate: %s",
+                _truncate_snippet(segment),
+            )
+            continue
+
+        _collect_tool_calls(parsed, tool_calls)
+        seen_ranges.append((start_idx, end_idx))
+
+    return tool_calls
+
+
+def _collect_tool_calls(
+    payload: Any, accumulator: List[Dict[str, Any]]
+) -> None:
+    """Collect valid tool call dictionaries from parsed payloads."""
+
+    if isinstance(payload, dict):
+        if payload.get("tool_name") is None:
+            return
+        accumulator.append(payload)
+    elif isinstance(payload, list):
+        for item in payload:
+            _collect_tool_calls(item, accumulator)
+
+
+def _try_parse_json_like(snippet: str) -> Optional[Any]:
+    """Parse a JSON or JSON-like snippet into Python data."""
+
+    try:
+        return json.loads(snippet)
+    except json.JSONDecodeError as exc:
+        logger.debug(
+            "json.loads failed: %s | snippet=%s",
+            exc,
+            _truncate_snippet(snippet),
+        )
+
+    if yaml is not None:
+        try:
+            return yaml.safe_load(snippet)
+        except yaml.YAMLError:
+            pass
+
+    try:
+        return ast.literal_eval(snippet)
+    except (ValueError, SyntaxError):
+        return None
+
+
+def _find_json_candidate(content: str, start_idx: int) -> Optional[str]:
+    """Locate a balanced JSON-like segment starting at ``start_idx``."""
+
+    opening = content[start_idx]
+    if opening not in "{[":
+        return None
+
+    stack = ["}" if opening == "{" else "]"]
+
+    for token in JSON_TOKEN_PATTERN.finditer(content, start_idx + 1):
+        if token.lastgroup in {"double", "single"}:
+            continue
+
+        brace = token.group("brace")
+        if brace in "{[":
+            stack.append("}" if brace == "{" else "]")
+            continue
+
+        if not stack:
+            return None
+
+        expected = stack.pop()
+        if brace != expected:
+            return None
+
+        if not stack:
+            return content[start_idx : token.end()]
+
+    return None
+
+
+def _truncate_snippet(snippet: str, limit: int = 120) -> str:
+    """Return a truncated representation suitable for logging."""
+
+    compact = " ".join(snippet.strip().split())
+    if len(compact) <= limit:
+        return compact
+    return f"{compact[: limit - 3]}..."
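Based on the parser added above, a quick usage sketch: the function scans fenced code blocks first, then any bare, balanced JSON-like segments, and keeps only payloads that carry a tool_name key, falling back to YAML or ast.literal_eval when strict JSON parsing fails. The payload shape beyond tool_name (the arguments key here) is illustrative:

# Usage sketch for the new extract_tool_calls_from_text helper.
from camel.parsers import extract_tool_calls_from_text

text = (
    "I will look that up now: "
    '{"tool_name": "search_wiki", "arguments": {"query": "CAMEL-AI"}}'
)

calls = extract_tool_calls_from_text(text)
# -> [{'tool_name': 'search_wiki', 'arguments': {'query': 'CAMEL-AI'}}]
for call in calls:
    print(call["tool_name"], call.get("arguments"))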
camel/retrievers/auto_retriever.py
CHANGED
@@ -97,6 +97,7 @@ class AutoRetriever:
                 "URL (database url) and API key required for TiDB storage "
                 "are not provided. Format: "
                 "mysql+pymysql://<username>:<password>@<host>:4000/test"
+                "You can get the database url from https://tidbcloud.com/console/clusters"
             )
         return TiDBStorage(
             vector_dim=self.embedding_model.get_output_dim(),
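The added hint points users at the TiDB Cloud console for the connection URL. A sketch of the configuration it refers to, with the storage_type and url_and_api_key parameter names assumed from AutoRetriever's existing interface and StorageType.TIDB assumed to exist:

# Hedged sketch: pointing AutoRetriever at a TiDB Cloud cluster.
from camel.retrievers import AutoRetriever
from camel.types import StorageType  # TIDB member assumed

retriever = AutoRetriever(
    storage_type=StorageType.TIDB,
    url_and_api_key=(
        # Database URL from https://tidbcloud.com/console/clusters
        "mysql+pymysql://<username>:<password>@<host>:4000/test",
        "<tidb-api-key>",
    ),
)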