camel-ai 0.2.67__py3-none-any.whl → 0.2.80a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (224)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +6 -2
  3. camel/agents/_utils.py +38 -0
  4. camel/agents/chat_agent.py +4014 -410
  5. camel/agents/mcp_agent.py +30 -27
  6. camel/agents/repo_agent.py +2 -1
  7. camel/benchmarks/browsecomp.py +6 -6
  8. camel/configs/__init__.py +15 -0
  9. camel/configs/aihubmix_config.py +88 -0
  10. camel/configs/amd_config.py +70 -0
  11. camel/configs/cometapi_config.py +104 -0
  12. camel/configs/minimax_config.py +93 -0
  13. camel/configs/nebius_config.py +103 -0
  14. camel/configs/vllm_config.py +2 -0
  15. camel/data_collectors/alpaca_collector.py +15 -6
  16. camel/datagen/self_improving_cot.py +1 -1
  17. camel/datasets/base_generator.py +39 -10
  18. camel/environments/__init__.py +12 -0
  19. camel/environments/rlcards_env.py +860 -0
  20. camel/environments/single_step.py +28 -3
  21. camel/environments/tic_tac_toe.py +1 -1
  22. camel/interpreters/__init__.py +2 -0
  23. camel/interpreters/docker/Dockerfile +4 -16
  24. camel/interpreters/docker_interpreter.py +3 -2
  25. camel/interpreters/e2b_interpreter.py +34 -1
  26. camel/interpreters/internal_python_interpreter.py +51 -2
  27. camel/interpreters/microsandbox_interpreter.py +395 -0
  28. camel/loaders/__init__.py +11 -2
  29. camel/loaders/base_loader.py +85 -0
  30. camel/loaders/chunkr_reader.py +9 -0
  31. camel/loaders/firecrawl_reader.py +4 -4
  32. camel/logger.py +1 -1
  33. camel/memories/agent_memories.py +84 -1
  34. camel/memories/base.py +34 -0
  35. camel/memories/blocks/chat_history_block.py +122 -4
  36. camel/memories/blocks/vectordb_block.py +8 -1
  37. camel/memories/context_creators/score_based.py +29 -237
  38. camel/memories/records.py +88 -8
  39. camel/messages/base.py +166 -40
  40. camel/messages/func_message.py +32 -5
  41. camel/models/__init__.py +10 -0
  42. camel/models/aihubmix_model.py +83 -0
  43. camel/models/aiml_model.py +1 -16
  44. camel/models/amd_model.py +101 -0
  45. camel/models/anthropic_model.py +117 -18
  46. camel/models/aws_bedrock_model.py +2 -33
  47. camel/models/azure_openai_model.py +205 -91
  48. camel/models/base_audio_model.py +3 -1
  49. camel/models/base_model.py +189 -24
  50. camel/models/cohere_model.py +5 -17
  51. camel/models/cometapi_model.py +83 -0
  52. camel/models/crynux_model.py +1 -16
  53. camel/models/deepseek_model.py +6 -16
  54. camel/models/fish_audio_model.py +6 -0
  55. camel/models/gemini_model.py +71 -20
  56. camel/models/groq_model.py +1 -17
  57. camel/models/internlm_model.py +1 -16
  58. camel/models/litellm_model.py +49 -32
  59. camel/models/lmstudio_model.py +1 -17
  60. camel/models/minimax_model.py +83 -0
  61. camel/models/mistral_model.py +1 -16
  62. camel/models/model_factory.py +27 -1
  63. camel/models/model_manager.py +24 -6
  64. camel/models/modelscope_model.py +1 -16
  65. camel/models/moonshot_model.py +185 -19
  66. camel/models/nebius_model.py +83 -0
  67. camel/models/nemotron_model.py +0 -5
  68. camel/models/netmind_model.py +1 -16
  69. camel/models/novita_model.py +1 -16
  70. camel/models/nvidia_model.py +1 -16
  71. camel/models/ollama_model.py +4 -19
  72. camel/models/openai_compatible_model.py +171 -46
  73. camel/models/openai_model.py +205 -77
  74. camel/models/openrouter_model.py +1 -17
  75. camel/models/ppio_model.py +1 -16
  76. camel/models/qianfan_model.py +1 -16
  77. camel/models/qwen_model.py +1 -16
  78. camel/models/reka_model.py +1 -16
  79. camel/models/samba_model.py +34 -47
  80. camel/models/sglang_model.py +64 -31
  81. camel/models/siliconflow_model.py +1 -16
  82. camel/models/stub_model.py +0 -4
  83. camel/models/togetherai_model.py +1 -16
  84. camel/models/vllm_model.py +1 -16
  85. camel/models/volcano_model.py +0 -17
  86. camel/models/watsonx_model.py +1 -16
  87. camel/models/yi_model.py +1 -16
  88. camel/models/zhipuai_model.py +60 -16
  89. camel/parsers/__init__.py +18 -0
  90. camel/parsers/mcp_tool_call_parser.py +176 -0
  91. camel/retrievers/auto_retriever.py +1 -0
  92. camel/runtimes/configs.py +11 -11
  93. camel/runtimes/daytona_runtime.py +15 -16
  94. camel/runtimes/docker_runtime.py +6 -6
  95. camel/runtimes/remote_http_runtime.py +5 -5
  96. camel/services/agent_openapi_server.py +380 -0
  97. camel/societies/__init__.py +2 -0
  98. camel/societies/role_playing.py +26 -28
  99. camel/societies/workforce/__init__.py +2 -0
  100. camel/societies/workforce/events.py +122 -0
  101. camel/societies/workforce/prompts.py +249 -38
  102. camel/societies/workforce/role_playing_worker.py +82 -20
  103. camel/societies/workforce/single_agent_worker.py +634 -34
  104. camel/societies/workforce/structured_output_handler.py +512 -0
  105. camel/societies/workforce/task_channel.py +169 -23
  106. camel/societies/workforce/utils.py +176 -9
  107. camel/societies/workforce/worker.py +77 -23
  108. camel/societies/workforce/workflow_memory_manager.py +772 -0
  109. camel/societies/workforce/workforce.py +3168 -478
  110. camel/societies/workforce/workforce_callback.py +74 -0
  111. camel/societies/workforce/workforce_logger.py +203 -175
  112. camel/societies/workforce/workforce_metrics.py +33 -0
  113. camel/storages/__init__.py +4 -0
  114. camel/storages/key_value_storages/json.py +15 -2
  115. camel/storages/key_value_storages/mem0_cloud.py +48 -47
  116. camel/storages/object_storages/google_cloud.py +1 -1
  117. camel/storages/vectordb_storages/__init__.py +6 -0
  118. camel/storages/vectordb_storages/chroma.py +731 -0
  119. camel/storages/vectordb_storages/oceanbase.py +13 -13
  120. camel/storages/vectordb_storages/pgvector.py +349 -0
  121. camel/storages/vectordb_storages/qdrant.py +3 -3
  122. camel/storages/vectordb_storages/surreal.py +365 -0
  123. camel/storages/vectordb_storages/tidb.py +8 -6
  124. camel/tasks/task.py +244 -27
  125. camel/toolkits/__init__.py +46 -8
  126. camel/toolkits/aci_toolkit.py +64 -19
  127. camel/toolkits/arxiv_toolkit.py +6 -6
  128. camel/toolkits/base.py +63 -5
  129. camel/toolkits/code_execution.py +28 -1
  130. camel/toolkits/context_summarizer_toolkit.py +684 -0
  131. camel/toolkits/craw4ai_toolkit.py +93 -0
  132. camel/toolkits/dappier_toolkit.py +10 -6
  133. camel/toolkits/dingtalk.py +1135 -0
  134. camel/toolkits/edgeone_pages_mcp_toolkit.py +49 -0
  135. camel/toolkits/excel_toolkit.py +901 -67
  136. camel/toolkits/file_toolkit.py +1402 -0
  137. camel/toolkits/function_tool.py +30 -6
  138. camel/toolkits/github_toolkit.py +107 -20
  139. camel/toolkits/gmail_toolkit.py +1839 -0
  140. camel/toolkits/google_calendar_toolkit.py +38 -4
  141. camel/toolkits/google_drive_mcp_toolkit.py +54 -0
  142. camel/toolkits/human_toolkit.py +34 -10
  143. camel/toolkits/hybrid_browser_toolkit/__init__.py +18 -0
  144. camel/toolkits/hybrid_browser_toolkit/config_loader.py +185 -0
  145. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +246 -0
  146. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +1973 -0
  147. camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
  148. camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +3749 -0
  149. camel/toolkits/hybrid_browser_toolkit/ts/package.json +32 -0
  150. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
  151. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +1815 -0
  152. camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +233 -0
  153. camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +590 -0
  154. camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
  155. camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
  156. camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
  157. camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
  158. camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +130 -0
  159. camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
  160. camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +319 -0
  161. camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +1032 -0
  162. camel/toolkits/hybrid_browser_toolkit_py/__init__.py +17 -0
  163. camel/toolkits/hybrid_browser_toolkit_py/actions.py +575 -0
  164. camel/toolkits/hybrid_browser_toolkit_py/agent.py +311 -0
  165. camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +787 -0
  166. camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +490 -0
  167. camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +2390 -0
  168. camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +233 -0
  169. camel/toolkits/hybrid_browser_toolkit_py/stealth_script.js +0 -0
  170. camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +1043 -0
  171. camel/toolkits/image_generation_toolkit.py +390 -0
  172. camel/toolkits/jina_reranker_toolkit.py +3 -4
  173. camel/toolkits/klavis_toolkit.py +5 -1
  174. camel/toolkits/markitdown_toolkit.py +104 -0
  175. camel/toolkits/math_toolkit.py +64 -10
  176. camel/toolkits/mcp_toolkit.py +370 -45
  177. camel/toolkits/memory_toolkit.py +5 -1
  178. camel/toolkits/message_agent_toolkit.py +608 -0
  179. camel/toolkits/message_integration.py +724 -0
  180. camel/toolkits/minimax_mcp_toolkit.py +195 -0
  181. camel/toolkits/note_taking_toolkit.py +277 -0
  182. camel/toolkits/notion_mcp_toolkit.py +224 -0
  183. camel/toolkits/openbb_toolkit.py +5 -1
  184. camel/toolkits/origene_mcp_toolkit.py +56 -0
  185. camel/toolkits/playwright_mcp_toolkit.py +12 -31
  186. camel/toolkits/pptx_toolkit.py +25 -12
  187. camel/toolkits/resend_toolkit.py +168 -0
  188. camel/toolkits/screenshot_toolkit.py +213 -0
  189. camel/toolkits/search_toolkit.py +437 -142
  190. camel/toolkits/slack_toolkit.py +104 -50
  191. camel/toolkits/sympy_toolkit.py +1 -1
  192. camel/toolkits/task_planning_toolkit.py +3 -3
  193. camel/toolkits/terminal_toolkit/__init__.py +18 -0
  194. camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
  195. camel/toolkits/terminal_toolkit/utils.py +532 -0
  196. camel/toolkits/thinking_toolkit.py +1 -1
  197. camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
  198. camel/toolkits/video_analysis_toolkit.py +106 -26
  199. camel/toolkits/video_download_toolkit.py +17 -14
  200. camel/toolkits/web_deploy_toolkit.py +1219 -0
  201. camel/toolkits/wechat_official_toolkit.py +483 -0
  202. camel/toolkits/zapier_toolkit.py +5 -1
  203. camel/types/__init__.py +2 -2
  204. camel/types/agents/tool_calling_record.py +4 -1
  205. camel/types/enums.py +316 -40
  206. camel/types/openai_types.py +2 -2
  207. camel/types/unified_model_type.py +31 -4
  208. camel/utils/commons.py +36 -5
  209. camel/utils/constants.py +3 -0
  210. camel/utils/context_utils.py +1003 -0
  211. camel/utils/mcp.py +138 -4
  212. camel/utils/mcp_client.py +45 -1
  213. camel/utils/message_summarizer.py +148 -0
  214. camel/utils/token_counting.py +43 -20
  215. camel/utils/tool_result.py +44 -0
  216. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +296 -85
  217. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +219 -146
  218. camel/loaders/pandas_reader.py +0 -368
  219. camel/toolkits/dalle_toolkit.py +0 -175
  220. camel/toolkits/file_write_toolkit.py +0 -444
  221. camel/toolkits/openai_agent_toolkit.py +0 -135
  222. camel/toolkits/terminal_toolkit.py +0 -1037
  223. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
  224. {camel_ai-0.2.67.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
@@ -16,6 +16,8 @@ import os
16
16
  from abc import ABC, abstractmethod
17
17
  from typing import Any, Optional
18
18
 
19
+ from camel.utils import Constants
20
+
19
21
 
20
22
  class BaseAudioModel(ABC):
21
23
  r"""Base class for audio models providing Text-to-Speech (TTS) and
@@ -26,7 +28,7 @@ class BaseAudioModel(ABC):
26
28
  self,
27
29
  api_key: Optional[str] = None,
28
30
  url: Optional[str] = None,
29
- timeout: Optional[float] = None,
31
+ timeout: Optional[float] = Constants.TIMEOUT_THRESHOLD,
30
32
  ) -> None:
31
33
  r"""Initialize an instance of BaseAudioModel.
32
34
 
@@ -12,13 +12,19 @@
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
  import abc
15
+ import os
15
16
  import re
16
17
  from abc import ABC, abstractmethod
17
18
  from typing import Any, Dict, List, Optional, Type, Union
18
19
 
19
20
  from openai import AsyncStream, Stream
21
+ from openai.lib.streaming.chat import (
22
+ AsyncChatCompletionStreamManager,
23
+ ChatCompletionStreamManager,
24
+ )
20
25
  from pydantic import BaseModel
21
26
 
27
+ from camel.logger import get_logger as camel_get_logger
22
28
  from camel.messages import OpenAIMessage
23
29
  from camel.types import (
24
30
  ChatCompletion,
@@ -27,7 +33,22 @@ from camel.types import (
27
33
  ParsedChatCompletion,
28
34
  UnifiedModelType,
29
35
  )
30
- from camel.utils import BaseTokenCounter
36
+ from camel.utils import BaseTokenCounter, Constants
37
+
38
+ if os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
39
+ try:
40
+ from traceroot import get_logger # type: ignore[import]
41
+ from traceroot import trace as observe # type: ignore[import]
42
+
43
+ logger = get_logger('base_model')
44
+ except ImportError:
45
+ from camel.utils import observe
46
+
47
+ logger = camel_get_logger('base_model')
48
+ else:
49
+ from camel.utils import observe
50
+
51
+ logger = camel_get_logger('base_model')
31
52
 
32
53
 
33
54
  class ModelBackendMeta(abc.ABCMeta):
@@ -82,7 +103,7 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
82
103
  api_key: Optional[str] = None,
83
104
  url: Optional[str] = None,
84
105
  token_counter: Optional[BaseTokenCounter] = None,
85
- timeout: Optional[float] = None,
106
+ timeout: Optional[float] = Constants.TIMEOUT_THRESHOLD,
86
107
  max_retries: int = 3,
87
108
  ) -> None:
88
109
  self.model_type: UnifiedModelType = UnifiedModelType(model_type)
@@ -94,7 +115,12 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
94
115
  self._token_counter = token_counter
95
116
  self._timeout = timeout
96
117
  self._max_retries = max_retries
97
- self.check_model_config()
118
+ # Initialize logging configuration
119
+ self._log_enabled = (
120
+ os.environ.get("CAMEL_MODEL_LOG_ENABLED", "False").lower()
121
+ == "true"
122
+ )
123
+ self._log_dir = os.environ.get("CAMEL_LOG_DIR", "camel_logs")
98
124
 
99
125
  @property
100
126
  @abstractmethod
@@ -232,13 +258,96 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
232
258
 
233
259
  return formatted_messages
234
260
 
261
+ def _log_request(self, messages: List[OpenAIMessage]) -> Optional[str]:
262
+ r"""Log the request messages to a JSON file if logging is enabled.
263
+
264
+ Args:
265
+ messages (List[OpenAIMessage]): The messages to log.
266
+
267
+ Returns:
268
+ Optional[str]: The path to the log file if logging is enabled,
269
+ None otherwise.
270
+ """
271
+ if not self._log_enabled:
272
+ return None
273
+
274
+ import json
275
+ from datetime import datetime
276
+
277
+ os.makedirs(self._log_dir, exist_ok=True)
278
+
279
+ timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')
280
+ log_file_path = os.path.join(self._log_dir, f"conv_{timestamp}.json")
281
+
282
+ log_entry = {
283
+ "request_timestamp": datetime.now().isoformat(),
284
+ "model": str(self.model_type),
285
+ "request": {"messages": messages},
286
+ }
287
+
288
+ with open(log_file_path, "w") as f:
289
+ json.dump(log_entry, f, indent=4)
290
+
291
+ return log_file_path
292
+
293
+ def _log_response(self, log_path: str, response: Any) -> None:
294
+ r"""Log the response to the existing log file.
295
+
296
+ Args:
297
+ log_path (str): The path to the log file.
298
+ response (Any): The response to log.
299
+ """
300
+ if not self._log_enabled or not log_path:
301
+ return
302
+
303
+ import json
304
+ from datetime import datetime
305
+
306
+ with open(log_path, "r+") as f:
307
+ log_data = json.load(f)
308
+
309
+ log_data["response_timestamp"] = datetime.now().isoformat()
310
+ if isinstance(response, BaseModel):
311
+ log_data["response"] = response.model_dump()
312
+ else:
313
+ try:
314
+ json.dumps(response)
315
+ log_data["response"] = response
316
+ except TypeError:
317
+ log_data["response"] = str(response)
318
+
319
+ f.seek(0)
320
+ json.dump(log_data, f, indent=4)
321
+ f.truncate()
322
+
235
323
  @abstractmethod
236
324
  def _run(
237
325
  self,
238
326
  messages: List[OpenAIMessage],
239
327
  response_format: Optional[Type[BaseModel]] = None,
240
328
  tools: Optional[List[Dict[str, Any]]] = None,
241
- ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
329
+ ) -> Union[
330
+ ChatCompletion,
331
+ Stream[ChatCompletionChunk],
332
+ ChatCompletionStreamManager[BaseModel],
333
+ ]:
334
+ r"""Runs the query to the backend model in a non-stream mode.
335
+
336
+ Args:
337
+ messages (List[OpenAIMessage]): Message list with the chat history
338
+ in OpenAI API format.
339
+ response_format (Optional[Type[BaseModel]]): The format of the
340
+ response.
341
+ tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
342
+ use for the request.
343
+
344
+ Returns:
345
+ Union[ChatCompletion, Stream[ChatCompletionChunk], Any]:
346
+ `ChatCompletion` in the non-stream mode, or
347
+ `Stream[ChatCompletionChunk]` in the stream mode,
348
+ or `ChatCompletionStreamManager[BaseModel]` in the structured
349
+ stream mode.
350
+ """
242
351
  pass
243
352
 
244
353
  @abstractmethod
@@ -247,15 +356,41 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
247
356
  messages: List[OpenAIMessage],
248
357
  response_format: Optional[Type[BaseModel]] = None,
249
358
  tools: Optional[List[Dict[str, Any]]] = None,
250
- ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
359
+ ) -> Union[
360
+ ChatCompletion,
361
+ AsyncStream[ChatCompletionChunk],
362
+ AsyncChatCompletionStreamManager[BaseModel],
363
+ ]:
364
+ r"""Runs the query to the backend model in async non-stream mode.
365
+
366
+ Args:
367
+ messages (List[OpenAIMessage]): Message list with the chat history
368
+ in OpenAI API format.
369
+ response_format (Optional[Type[BaseModel]]): The format of the
370
+ response.
371
+ tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
372
+ use for the request.
373
+
374
+ Returns:
375
+ Union[ChatCompletion, AsyncStream[ChatCompletionChunk], Any]:
376
+ `ChatCompletion` in the non-stream mode, or
377
+ `AsyncStream[ChatCompletionChunk]` in the stream mode,
378
+ or `AsyncChatCompletionStreamManager[BaseModel]` in the
379
+ structured stream mode.
380
+ """
251
381
  pass
252
382
 
383
+ @observe()
253
384
  def run(
254
385
  self,
255
386
  messages: List[OpenAIMessage],
256
387
  response_format: Optional[Type[BaseModel]] = None,
257
388
  tools: Optional[List[Dict[str, Any]]] = None,
258
- ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
389
+ ) -> Union[
390
+ ChatCompletion,
391
+ Stream[ChatCompletionChunk],
392
+ ChatCompletionStreamManager[BaseModel],
393
+ ]:
259
394
  r"""Runs the query to the backend model.
260
395
 
261
396
  Args:
@@ -269,24 +404,47 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
269
404
  (default: :obj:`None`)
270
405
 
271
406
  Returns:
272
- Union[ChatCompletion, Stream[ChatCompletionChunk]]:
273
- `ChatCompletion` in the non-stream mode, or
274
- `Stream[ChatCompletionChunk]` in the stream mode.
407
+ Union[ChatCompletion, Stream[ChatCompletionChunk], Any]:
408
+ `ChatCompletion` in the non-stream mode,
409
+ `Stream[ChatCompletionChunk]` in the stream mode, or
410
+ `ChatCompletionStreamManager[BaseModel]` in the structured
411
+ stream mode.
275
412
  """
413
+ # Log the request if logging is enabled
414
+ log_path = self._log_request(messages)
415
+
276
416
  # None -> use default tools
277
417
  if tools is None:
278
418
  tools = self.model_config_dict.get("tools", None)
279
419
  # Empty -> use no tools
280
420
  elif not tools:
281
421
  tools = None
282
- return self._run(messages, response_format, tools)
283
422
 
423
+ logger.info("Running model: %s", self.model_type)
424
+ logger.info("Messages: %s", messages)
425
+ logger.info("Response format: %s", response_format)
426
+ logger.info("Tools: %s", tools)
427
+
428
+ result = self._run(messages, response_format, tools)
429
+ logger.info("Result: %s", result)
430
+
431
+ # Log the response if logging is enabled
432
+ if log_path:
433
+ self._log_response(log_path, result)
434
+
435
+ return result
436
+
437
+ @observe()
284
438
  async def arun(
285
439
  self,
286
440
  messages: List[OpenAIMessage],
287
441
  response_format: Optional[Type[BaseModel]] = None,
288
442
  tools: Optional[List[Dict[str, Any]]] = None,
289
- ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
443
+ ) -> Union[
444
+ ChatCompletion,
445
+ AsyncStream[ChatCompletionChunk],
446
+ AsyncChatCompletionStreamManager[BaseModel],
447
+ ]:
290
448
  r"""Runs the query to the backend model asynchronously.
291
449
 
292
450
  Args:
@@ -300,26 +458,33 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
300
458
  (default: :obj:`None`)
301
459
 
302
460
  Returns:
303
- Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
304
- `ChatCompletion` in the non-stream mode, or
305
- `AsyncStream[ChatCompletionChunk]` in the stream mode.
461
+ Union[ChatCompletion, AsyncStream[ChatCompletionChunk], Any]:
462
+ `ChatCompletion` in the non-stream mode,
463
+ `AsyncStream[ChatCompletionChunk]` in the stream mode, or
464
+ `AsyncChatCompletionStreamManager[BaseModel]` in the structured
465
+ stream mode.
306
466
  """
467
+ # Log the request if logging is enabled
468
+ log_path = self._log_request(messages)
469
+
307
470
  if tools is None:
308
471
  tools = self.model_config_dict.get("tools", None)
309
472
  elif not tools:
310
473
  tools = None
311
- return await self._arun(messages, response_format, tools)
312
474
 
313
- @abstractmethod
314
- def check_model_config(self):
315
- r"""Check whether the input model configuration contains unexpected
316
- arguments
475
+ logger.info("Running model: %s", self.model_type)
476
+ logger.info("Messages: %s", messages)
477
+ logger.info("Response format: %s", response_format)
478
+ logger.info("Tools: %s", tools)
317
479
 
318
- Raises:
319
- ValueError: If the model configuration dictionary contains any
320
- unexpected argument for this model class.
321
- """
322
- pass
480
+ result = await self._arun(messages, response_format, tools)
481
+ logger.info("Result: %s", result)
482
+
483
+ # Log the response if logging is enabled
484
+ if log_path:
485
+ self._log_response(log_path, result)
486
+
487
+ return result
323
488
 
324
489
  def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
325
490
  r"""Count the number of tokens in the messages using the specific
@@ -21,9 +21,12 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
21
21
  from pydantic import BaseModel
22
22
 
23
23
  if TYPE_CHECKING:
24
- from cohere.types import ChatMessageV2, ChatResponse
24
+ from cohere.types import ( # type: ignore[attr-defined]
25
+ ChatMessageV2,
26
+ ChatResponse,
27
+ )
25
28
 
26
- from camel.configs import COHERE_API_PARAMS, CohereConfig
29
+ from camel.configs import CohereConfig
27
30
  from camel.messages import OpenAIMessage
28
31
  from camel.models import BaseModelBackend
29
32
  from camel.models._utils import try_modify_message_with_format
@@ -451,21 +454,6 @@ class CohereModel(BaseModelBackend):
451
454
 
452
455
  return openai_response
453
456
 
454
- def check_model_config(self):
455
- r"""Check whether the model configuration contains any unexpected
456
- arguments to Cohere API.
457
-
458
- Raises:
459
- ValueError: If the model configuration dictionary contains any
460
- unexpected arguments to Cohere API.
461
- """
462
- for param in self.model_config_dict:
463
- if param not in COHERE_API_PARAMS:
464
- raise ValueError(
465
- f"Unexpected argument `{param}` is "
466
- "input into Cohere model backend."
467
- )
468
-
469
457
  @property
470
458
  def stream(self) -> bool:
471
459
  r"""Returns whether the model is in stream mode, which sends partial
@@ -0,0 +1,83 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+ import os
15
+ from typing import Any, Dict, Optional, Union
16
+
17
+ from camel.configs import CometAPIConfig
18
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
19
+ from camel.types import ModelType
20
+ from camel.utils import (
21
+ BaseTokenCounter,
22
+ api_keys_required,
23
+ )
24
+
25
+
26
+ class CometAPIModel(OpenAICompatibleModel):
27
+ r"""LLM API served by CometAPI in a unified OpenAICompatibleModel
28
+ interface.
29
+
30
+ Args:
31
+ model_type (Union[ModelType, str]): Model for which a backend is
32
+ created.
33
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
34
+ that will be fed into:obj:`openai.ChatCompletion.create()`.
35
+ If:obj:`None`, :obj:`CometAPIConfig().as_dict()` will be used.
36
+ (default: :obj:`None`)
37
+ api_key (Optional[str], optional): The API key for authenticating
38
+ with the CometAPI service. (default: :obj:`None`).
39
+ url (Optional[str], optional): The url to the CometAPI service.
40
+ (default: :obj:`None`)
41
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
42
+ use for the model. If not provided, :obj:`OpenAITokenCounter(
43
+ ModelType.GPT_4O_MINI)` will be used.
44
+ (default: :obj:`None`)
45
+ timeout (Optional[float], optional): The timeout value in seconds for
46
+ API calls. If not provided, will fall back to the MODEL_TIMEOUT
47
+ environment variable or default to 180 seconds.
48
+ (default: :obj:`None`)
49
+ max_retries (int, optional): Maximum number of retries for API calls.
50
+ (default: :obj:`3`)
51
+ **kwargs (Any): Additional arguments to pass to the client
52
+ initialization.
53
+ """
54
+
55
+ @api_keys_required([("api_key", "COMETAPI_KEY")])
56
+ def __init__(
57
+ self,
58
+ model_type: Union[ModelType, str],
59
+ model_config_dict: Optional[Dict[str, Any]] = None,
60
+ api_key: Optional[str] = None,
61
+ url: Optional[str] = None,
62
+ token_counter: Optional[BaseTokenCounter] = None,
63
+ timeout: Optional[float] = None,
64
+ max_retries: int = 3,
65
+ **kwargs: Any,
66
+ ) -> None:
67
+ if model_config_dict is None:
68
+ model_config_dict = CometAPIConfig().as_dict()
69
+ api_key = api_key or os.environ.get("COMETAPI_KEY")
70
+ url = url or os.environ.get(
71
+ "COMETAPI_API_BASE_URL", "https://api.cometapi.com/v1"
72
+ )
73
+ timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
74
+ super().__init__(
75
+ model_type=model_type,
76
+ model_config_dict=model_config_dict,
77
+ api_key=api_key,
78
+ url=url,
79
+ token_counter=token_counter,
80
+ timeout=timeout,
81
+ max_retries=max_retries,
82
+ **kwargs,
83
+ )
@@ -15,7 +15,7 @@
15
15
  import os
16
16
  from typing import Any, Dict, Optional, Union
17
17
 
18
- from camel.configs import CRYNUX_API_PARAMS, CrynuxConfig
18
+ from camel.configs import CrynuxConfig
19
19
  from camel.models.openai_compatible_model import OpenAICompatibleModel
20
20
  from camel.types import ModelType
21
21
  from camel.utils import (
@@ -85,18 +85,3 @@ class CrynuxModel(OpenAICompatibleModel):
85
85
  max_retries=max_retries,
86
86
  **kwargs,
87
87
  )
88
-
89
- def check_model_config(self):
90
- r"""Check whether the model configuration contains any
91
- unexpected arguments to Crynux API.
92
-
93
- Raises:
94
- ValueError: If the model configuration dictionary contains any
95
- unexpected arguments to Crynux API.
96
- """
97
- for param in self.model_config_dict:
98
- if param not in CRYNUX_API_PARAMS:
99
- raise ValueError(
100
- f"Unexpected argument `{param}` is "
101
- "input into Crynux model backend."
102
- )
@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Type, Union
18
18
  from openai import AsyncStream, Stream
19
19
  from pydantic import BaseModel
20
20
 
21
- from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
21
+ from camel.configs import DeepSeekConfig
22
22
  from camel.logger import get_logger
23
23
  from camel.messages import OpenAIMessage
24
24
  from camel.models._utils import try_modify_message_with_format
@@ -40,6 +40,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
40
40
  from langfuse.decorators import observe
41
41
  except ImportError:
42
42
  from camel.utils import observe
43
+ elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
44
+ try:
45
+ from traceroot import trace as observe # type: ignore[import]
46
+ except ImportError:
47
+ from camel.utils import observe
43
48
  else:
44
49
  from camel.utils import observe
45
50
 
@@ -282,18 +287,3 @@ class DeepSeekModel(OpenAICompatibleModel):
282
287
  )
283
288
 
284
289
  return self._post_handle_response(response)
285
-
286
- def check_model_config(self):
287
- r"""Check whether the model configuration contains any
288
- unexpected arguments to DeepSeek API.
289
-
290
- Raises:
291
- ValueError: If the model configuration dictionary contains any
292
- unexpected arguments to DeepSeek API.
293
- """
294
- for param in self.model_config_dict:
295
- if param not in DEEPSEEK_API_PARAMS:
296
- raise ValueError(
297
- f"Unexpected argument `{param}` is "
298
- "input into DeepSeek model backend."
299
- )
@@ -44,6 +44,12 @@ class FishAudioModel(BaseAudioModel):
44
44
  self._url = url or os.environ.get(
45
45
  "FISHAUDIO_API_BASE_URL", "https://api.fish.audio"
46
46
  )
47
+ if self._api_key is None:
48
+ raise ValueError(
49
+ "API key is required for FishAudio. Please provide it via "
50
+ "the 'api_key' parameter or set the 'FISHAUDIO_API_KEY' "
51
+ "environment variable."
52
+ )
47
53
  self.session = Session(apikey=self._api_key, base_url=self._url)
48
54
 
49
55
  def text_to_speech(
@@ -17,7 +17,7 @@ from typing import Any, Dict, List, Optional, Type, Union
17
17
  from openai import AsyncStream, Stream
18
18
  from pydantic import BaseModel
19
19
 
20
- from camel.configs import Gemini_API_PARAMS, GeminiConfig
20
+ from camel.configs import GeminiConfig
21
21
  from camel.messages import OpenAIMessage
22
22
  from camel.models.openai_compatible_model import OpenAICompatibleModel
23
23
  from camel.types import (
@@ -37,6 +37,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
37
37
  from langfuse.decorators import observe
38
38
  except ImportError:
39
39
  from camel.utils import observe
40
+ elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
41
+ try:
42
+ from traceroot import trace as observe # type: ignore[import]
43
+ except ImportError:
44
+ from camel.utils import observe
40
45
  else:
41
46
  from camel.utils import observe
42
47
 
@@ -107,13 +112,46 @@ class GeminiModel(OpenAICompatibleModel):
107
112
 
108
113
  def _process_messages(self, messages) -> List[OpenAIMessage]:
109
114
  r"""Process the messages for Gemini API to ensure no empty content,
110
- which is not accepted by Gemini.
115
+ which is not accepted by Gemini. Also preserves thought signatures
116
+ required for Gemini 3 Pro function calling and adds fallback signatures
117
+ when they are missing.
111
118
  """
119
+ import copy
120
+
112
121
  processed_messages = []
113
122
  for msg in messages:
114
- msg_copy = msg.copy()
123
+ # Use deep copy to preserve all nested structures including
124
+ # thought signatures in extra_content
125
+ msg_copy = copy.deepcopy(msg)
115
126
  if 'content' in msg_copy and msg_copy['content'] == '':
116
127
  msg_copy['content'] = 'null'
128
+
129
+ # Handle missing thought signatures for function calls
130
+ # This is required for Gemini 3 Pro compatibility
131
+ # TODO: support multi round thought signatures
132
+ if (
133
+ msg_copy.get('role') == 'assistant'
134
+ and 'tool_calls' in msg_copy
135
+ and isinstance(msg_copy['tool_calls'], list)
136
+ ):
137
+ for i, tool_call in enumerate(msg_copy['tool_calls']):
138
+ # Check if this is the first tool call in a parallel set
139
+ # or any tool call that's missing a thought signature
140
+ if i == 0: # First tool call should have a signature
141
+ # Check if thought signature is missing
142
+ extra_content = tool_call.get('extra_content', {})
143
+ google_content = extra_content.get('google', {})
144
+
145
+ if 'thought_signature' not in google_content:
146
+ # Add fallback signature for missing signatures
147
+ if 'extra_content' not in tool_call:
148
+ tool_call['extra_content'] = {}
149
+ if 'google' not in tool_call['extra_content']:
150
+ tool_call['extra_content']['google'] = {}
151
+ tool_call['extra_content']['google'][
152
+ 'thought_signature'
153
+ ] = "skip_thought_signature_validator"
154
+
117
155
  processed_messages.append(msg_copy)
118
156
  return processed_messages
119
157
 
@@ -238,7 +276,7 @@ class GeminiModel(OpenAICompatibleModel):
238
276
  function_dict = tool.get('function', {})
239
277
  function_dict.pop("strict", None)
240
278
 
241
- # Process parameters to remove anyOf
279
+ # Process parameters to remove anyOf and handle enum/format
242
280
  if 'parameters' in function_dict:
243
281
  params = function_dict['parameters']
244
282
  if 'properties' in params:
@@ -255,6 +293,20 @@ class GeminiModel(OpenAICompatibleModel):
255
293
  'description'
256
294
  ] = prop_value['description']
257
295
 
296
+ # Handle enum and format restrictions for Gemini
297
+ # API enum: only allowed for string type
298
+ if prop_value.get('type') != 'string':
299
+ prop_value.pop('enum', None)
300
+
301
+ # format: only allowed for string, integer, and
302
+ # number types
303
+ if prop_value.get('type') not in [
304
+ 'string',
305
+ 'integer',
306
+ 'number',
307
+ ]:
308
+ prop_value.pop('format', None)
309
+
258
310
  request_config["tools"] = tools
259
311
 
260
312
  return self._client.chat.completions.create(
@@ -278,7 +330,7 @@ class GeminiModel(OpenAICompatibleModel):
278
330
  function_dict = tool.get('function', {})
279
331
  function_dict.pop("strict", None)
280
332
 
281
- # Process parameters to remove anyOf
333
+ # Process parameters to remove anyOf and handle enum/format
282
334
  if 'parameters' in function_dict:
283
335
  params = function_dict['parameters']
284
336
  if 'properties' in params:
@@ -295,6 +347,20 @@ class GeminiModel(OpenAICompatibleModel):
295
347
  'description'
296
348
  ] = prop_value['description']
297
349
 
350
+ # Handle enum and format restrictions for Gemini
351
+ # API enum: only allowed for string type
352
+ if prop_value.get('type') != 'string':
353
+ prop_value.pop('enum', None)
354
+
355
+ # format: only allowed for string, integer, and
356
+ # number types
357
+ if prop_value.get('type') not in [
358
+ 'string',
359
+ 'integer',
360
+ 'number',
361
+ ]:
362
+ prop_value.pop('format', None)
363
+
298
364
  request_config["tools"] = tools
299
365
 
300
366
  return await self._async_client.chat.completions.create(
@@ -302,18 +368,3 @@ class GeminiModel(OpenAICompatibleModel):
302
368
  model=self.model_type,
303
369
  **request_config,
304
370
  )
305
-
306
- def check_model_config(self):
307
- r"""Check whether the model configuration contains any
308
- unexpected arguments to Gemini API.
309
-
310
- Raises:
311
- ValueError: If the model configuration dictionary contains any
312
- unexpected arguments to Gemini API.
313
- """
314
- for param in self.model_config_dict:
315
- if param not in Gemini_API_PARAMS:
316
- raise ValueError(
317
- f"Unexpected argument `{param}` is "
318
- "input into Gemini model backend."
319
- )