camel-ai 0.2.37__py3-none-any.whl → 0.2.39__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (122)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +4 -0
  3. camel/agents/repo_agent.py +2 -2
  4. camel/benchmarks/apibank.py +1 -1
  5. camel/benchmarks/apibench.py +1 -1
  6. camel/configs/__init__.py +3 -0
  7. camel/configs/modelscope_config.py +59 -0
  8. camel/datagen/evol_instruct/__init__.py +20 -0
  9. camel/datagen/evol_instruct/evol_instruct.py +424 -0
  10. camel/datagen/evol_instruct/scorer.py +166 -0
  11. camel/datagen/evol_instruct/templates.py +268 -0
  12. camel/datagen/self_improving_cot.py +1 -1
  13. camel/datasets/__init__.py +2 -0
  14. camel/datasets/base_generator.py +22 -9
  15. camel/datasets/few_shot_generator.py +2 -3
  16. camel/datasets/self_instruct_generator.py +415 -0
  17. camel/embeddings/openai_compatible_embedding.py +13 -5
  18. camel/environments/models.py +10 -4
  19. camel/environments/single_step.py +181 -41
  20. camel/interpreters/docker_interpreter.py +2 -2
  21. camel/interpreters/e2b_interpreter.py +1 -1
  22. camel/interpreters/internal_python_interpreter.py +1 -1
  23. camel/interpreters/subprocess_interpreter.py +1 -1
  24. camel/loaders/__init__.py +2 -2
  25. camel/loaders/{panda_reader.py → pandas_reader.py} +61 -30
  26. camel/loaders/unstructured_io.py +2 -1
  27. camel/memories/blocks/chat_history_block.py +1 -1
  28. camel/memories/context_creators/score_based.py +198 -67
  29. camel/models/__init__.py +2 -0
  30. camel/models/aiml_model.py +9 -3
  31. camel/models/anthropic_model.py +11 -3
  32. camel/models/azure_openai_model.py +9 -3
  33. camel/models/base_audio_model.py +6 -0
  34. camel/models/base_model.py +4 -0
  35. camel/models/deepseek_model.py +9 -3
  36. camel/models/gemini_model.py +9 -3
  37. camel/models/groq_model.py +9 -3
  38. camel/models/internlm_model.py +8 -2
  39. camel/models/model_factory.py +123 -0
  40. camel/models/modelscope_model.py +208 -0
  41. camel/models/moonshot_model.py +8 -2
  42. camel/models/nemotron_model.py +9 -3
  43. camel/models/nvidia_model.py +9 -3
  44. camel/models/ollama_model.py +9 -3
  45. camel/models/openai_audio_models.py +7 -5
  46. camel/models/openai_compatible_model.py +9 -3
  47. camel/models/openai_model.py +58 -5
  48. camel/models/openrouter_model.py +9 -3
  49. camel/models/qwen_model.py +9 -3
  50. camel/models/samba_model.py +9 -3
  51. camel/models/sglang_model.py +11 -4
  52. camel/models/siliconflow_model.py +8 -2
  53. camel/models/stub_model.py +2 -1
  54. camel/models/togetherai_model.py +11 -5
  55. camel/models/vllm_model.py +10 -4
  56. camel/models/yi_model.py +9 -3
  57. camel/models/zhipuai_model.py +11 -5
  58. camel/retrievers/auto_retriever.py +14 -0
  59. camel/retrievers/vector_retriever.py +1 -1
  60. camel/storages/__init__.py +2 -0
  61. camel/storages/graph_storages/neo4j_graph.py +1 -1
  62. camel/storages/vectordb_storages/__init__.py +2 -0
  63. camel/storages/vectordb_storages/base.py +2 -2
  64. camel/storages/vectordb_storages/milvus.py +2 -2
  65. camel/storages/vectordb_storages/qdrant.py +2 -2
  66. camel/storages/vectordb_storages/tidb.py +332 -0
  67. camel/tasks/task.py +2 -2
  68. camel/toolkits/__init__.py +9 -1
  69. camel/toolkits/arxiv_toolkit.py +2 -1
  70. camel/toolkits/ask_news_toolkit.py +11 -3
  71. camel/toolkits/audio_analysis_toolkit.py +2 -0
  72. camel/toolkits/base.py +3 -0
  73. camel/toolkits/browser_toolkit.py +84 -61
  74. camel/toolkits/code_execution.py +3 -1
  75. camel/toolkits/dappier_toolkit.py +2 -1
  76. camel/toolkits/data_commons_toolkit.py +2 -0
  77. camel/toolkits/excel_toolkit.py +2 -0
  78. camel/toolkits/file_write_toolkit.py +2 -0
  79. camel/toolkits/github_toolkit.py +6 -4
  80. camel/toolkits/google_scholar_toolkit.py +2 -0
  81. camel/toolkits/human_toolkit.py +17 -1
  82. camel/toolkits/image_analysis_toolkit.py +2 -0
  83. camel/toolkits/linkedin_toolkit.py +2 -1
  84. camel/toolkits/math_toolkit.py +2 -0
  85. camel/toolkits/mcp_toolkit.py +42 -52
  86. camel/toolkits/meshy_toolkit.py +20 -2
  87. camel/toolkits/networkx_toolkit.py +2 -0
  88. camel/toolkits/notion_toolkit.py +7 -0
  89. camel/toolkits/openai_agent_toolkit.py +131 -0
  90. camel/toolkits/openbb_toolkit.py +2 -1
  91. camel/toolkits/pubmed_toolkit.py +2 -0
  92. camel/toolkits/reddit_toolkit.py +2 -1
  93. camel/toolkits/retrieval_toolkit.py +2 -1
  94. camel/toolkits/search_toolkit.py +2 -1
  95. camel/toolkits/searxng_toolkit.py +207 -0
  96. camel/toolkits/semantic_scholar_toolkit.py +2 -0
  97. camel/toolkits/slack_toolkit.py +2 -0
  98. camel/toolkits/stripe_toolkit.py +2 -1
  99. camel/toolkits/sympy_toolkit.py +2 -0
  100. camel/toolkits/terminal_toolkit.py +2 -0
  101. camel/toolkits/thinking_toolkit.py +168 -12
  102. camel/toolkits/twitter_toolkit.py +2 -1
  103. camel/toolkits/video_analysis_toolkit.py +2 -1
  104. camel/toolkits/video_download_toolkit.py +2 -1
  105. camel/toolkits/weather_toolkit.py +2 -0
  106. camel/toolkits/whatsapp_toolkit.py +2 -1
  107. camel/toolkits/zapier_toolkit.py +2 -1
  108. camel/types/enums.py +66 -0
  109. camel/types/unified_model_type.py +5 -0
  110. camel/utils/__init__.py +2 -0
  111. camel/utils/chunker/code_chunker.py +9 -9
  112. camel/utils/commons.py +50 -30
  113. camel/utils/constants.py +2 -2
  114. camel/utils/mcp.py +79 -0
  115. camel/verifiers/__init__.py +2 -0
  116. camel/verifiers/base.py +15 -15
  117. camel/verifiers/math_verifier.py +182 -0
  118. camel/verifiers/python_verifier.py +28 -28
  119. {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/METADATA +54 -4
  120. {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/RECORD +122 -110
  121. {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/WHEEL +0 -0
  122. {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/licenses/LICENSE +0 -0
camel/models/openai_model.py CHANGED
@@ -60,6 +60,10 @@ class OpenAIModel(BaseModelBackend):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`OpenAITokenCounter` will
             be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -74,31 +78,33 @@ class OpenAIModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = ChatGPTConfig().as_dict()
         api_key = api_key or os.environ.get("OPENAI_API_KEY")
         url = url or os.environ.get("OPENAI_API_BASE_URL")
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
 
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
 
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
         )
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
         )
 
     def _sanitize_config(self, config_dict: Dict[str, Any]) -> Dict[str, Any]:
-        """Sanitize the model configuration for O1 models."""
+        r"""Sanitize the model configuration for O1 models."""
 
         if self.model_type in [
             ModelType.O1,
@@ -107,7 +113,7 @@ class OpenAIModel(BaseModelBackend):
             ModelType.O3_MINI,
         ]:
             warnings.warn(
-                "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
+                "Warning: You are using an reasoning model (O1 or O3), "
                 "which has certain limitations, reference: "
                 "`https://platform.openai.com/docs/guides/reasoning`.",
                 UserWarning,
@@ -119,6 +125,52 @@ class OpenAIModel(BaseModelBackend):
         }
         return config_dict
 
+    def _adapt_messages_for_o1_models(
+        self, messages: List[OpenAIMessage]
+    ) -> List[OpenAIMessage]:
+        r"""Adjust message roles to comply with O1 model requirements by
+        converting 'system' or 'developer' to 'user' role.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            processed_messages (List[OpenAIMessage]): Return a new list of
+                messages to avoid mutating input.
+        """
+
+        # Define supported O1 model types as a class constant would be better
+        O1_MODEL_TYPES = {ModelType.O1_MINI, ModelType.O1_PREVIEW}
+
+        if self.model_type not in O1_MODEL_TYPES:
+            return messages.copy()
+
+        # Issue warning only once using class state
+        if not hasattr(self, "_o1_warning_issued"):
+            warnings.warn(
+                "O1 models (O1_MINI/O1_PREVIEW) have role limitations: "
+                "System or Developer messages will be converted to user role."
+                "Reference: https://community.openai.com/t/"
+                "developer-role-not-accepted-for-o1-o1-mini-o3-mini/1110750/7",
+                UserWarning,
+                stacklevel=2,
+            )
+            self._o1_warning_issued = True
+
+        # Create new message list to avoid mutating input
+        processed_messages = []
+        for message in messages:
+            processed_message = message.copy()
+            if (
+                processed_message["role"] == "system"
+                or processed_message["role"] == "developer"
+            ):
+                processed_message["role"] = "user"  # type: ignore[arg-type]
+            processed_messages.append(processed_message)
+
+        return processed_messages
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
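The effect of the new _adapt_messages_for_o1_models helper can be illustrated in isolation. The sketch below is a simplified, hypothetical re-implementation of the role conversion shown in the hunk above (plain dicts stand in for OpenAIMessage, and the model check is reduced to a boolean flag); it is not the package's own API:

import copy
from typing import Dict, List

def adapt_for_o1(
    messages: List[Dict[str, str]], is_o1: bool
) -> List[Dict[str, str]]:
    # Convert 'system'/'developer' roles to 'user', copying each
    # message so the caller's list is never mutated.
    if not is_o1:
        return [copy.copy(m) for m in messages]
    adapted = []
    for message in messages:
        m = copy.copy(message)
        if m["role"] in ("system", "developer"):
            m["role"] = "user"
        adapted.append(m)
    return adapted

history = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(adapt_for_o1(history, is_o1=True))
# -> the system message is re-tagged with the user role

The shipped version additionally issues a one-time UserWarning on the first conversion, tracked via the _o1_warning_issued attribute.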
@@ -152,6 +204,7 @@ class OpenAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        messages = self._adapt_messages_for_o1_models(messages)
        response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
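Every backend in this release gains the same timeout plumbing (the hunks below repeat it for OpenRouter, Qwen, SambaNova, SGLang, SiliconFlow, Together AI, vLLM, Yi, ZhipuAI, and the stub model), so the resolution order is worth spelling out: explicit argument, then the MODEL_TIMEOUT environment variable, then 180 seconds. A minimal sketch, assuming OpenAIModel is constructed directly and OPENAI_API_KEY is already set:

import os
from camel.models import OpenAIModel
from camel.types import ModelType

# 1. An explicit per-instance timeout takes precedence.
model = OpenAIModel(model_type=ModelType.GPT_4O_MINI, timeout=30.0)

# 2. Otherwise the MODEL_TIMEOUT environment variable is used.
os.environ["MODEL_TIMEOUT"] = "60"
model = OpenAIModel(model_type=ModelType.GPT_4O_MINI)  # 60-second timeout

# 3. With neither set, the timeout falls back to 180 seconds.

One caveat follows from the `timeout or float(...)` pattern above: a falsy value such as timeout=0 is treated as unset and silently replaced by the fallback.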
camel/models/openrouter_model.py CHANGED
@@ -51,6 +51,10 @@ class OpenRouterModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required([("api_key", "OPENROUTER_API_KEY")])
@@ -61,6 +65,7 @@ class OpenRouterModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = OpenRouterConfig().as_dict()
@@ -68,17 +73,18 @@ class OpenRouterModel(BaseModelBackend):
         url = url or os.environ.get(
             "OPENROUTER_API_BASE_URL", "https://openrouter.ai/api/v1"
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/qwen_model.py CHANGED
@@ -52,6 +52,10 @@ class QwenModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -66,6 +70,7 @@ class QwenModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = QwenConfig().as_dict()
@@ -74,17 +79,18 @@ class QwenModel(BaseModelBackend):
             "QWEN_API_BASE_URL",
             "https://dashscope.aliyuncs.com/compatible-mode/v1",
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/samba_model.py CHANGED
@@ -73,6 +73,10 @@ class SambaModel(BaseModelBackend):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -87,6 +91,7 @@ class SambaModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = SambaCloudAPIConfig().as_dict()
@@ -95,19 +100,20 @@ class SambaModel(BaseModelBackend):
             "SAMBA_API_BASE_URL",
             "https://api.sambanova.ai/v1",
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
 
         if self._url == "https://api.sambanova.ai/v1":
             self._client = OpenAI(
-                timeout=180,
+                timeout=self._timeout,
                 max_retries=3,
                 base_url=self._url,
                 api_key=self._api_key,
             )
             self._async_client = AsyncOpenAI(
-                timeout=180,
+                timeout=self._timeout,
                 max_retries=3,
                 base_url=self._url,
                 api_key=self._api_key,
camel/models/sglang_model.py CHANGED
@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import logging
+import os
 import subprocess
 import threading
 import time
@@ -51,6 +52,10 @@ class SGLangModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
 
     Reference: https://sgl-project.github.io/backend/openai_api_completions.html
     """
@@ -62,6 +67,7 @@ class SGLangModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = SGLangConfig().as_dict()
@@ -73,8 +79,9 @@ class SGLangModel(BaseModelBackend):
         self._lock = threading.Lock()
         self._inactivity_thread: Optional[threading.Thread] = None
 
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
 
         self._client = None
@@ -82,13 +89,13 @@ class SGLangModel(BaseModelBackend):
         if self._url:
             # Initialize the client if an existing URL is provided
             self._client = OpenAI(
-                timeout=180,
+                timeout=self._timeout,
                 max_retries=3,
                 api_key="Set-but-ignored",  # required but ignored
                 base_url=self._url,
             )
             self._async_client = AsyncOpenAI(
-                timeout=180,
+                timeout=self._timeout,
                 max_retries=3,
                 api_key="Set-but-ignored",  # required but ignored
                 base_url=self._url,
@@ -123,7 +130,7 @@ class SGLangModel(BaseModelBackend):
             self.last_run_time = time.time()
             # Initialize the client after the server starts
             self._client = OpenAI(
-                timeout=180,
+                timeout=self._timeout,
                 max_retries=3,
                 api_key="Set-but-ignored",  # required but ignored
                 base_url=self._url,
camel/models/siliconflow_model.py CHANGED
@@ -51,6 +51,10 @@ class SiliconFlowModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -65,6 +69,7 @@ class SiliconFlowModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = SiliconFlowConfig().as_dict()
@@ -73,11 +78,12 @@ class SiliconFlowModel(BaseModelBackend):
             "SILICONFLOW_API_BASE_URL",
             "https://api.siliconflow.cn/v1/",
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/stub_model.py CHANGED
@@ -82,10 +82,11 @@ class StubModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         r"""All arguments are unused for the dummy model."""
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
 
     @property
camel/models/togetherai_model.py CHANGED
@@ -52,6 +52,10 @@ class TogetherAIModel(BaseModelBackend):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -66,6 +70,7 @@ class TogetherAIModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = TogetherAIConfig().as_dict()
@@ -73,18 +78,19 @@ class TogetherAIModel(BaseModelBackend):
         url = url or os.environ.get(
             "TOGETHER_API_BASE_URL", "https://api.together.xyz/v1"
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
 
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
@@ -107,7 +113,7 @@ class TogetherAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
-        # Use OpenAI cilent as interface call Together AI
+        # Use OpenAI client as interface call Together AI
         # Reference: https://docs.together.ai/docs/openai-api-compatibility
         response = await self._async_client.chat.completions.create(
             messages=messages,
@@ -133,7 +139,7 @@ class TogetherAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        # Use OpenAI cilent as interface call Together AI
+        # Use OpenAI client as interface call Together AI
         # Reference: https://docs.together.ai/docs/openai-api-compatibility
         response = self._client.chat.completions.create(
             messages=messages,
camel/models/vllm_model.py CHANGED
@@ -50,6 +50,10 @@ class VLLMModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
 
     References:
         https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html
@@ -62,24 +66,26 @@ class VLLMModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = VLLMConfig().as_dict()
         url = url or os.environ.get("VLLM_BASE_URL")
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         if not self._url:
             self._start_server()
-        # Use OpenAI cilent as interface call vLLM
+        # Use OpenAI client as interface call vLLM
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key="EMPTY",  # required but ignored
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key="EMPTY",  # required but ignored
             base_url=self._url,
camel/models/yi_model.py CHANGED
@@ -51,6 +51,10 @@ class YiModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -65,6 +69,7 @@ class YiModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = YiConfig().as_dict()
@@ -72,17 +77,18 @@ class YiModel(BaseModelBackend):
         url = url or os.environ.get(
             "YI_API_BASE_URL", "https://api.lingyiwanwu.com/v1"
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/zhipuai_model.py CHANGED
@@ -51,6 +51,10 @@ class ZhipuAIModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -65,6 +69,7 @@ class ZhipuAIModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = ZhipuAIConfig().as_dict()
@@ -72,17 +77,18 @@ class ZhipuAIModel(BaseModelBackend):
         url = url or os.environ.get(
             "ZHIPUAI_API_BASE_URL", "https://open.bigmodel.cn/api/paas/v4/"
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
        )
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
@@ -105,7 +111,7 @@ class ZhipuAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
-        # Use OpenAI cilent as interface call ZhipuAI
+        # Use OpenAI client as interface call ZhipuAI
         # Reference: https://open.bigmodel.cn/dev/api#openai_sdk
         response = await self._async_client.chat.completions.create(
             messages=messages,
@@ -131,7 +137,7 @@ class ZhipuAIModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        # Use OpenAI cilent as interface call ZhipuAI
+        # Use OpenAI client as interface call ZhipuAI
         # Reference: https://open.bigmodel.cn/dev/api#openai_sdk
         response = self._client.chat.completions.create(
             messages=messages,
camel/retrievers/auto_retriever.py CHANGED
@@ -29,6 +29,7 @@ from camel.storages import (
     BaseVectorStorage,
     MilvusStorage,
     QdrantStorage,
+    TiDBStorage,
 )
 from camel.types import StorageType
 from camel.utils import Constants
@@ -90,6 +91,19 @@ class AutoRetriever:
                 url_and_api_key=self.url_and_api_key,
             )
 
+        if self.storage_type == StorageType.TIDB:
+            if self.url_and_api_key is None:
+                raise ValueError(
+                    "URL (database url) and API key required for TiDB storage "
+                    "are not provided. Format: "
+                    "mysql+pymysql://<username>:<password>@<host>:4000/test"
+                )
+            return TiDBStorage(
+                vector_dim=self.embedding_model.get_output_dim(),
+                collection_name=collection_name,
+                url_and_api_key=self.url_and_api_key,
+            )
+
         if self.storage_type == StorageType.QDRANT:
             return QdrantStorage(
                 vector_dim=self.embedding_model.get_output_dim(),
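The TiDB branch above constructs the new storage with the same three arguments the Milvus and Qdrant branches use. A hypothetical direct-construction sketch (the tuple shape of url_and_api_key and the 1536-dimension figure are assumptions; the connection-string format is taken verbatim from the ValueError above, and its placeholders are left as placeholders):

from camel.storages import TiDBStorage

storage = TiDBStorage(
    vector_dim=1536,  # must match the embedding model's output dimension
    collection_name="my_collection",
    url_and_api_key=(
        "mysql+pymysql://<username>:<password>@<host>:4000/test",
        "",  # API-key slot; exact expected shape is an assumption
    ),
)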
camel/retrievers/vector_retriever.py CHANGED
@@ -224,7 +224,7 @@ class VectorRetriever(BaseRetriever):
         if top_k <= 0:
             raise ValueError("top_k must be a positive integer.")
 
-        # Load the storage incase it's hosted remote
+        # Load the storage in case it's hosted remote
         self.storage.load()
 
         query_vector = self.embedding_model.embed(obj=query)
camel/storages/__init__.py CHANGED
@@ -28,6 +28,7 @@ from .vectordb_storages.base import (
 )
 from .vectordb_storages.milvus import MilvusStorage
 from .vectordb_storages.qdrant import QdrantStorage
+from .vectordb_storages.tidb import TiDBStorage
 
 __all__ = [
     'BaseKeyValueStorage',
@@ -40,6 +41,7 @@ __all__ = [
     'VectorDBQueryResult',
     'QdrantStorage',
     'MilvusStorage',
+    "TiDBStorage",
     'BaseGraphStorage',
     'Neo4jGraph',
     'NebulaGraph',
camel/storages/graph_storages/neo4j_graph.py CHANGED
@@ -69,7 +69,7 @@ class Neo4jGraph(BaseGraphStorage):
     The detailed information about Neo4j is available at:
     `Neo4j https://neo4j.com/docs/getting-started`
 
-    This module refered to the work of Langchian and Llamaindex.
+    This module referred to the work of Langchian and Llamaindex.
 
     Args:
         url (str): The URL of the Neo4j database server.
camel/storages/vectordb_storages/__init__.py CHANGED
@@ -21,6 +21,7 @@ from .base import (
 )
 from .milvus import MilvusStorage
 from .qdrant import QdrantStorage
+from .tidb import TiDBStorage
 
 __all__ = [
     'BaseVectorStorage',
@@ -28,6 +29,7 @@ __all__ = [
     'VectorDBQueryResult',
     'QdrantStorage',
     'MilvusStorage',
+    "TiDBStorage",
     'VectorRecord',
     'VectorDBStatus',
 ]
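With both __init__.py hunks applied, the new storage class is importable from either namespace. A quick sanity check, assuming the package installs cleanly:

from camel.storages import TiDBStorage as OuterTiDB
from camel.storages.vectordb_storages import TiDBStorage as InnerTiDB

# Both re-exports added in this release resolve to the same class object,
# since each __init__.py imports it from the same .tidb module.
assert OuterTiDB is InnerTiDB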
camel/storages/vectordb_storages/base.py CHANGED
@@ -100,7 +100,7 @@ class VectorDBStatus(BaseModel):
     r"""Vector database status.
 
     Attributes:
-        vector_dim (int): The dimention of stored vectors.
+        vector_dim (int): The dimension of stored vectors.
         vector_count (int): The number of stored vectors.
 
     """
@@ -204,7 +204,7 @@ class BaseVectorStorage(ABC):
 
         Args:
             vector (List[float]): The search vector.
-            top_k (int): The number of top similer vectors.
+            top_k (int): The number of top similar vectors.
 
         Returns:
             List[List[Dict[str, Any]]]: A list of vector payloads retrieved