camel-ai 0.2.37__py3-none-any.whl → 0.2.39__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of camel-ai has been flagged as potentially problematic.

Files changed (122)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +4 -0
  3. camel/agents/repo_agent.py +2 -2
  4. camel/benchmarks/apibank.py +1 -1
  5. camel/benchmarks/apibench.py +1 -1
  6. camel/configs/__init__.py +3 -0
  7. camel/configs/modelscope_config.py +59 -0
  8. camel/datagen/evol_instruct/__init__.py +20 -0
  9. camel/datagen/evol_instruct/evol_instruct.py +424 -0
  10. camel/datagen/evol_instruct/scorer.py +166 -0
  11. camel/datagen/evol_instruct/templates.py +268 -0
  12. camel/datagen/self_improving_cot.py +1 -1
  13. camel/datasets/__init__.py +2 -0
  14. camel/datasets/base_generator.py +22 -9
  15. camel/datasets/few_shot_generator.py +2 -3
  16. camel/datasets/self_instruct_generator.py +415 -0
  17. camel/embeddings/openai_compatible_embedding.py +13 -5
  18. camel/environments/models.py +10 -4
  19. camel/environments/single_step.py +181 -41
  20. camel/interpreters/docker_interpreter.py +2 -2
  21. camel/interpreters/e2b_interpreter.py +1 -1
  22. camel/interpreters/internal_python_interpreter.py +1 -1
  23. camel/interpreters/subprocess_interpreter.py +1 -1
  24. camel/loaders/__init__.py +2 -2
  25. camel/loaders/{panda_reader.py → pandas_reader.py} +61 -30
  26. camel/loaders/unstructured_io.py +2 -1
  27. camel/memories/blocks/chat_history_block.py +1 -1
  28. camel/memories/context_creators/score_based.py +198 -67
  29. camel/models/__init__.py +2 -0
  30. camel/models/aiml_model.py +9 -3
  31. camel/models/anthropic_model.py +11 -3
  32. camel/models/azure_openai_model.py +9 -3
  33. camel/models/base_audio_model.py +6 -0
  34. camel/models/base_model.py +4 -0
  35. camel/models/deepseek_model.py +9 -3
  36. camel/models/gemini_model.py +9 -3
  37. camel/models/groq_model.py +9 -3
  38. camel/models/internlm_model.py +8 -2
  39. camel/models/model_factory.py +123 -0
  40. camel/models/modelscope_model.py +208 -0
  41. camel/models/moonshot_model.py +8 -2
  42. camel/models/nemotron_model.py +9 -3
  43. camel/models/nvidia_model.py +9 -3
  44. camel/models/ollama_model.py +9 -3
  45. camel/models/openai_audio_models.py +7 -5
  46. camel/models/openai_compatible_model.py +9 -3
  47. camel/models/openai_model.py +58 -5
  48. camel/models/openrouter_model.py +9 -3
  49. camel/models/qwen_model.py +9 -3
  50. camel/models/samba_model.py +9 -3
  51. camel/models/sglang_model.py +11 -4
  52. camel/models/siliconflow_model.py +8 -2
  53. camel/models/stub_model.py +2 -1
  54. camel/models/togetherai_model.py +11 -5
  55. camel/models/vllm_model.py +10 -4
  56. camel/models/yi_model.py +9 -3
  57. camel/models/zhipuai_model.py +11 -5
  58. camel/retrievers/auto_retriever.py +14 -0
  59. camel/retrievers/vector_retriever.py +1 -1
  60. camel/storages/__init__.py +2 -0
  61. camel/storages/graph_storages/neo4j_graph.py +1 -1
  62. camel/storages/vectordb_storages/__init__.py +2 -0
  63. camel/storages/vectordb_storages/base.py +2 -2
  64. camel/storages/vectordb_storages/milvus.py +2 -2
  65. camel/storages/vectordb_storages/qdrant.py +2 -2
  66. camel/storages/vectordb_storages/tidb.py +332 -0
  67. camel/tasks/task.py +2 -2
  68. camel/toolkits/__init__.py +9 -1
  69. camel/toolkits/arxiv_toolkit.py +2 -1
  70. camel/toolkits/ask_news_toolkit.py +11 -3
  71. camel/toolkits/audio_analysis_toolkit.py +2 -0
  72. camel/toolkits/base.py +3 -0
  73. camel/toolkits/browser_toolkit.py +84 -61
  74. camel/toolkits/code_execution.py +3 -1
  75. camel/toolkits/dappier_toolkit.py +2 -1
  76. camel/toolkits/data_commons_toolkit.py +2 -0
  77. camel/toolkits/excel_toolkit.py +2 -0
  78. camel/toolkits/file_write_toolkit.py +2 -0
  79. camel/toolkits/github_toolkit.py +6 -4
  80. camel/toolkits/google_scholar_toolkit.py +2 -0
  81. camel/toolkits/human_toolkit.py +17 -1
  82. camel/toolkits/image_analysis_toolkit.py +2 -0
  83. camel/toolkits/linkedin_toolkit.py +2 -1
  84. camel/toolkits/math_toolkit.py +2 -0
  85. camel/toolkits/mcp_toolkit.py +42 -52
  86. camel/toolkits/meshy_toolkit.py +20 -2
  87. camel/toolkits/networkx_toolkit.py +2 -0
  88. camel/toolkits/notion_toolkit.py +7 -0
  89. camel/toolkits/openai_agent_toolkit.py +131 -0
  90. camel/toolkits/openbb_toolkit.py +2 -1
  91. camel/toolkits/pubmed_toolkit.py +2 -0
  92. camel/toolkits/reddit_toolkit.py +2 -1
  93. camel/toolkits/retrieval_toolkit.py +2 -1
  94. camel/toolkits/search_toolkit.py +2 -1
  95. camel/toolkits/searxng_toolkit.py +207 -0
  96. camel/toolkits/semantic_scholar_toolkit.py +2 -0
  97. camel/toolkits/slack_toolkit.py +2 -0
  98. camel/toolkits/stripe_toolkit.py +2 -1
  99. camel/toolkits/sympy_toolkit.py +2 -0
  100. camel/toolkits/terminal_toolkit.py +2 -0
  101. camel/toolkits/thinking_toolkit.py +168 -12
  102. camel/toolkits/twitter_toolkit.py +2 -1
  103. camel/toolkits/video_analysis_toolkit.py +2 -1
  104. camel/toolkits/video_download_toolkit.py +2 -1
  105. camel/toolkits/weather_toolkit.py +2 -0
  106. camel/toolkits/whatsapp_toolkit.py +2 -1
  107. camel/toolkits/zapier_toolkit.py +2 -1
  108. camel/types/enums.py +66 -0
  109. camel/types/unified_model_type.py +5 -0
  110. camel/utils/__init__.py +2 -0
  111. camel/utils/chunker/code_chunker.py +9 -9
  112. camel/utils/commons.py +50 -30
  113. camel/utils/constants.py +2 -2
  114. camel/utils/mcp.py +79 -0
  115. camel/verifiers/__init__.py +2 -0
  116. camel/verifiers/base.py +15 -15
  117. camel/verifiers/math_verifier.py +182 -0
  118. camel/verifiers/python_verifier.py +28 -28
  119. {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/METADATA +54 -4
  120. {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/RECORD +122 -110
  121. {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/WHEEL +0 -0
  122. {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/licenses/LICENSE +0 -0
camel/memories/context_creators/score_based.py CHANGED
@@ -11,7 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-from typing import List, Tuple
+from typing import List, Optional, Tuple
 
 from pydantic import BaseModel
 
@@ -19,6 +19,7 @@ from camel.logger import get_logger
 from camel.memories.base import BaseContextCreator
 from camel.memories.records import ContextRecord
 from camel.messages import OpenAIMessage
+from camel.types.enums import OpenAIBackendRole
 from camel.utils import BaseTokenCounter
 
 logger = get_logger(__name__)
@@ -64,96 +65,226 @@ class ScoreBasedContextCreator(BaseContextCreator):
         self,
         records: List[ContextRecord],
     ) -> Tuple[List[OpenAIMessage], int]:
-        r"""Creates conversational context from chat history while respecting
+        r"""Constructs conversation context from chat history while respecting
         token limits.
 
-        Constructs the context from provided records and ensures that the total
-        token count does not exceed the specified limit by pruning the least
-        score messages if necessary.
+        Key strategies:
+        1. System message is always prioritized and preserved
+        2. Truncation removes low-score messages first
+        3. Final output maintains chronological order and in history memory,
+           the score of each message decreases according to keep_rate. The
+           newer the message, the higher the score.
 
         Args:
-            records (List[ContextRecord]): A list of message records from which
-                to generate the context.
+            records (List[ContextRecord]): List of context records with scores
+                and timestamps.
 
         Returns:
-            Tuple[List[OpenAIMessage], int]: A tuple containing the constructed
-                context in OpenAIMessage format and the total token count.
+            Tuple[List[OpenAIMessage], int]:
+            - Ordered list of OpenAI messages
+            - Total token count of the final context
 
         Raises:
-            RuntimeError: If it's impossible to create a valid context without
-                exceeding the token limit.
+            RuntimeError: If system message alone exceeds token limit
         """
-        # Create unique context units list
-        uuid_set = set()
-        context_units = []
+        # ======================
+        # 1. System Message Handling
+        # ======================
+        system_unit, regular_units = self._extract_system_message(records)
+        system_tokens = system_unit.num_tokens if system_unit else 0
+
+        # Check early if system message alone exceeds token limit
+        if system_tokens > self.token_limit:
+            raise RuntimeError(
+                f"System message alone exceeds token limit"
+                f": {system_tokens} > {self.token_limit}",
+                system_tokens,
+            )
+
+        # ======================
+        # 2. Deduplication & Initial Processing
+        # ======================
+        seen_uuids = set()
+        if system_unit:
+            seen_uuids.add(system_unit.record.memory_record.uuid)
+
+        # Process non-system messages with deduplication
         for idx, record in enumerate(records):
-            if record.memory_record.uuid not in uuid_set:
-                uuid_set.add(record.memory_record.uuid)
-                context_units.append(
-                    _ContextUnit(
-                        idx=idx,
-                        record=record,
-                        num_tokens=self.token_counter.count_tokens_from_messages(
-                            [record.memory_record.to_openai_message()]
-                        ),
-                    )
+            if record.memory_record.uuid in seen_uuids:
+                continue
+            seen_uuids.add(record.memory_record.uuid)
+
+            token_count = self.token_counter.count_tokens_from_messages(
+                [record.memory_record.to_openai_message()]
+            )
+            regular_units.append(
+                _ContextUnit(
+                    idx=idx,
+                    record=record,
+                    num_tokens=token_count,
                 )
+            )
 
-        # TODO: optimize the process, may give information back to memory
+        # ======================
+        # 3. Token Calculation
+        # ======================
+        total_tokens = system_tokens + sum(u.num_tokens for u in regular_units)
 
-        # If not exceed token limit, simply return
-        total_tokens = sum([unit.num_tokens for unit in context_units])
+        # ======================
+        # 4. Early Return if Within Limit
+        # ======================
         if total_tokens <= self.token_limit:
-            context_units = sorted(
-                context_units,
-                key=lambda unit: (unit.record.timestamp, unit.record.score),
+            sorted_units = sorted(
+                regular_units, key=self._conversation_sort_key
             )
-            return self._create_output(context_units)
+            return self._assemble_output(sorted_units, system_unit)
 
-        # Log warning about token limit being exceeded
+        # ======================
+        # 5. Truncation Logic
+        # ======================
         logger.warning(
-            f"Token limit reached ({total_tokens} > {self.token_limit}). "
-            f"Some messages will be pruned from memory to meet the limit."
+            f"Context truncation required "
+            f"({total_tokens} > {self.token_limit}), "
+            f"pruning low-score messages."
        )
 
-        # Sort by score
-        context_units = sorted(
-            context_units,
-            key=lambda unit: (unit.record.timestamp, unit.record.score),
+        # Sort for truncation: high scores first, older messages first at same
+        # score
+        sorted_for_truncation = sorted(
+            regular_units, key=self._truncation_sort_key
        )
 
-        # Remove the least score messages until total token number is smaller
-        # than token limit
-        truncate_idx = None
-        for i, unit in enumerate(context_units):
-            if i == len(context_units) - 1:
-                # If we reach the end of the list and still exceed the token
-                raise RuntimeError(
-                    "Cannot create context: exceed token limit.", total_tokens
-                )
-            total_tokens -= unit.num_tokens
-            if total_tokens <= self.token_limit:
-                truncate_idx = i
-                break
-        if truncate_idx is None:
+        # Reverse to process from lowest score (end of sorted list)
+        remaining_units = []
+        current_total = system_tokens
+
+        for unit in sorted_for_truncation:
+            potential_total = current_total + unit.num_tokens
+            if potential_total <= self.token_limit:
+                remaining_units.append(unit)
+                current_total = potential_total
+
+        # ======================
+        # 6. Output Assembly
+        # ======================
+
+        # In case system message is the only message in memory when sorted
+        # units are empty, raise an error
+        if system_unit and len(remaining_units) == 0 and len(records) > 1:
             raise RuntimeError(
-                "Cannot create context: exceed token limit.", total_tokens
+                "System message and current message exceeds token limit ",
+                total_tokens,
             )
-        return self._create_output(context_units[truncate_idx + 1 :])
 
-    def _create_output(
-        self, context_units: List[_ContextUnit]
-    ) -> Tuple[List[OpenAIMessage], int]:
-        r"""Helper method to generate output from context units.
+        # Sort remaining units chronologically
+        final_units = sorted(remaining_units, key=self._conversation_sort_key)
+        return self._assemble_output(final_units, system_unit)
+
+    def _extract_system_message(
+        self, records: List[ContextRecord]
+    ) -> Tuple[Optional[_ContextUnit], List[_ContextUnit]]:
+        r"""Extracts the system message from records and validates it.
+
+        Args:
+            records (List[ContextRecord]): List of context records
+                representing conversation history.
 
-        This method converts the provided context units into a format suitable
-        for output, specifically a list of OpenAIMessages and an integer
-        representing the total token count.
+        Returns:
+            Tuple[Optional[_ContextUnit], List[_ContextUnit]]: containing:
+            - The system message as a `_ContextUnit`, if valid; otherwise,
+              `None`.
+            - An empty list, serving as the initial container for regular
+              messages.
         """
-        context_units = sorted(
-            context_units, key=lambda unit: unit.record.timestamp
+        if not records:
+            return None, []
+
+        first_record = records[0]
+        if (
+            first_record.memory_record.role_at_backend
+            != OpenAIBackendRole.SYSTEM
+        ):
+            return None, []
+
+        message = first_record.memory_record.to_openai_message()
+        tokens = self.token_counter.count_tokens_from_messages([message])
+        system_message_unit = _ContextUnit(
+            idx=0,
+            record=first_record,
+            num_tokens=tokens,
        )
-        return [
-            unit.record.memory_record.to_openai_message()
-            for unit in context_units
-        ], sum([unit.num_tokens for unit in context_units])
+        return system_message_unit, []
+
+    def _truncation_sort_key(self, unit: _ContextUnit) -> Tuple[float, float]:
+        r"""Defines the sorting key for the truncation phase.
+
+        Sorting priority:
+        - Primary: Sort by score in descending order (higher scores first).
+        - Secondary: Sort by timestamp in ascending order (older messages
+          first when scores are equal).
+
+        Args:
+            unit (_ContextUnit): A `_ContextUnit` representing a conversation
+                record.
+
+        Returns:
+            Tuple[float, float]:
+            - Negative score for descending order sorting.
+            - Timestamp for ascending order sorting.
+        """
+        return (-unit.record.score, unit.record.timestamp)
+
+    def _conversation_sort_key(
+        self, unit: _ContextUnit
+    ) -> Tuple[float, float]:
+        r"""Defines the sorting key for assembling the final output.
+
+        Sorting priority:
+        - Primary: Sort by timestamp in ascending order (chronological order).
+        - Secondary: Sort by score in descending order (higher scores first
+          when timestamps are equal).
+
+        Args:
+            unit (_ContextUnit): A `_ContextUnit` representing a conversation
+                record.
+
+        Returns:
+            Tuple[float, float]:
+            - Timestamp for chronological sorting.
+            - Negative score for descending order sorting.
+        """
+        return (unit.record.timestamp, -unit.record.score)
+
+    def _assemble_output(
+        self,
+        context_units: List[_ContextUnit],
+        system_unit: Optional[_ContextUnit],
+    ) -> Tuple[List[OpenAIMessage], int]:
+        r"""Assembles final message list with proper ordering and token count.
+
+        Args:
+            context_units (List[_ContextUnit]): Sorted list of regular message
+                units.
+            system_unit (Optional[_ContextUnit]): System message unit (if
+                present).
+
+        Returns:
+            Tuple[List[OpenAIMessage], int]: Tuple of (ordered messages, total
+                tokens)
+        """
+        messages = []
+        total_tokens = 0
+
+        # Add system message first if present
+        if system_unit:
+            messages.append(
+                system_unit.record.memory_record.to_openai_message()
+            )
+            total_tokens += system_unit.num_tokens
+
+        # Add sorted regular messages
+        for unit in context_units:
+            messages.append(unit.record.memory_record.to_openai_message())
+            total_tokens += unit.num_tokens
+
+        return messages, total_tokens
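
The rewritten create_context boils down to a greedy budget fill: candidate units are visited in (-score, timestamp) order, each is kept only if it still fits under the token limit, and the survivors are re-sorted chronologically for output. A minimal standalone sketch of that policy, with a hypothetical Unit record standing in for the package's _ContextUnit:

from typing import List, NamedTuple

class Unit(NamedTuple):  # hypothetical stand-in for _ContextUnit
    score: float
    timestamp: float
    num_tokens: int

def truncate(units: List[Unit], budget: int) -> List[Unit]:
    # Truncation order: higher scores first, older messages first on ties.
    ranked = sorted(units, key=lambda u: (-u.score, u.timestamp))
    kept: List[Unit] = []
    total = 0
    for u in ranked:
        if total + u.num_tokens <= budget:
            kept.append(u)
            total += u.num_tokens
    # Output order: chronological, higher score first on ties.
    return sorted(kept, key=lambda u: (u.timestamp, -u.score))

units = [Unit(0.9, 1.0, 50), Unit(0.4, 2.0, 80), Unit(0.7, 3.0, 60)]
print(truncate(units, budget=120))  # keeps the score-0.9 and score-0.7 units

Note that a unit that does not fit is skipped rather than ending the scan, so a small low-score message can still enter the context after a larger one has been dropped.
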
camel/models/__init__.py CHANGED
@@ -26,6 +26,7 @@ from .litellm_model import LiteLLMModel
 from .mistral_model import MistralModel
 from .model_factory import ModelFactory
 from .model_manager import ModelManager, ModelProcessingError
+from .modelscope_model import ModelScopeModel
 from .moonshot_model import MoonshotModel
 from .nemotron_model import NemotronModel
 from .nvidia_model import NvidiaModel
@@ -77,6 +78,7 @@ __all__ = [
     'DeepSeekModel',
     'FishAudioModel',
     'InternLMModel',
+    'ModelScopeModel',
     'MoonshotModel',
     'AIMLModel',
     'BaseAudioModel',
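
With this export, the new ModelScope backend is importable directly from camel.models. A hedged usage sketch: the constructor is assumed to mirror the sibling OpenAI-compatible backends in this release, and the model id and environment variable name shown here are illustrative, not confirmed by the diff:

import os
from camel.models import ModelScopeModel

# Assumed signature, following the common backend pattern below:
# (model_type, model_config_dict, api_key, url, token_counter, timeout).
model = ModelScopeModel(
    model_type="Qwen/Qwen2.5-72B-Instruct",  # hypothetical model id
    api_key=os.environ.get("MODELSCOPE_API_KEY"),  # assumed env var name
    timeout=60.0,
)
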
camel/models/aiml_model.py CHANGED
@@ -52,6 +52,10 @@ class AIMLModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required([("api_key", "AIML_API_KEY")])
@@ -62,6 +66,7 @@ class AIMLModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = AIMLConfig().as_dict()
@@ -70,17 +75,18 @@ class AIMLModel(BaseModelBackend):
             "AIML_API_BASE_URL",
             "https://api.aimlapi.com/v1",
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
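
The timeout line added across these backends resolves in a fixed precedence: explicit argument first, then the MODEL_TIMEOUT environment variable, then 180 seconds. The same one-liner, isolated for clarity:

import os

def resolve_timeout(timeout=None):
    # Explicit argument wins; else MODEL_TIMEOUT; else 180 seconds.
    return timeout or float(os.environ.get("MODEL_TIMEOUT", 180))

assert resolve_timeout(60.0) == 60.0
os.environ["MODEL_TIMEOUT"] = "90"
assert resolve_timeout() == 90.0
del os.environ["MODEL_TIMEOUT"]
assert resolve_timeout() == 180.0

One side effect of using `or` rather than an `is None` check: passing timeout=0 falls through to the environment variable or the 180-second default.
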
camel/models/anthropic_model.py CHANGED
@@ -45,6 +45,10 @@ class AnthropicModel(BaseModelBackend):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`AnthropicTokenCounter`
             will be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -60,6 +64,7 @@ class AnthropicModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         from openai import AsyncOpenAI, OpenAI
 
@@ -71,13 +76,16 @@ class AnthropicModel(BaseModelBackend):
             or os.environ.get("ANTHROPIC_API_BASE_URL")
             or "https://api.anthropic.com/v1/"
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+        self.client = OpenAI(
+            base_url=self._url, api_key=self._api_key, timeout=self._timeout
         )
-        self.client = OpenAI(base_url=self._url, api_key=self._api_key)
 
         self.async_client = AsyncOpenAI(
-            api_key=self._api_key, base_url=self._url
+            api_key=self._api_key, base_url=self._url, timeout=self._timeout
         )
 
     @property
camel/models/azure_openai_model.py CHANGED
@@ -49,6 +49,10 @@ class AzureOpenAIModel(BaseModelBackend):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`OpenAITokenCounter`
             will be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
 
     References:
         https://learn.microsoft.com/en-us/azure/ai-services/openai/
@@ -60,6 +64,7 @@ class AzureOpenAIModel(BaseModelBackend):
         model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        timeout: Optional[float] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         api_version: Optional[str] = None,
         azure_deployment_name: Optional[str] = None,
@@ -68,8 +73,9 @@ class AzureOpenAIModel(BaseModelBackend):
             model_config_dict = ChatGPTConfig().as_dict()
         api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
         url = url or os.environ.get("AZURE_OPENAI_BASE_URL")
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
 
         self.api_version = api_version or os.environ.get("AZURE_API_VERSION")
@@ -92,7 +98,7 @@
             azure_deployment=self.azure_deployment_name,
             api_version=self.api_version,
             api_key=self._api_key,
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
         )
 
@@ -101,7 +107,7 @@
             azure_deployment=self.azure_deployment_name,
             api_version=self.api_version,
             api_key=self._api_key,
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
         )
 
camel/models/base_audio_model.py CHANGED
@@ -26,6 +26,7 @@ class BaseAudioModel(ABC):
         self,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         r"""Initialize an instance of BaseAudioModel.
 
@@ -36,9 +37,14 @@ class BaseAudioModel(ABC):
             url (Optional[str]): Base URL for the audio API. If not provided,
                 will use a default URL or look for an environment variable
                 specific to the implementation.
+            timeout (Optional[float], optional): The timeout value in seconds
+                for API calls. If not provided, will fall back to the
+                MODEL_TIMEOUT environment variable or default to 180 seconds.
+                (default: :obj:`None`)
         """
         self._api_key = api_key
         self._url = url
+        self._timeout = timeout
 
     @abstractmethod
     def text_to_speech(
camel/models/base_model.py CHANGED
@@ -69,6 +69,8 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         token_counter (Optional[BaseTokenCounter], optional): Token
             counter to use for the model. If not provided,
             :obj:`OpenAITokenCounter` will be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. (default: :obj:`None`)
     """
 
     def __init__(
@@ -78,6 +80,7 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         self.model_type: UnifiedModelType = UnifiedModelType(model_type)
         if model_config_dict is None:
@@ -86,6 +89,7 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         self._api_key = api_key
         self._url = url
         self._token_counter = token_counter
+        self._timeout = timeout
         self.check_model_config()
 
     @property
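
Since BaseModelBackend now accepts and stores the timeout, a concrete backend only needs to resolve the value and forward it to super().__init__(), after which self._timeout is available for whatever HTTP client it constructs. A hypothetical minimal subclass sketching the pattern the model diffs below all repeat (CustomModel is not part of camel-ai, and the base class's remaining abstract members are omitted here):

import os
from typing import Any, Dict, Optional

from camel.models import BaseModelBackend

class CustomModel(BaseModelBackend):  # hypothetical example backend
    def __init__(
        self,
        model_type,
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter=None,
        timeout: Optional[float] = None,
    ) -> None:
        # Same resolution line the concrete backends use.
        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
        super().__init__(
            model_type, model_config_dict, api_key, url, token_counter, timeout
        )
        # self._timeout is set by the base class and can now be handed to
        # the backend's HTTP client(s).

    def check_model_config(self) -> None:
        # Called from BaseModelBackend.__init__; a no-op in this sketch.
        pass
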
camel/models/deepseek_model.py CHANGED
@@ -60,6 +60,10 @@ class DeepSeekModel(BaseModelBackend):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`OpenAITokenCounter`
             will be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
 
     References:
         https://api-docs.deepseek.com/
@@ -77,6 +81,7 @@ class DeepSeekModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = DeepSeekConfig().as_dict()
@@ -85,19 +90,20 @@ class DeepSeekModel(BaseModelBackend):
             "DEEPSEEK_API_BASE_URL",
             "https://api.deepseek.com",
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
 
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
 
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/gemini_model.py CHANGED
@@ -51,6 +51,10 @@ class GeminiModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -65,6 +69,7 @@ class GeminiModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = GeminiConfig().as_dict()
@@ -73,17 +78,18 @@ class GeminiModel(BaseModelBackend):
             "GEMINI_API_BASE_URL",
             "https://generativelanguage.googleapis.com/v1beta/openai/",
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/groq_model.py CHANGED
@@ -51,6 +51,10 @@ class GroqModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required([("api_key", "GROQ_API_KEY")])
@@ -61,6 +65,7 @@ class GroqModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = GroqConfig().as_dict()
@@ -68,17 +73,18 @@ class GroqModel(BaseModelBackend):
         url = url or os.environ.get(
             "GROQ_API_BASE_URL", "https://api.groq.com/openai/v1"
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/internlm_model.py CHANGED
@@ -51,6 +51,10 @@ class InternLMModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -65,6 +69,7 @@ class InternLMModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = InternLMConfig().as_dict()
@@ -73,11 +78,12 @@ class InternLMModel(BaseModelBackend):
             "INTERNLM_API_BASE_URL",
             "https://internlm-chat.intern-ai.org.cn/puyu/api/v1",
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
        )
         self._client = OpenAI(
-            timeout=180,
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,