camel-ai 0.2.35__py3-none-any.whl → 0.2.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (59)
  1. camel/__init__.py +1 -1
  2. camel/agents/__init__.py +2 -0
  3. camel/agents/repo_agent.py +579 -0
  4. camel/configs/__init__.py +3 -0
  5. camel/configs/aiml_config.py +20 -19
  6. camel/configs/anthropic_config.py +25 -27
  7. camel/configs/cohere_config.py +11 -10
  8. camel/configs/deepseek_config.py +16 -16
  9. camel/configs/gemini_config.py +8 -8
  10. camel/configs/groq_config.py +18 -19
  11. camel/configs/internlm_config.py +8 -8
  12. camel/configs/litellm_config.py +26 -24
  13. camel/configs/mistral_config.py +8 -8
  14. camel/configs/moonshot_config.py +11 -11
  15. camel/configs/nvidia_config.py +13 -13
  16. camel/configs/ollama_config.py +14 -15
  17. camel/configs/openai_config.py +3 -3
  18. camel/configs/openrouter_config.py +106 -0
  19. camel/configs/qwen_config.py +8 -8
  20. camel/configs/reka_config.py +12 -11
  21. camel/configs/samba_config.py +14 -14
  22. camel/configs/sglang_config.py +15 -16
  23. camel/configs/siliconflow_config.py +18 -17
  24. camel/configs/togetherai_config.py +18 -19
  25. camel/configs/vllm_config.py +18 -19
  26. camel/configs/yi_config.py +7 -8
  27. camel/configs/zhipuai_config.py +8 -9
  28. camel/datasets/few_shot_generator.py +2 -5
  29. camel/datasets/static_dataset.py +25 -23
  30. camel/environments/models.py +3 -0
  31. camel/environments/single_step.py +212 -132
  32. camel/extractors/__init__.py +16 -1
  33. camel/memories/agent_memories.py +2 -1
  34. camel/memories/blocks/chat_history_block.py +2 -1
  35. camel/models/__init__.py +2 -0
  36. camel/models/gemini_model.py +36 -0
  37. camel/models/groq_model.py +6 -3
  38. camel/models/model_factory.py +3 -0
  39. camel/models/openrouter_model.py +204 -0
  40. camel/storages/__init__.py +2 -0
  41. camel/storages/key_value_storages/__init__.py +2 -0
  42. camel/storages/key_value_storages/mem0_cloud.py +224 -0
  43. camel/storages/vectordb_storages/qdrant.py +3 -3
  44. camel/toolkits/__init__.py +2 -0
  45. camel/toolkits/browser_toolkit.py +43 -0
  46. camel/toolkits/code_execution.py +2 -1
  47. camel/toolkits/mcp_toolkit.py +30 -1
  48. camel/toolkits/thinking_toolkit.py +74 -0
  49. camel/types/enums.py +27 -0
  50. camel/types/unified_model_type.py +5 -0
  51. camel/utils/chunker/code_chunker.py +9 -15
  52. camel/verifiers/__init__.py +1 -2
  53. camel/verifiers/base.py +159 -99
  54. camel/verifiers/models.py +0 -12
  55. camel/verifiers/python_verifier.py +316 -60
  56. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/METADATA +54 -5
  57. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/RECORD +59 -54
  58. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/WHEEL +0 -0
  59. {camel_ai-0.2.35.dist-info → camel_ai-0.2.37.dist-info}/licenses/LICENSE +0 -0
camel/environments/single_step.py CHANGED
@@ -12,20 +12,15 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

-
-from abc import abstractmethod
-from typing import Any, Dict, Optional, Tuple, Union
+import random
+from typing import Any, Dict, List, Optional, Tuple, Union

 from camel.datasets import BaseGenerator, DataPoint, StaticDataset
-from camel.extractors.base import BaseExtractor
 from camel.logger import get_logger
 from camel.verifiers.base import (
     BaseVerifier,
     VerificationResult,
 )
-from camel.verifiers.models import (
-    VerifierInput,
-)

 from .models import Action, Observation, StepResult

@@ -33,18 +28,23 @@ logger = get_logger(__name__)


 class SingleStepEnv:
-    r"""A single-step environment for reinforcement learning with LLMs.
+    r"""A lightweight environment for single-step RL with LLMs as policy.
+
+    This environment models a single interaction between an LLM-based agent
+    and a problem drawn from a dataset—such as a question-answering or
+    math problem—where the agent produces one response and receives feedback.
+
+    Core Flow:
+        - A question is sampled from a (possibly infinitely long) dataset.
+        - The LLM generates a single-step response (the action).
+        - The response is verified against the ground truth.
+        - A reward is computed based on correctness and optional custom logic.

     Key Features:
-    - Samples questions from a dataset and asks the LLM
-    - Extracts verifiable information from model responses.
-    - Verifies extracted responses against ground truth.
-    - Computes and assigns rewards based on correctness.
-    - Supports async setup, teardown, and cleanup of resources.
-
-    This class is intended as a foundation for RL experiments involving
-    LLM-based policies, ensuring structured interactions between model
-    actions and verification mechanisms.
+        - Batched evaluation with per-sample state tracking.
+        - Async setup and teardown for verifiers and related resources.
+        - Supports deterministic sampling via local RNG (optional seed).
+        - Extensible reward computation via subclassing.
     """

     PLACEHOLDER_OBS = Observation(
@@ -57,43 +57,47 @@ class SingleStepEnv:
         self,
         dataset: Union[StaticDataset, BaseGenerator],
         verifier: BaseVerifier,
-        extractor: BaseExtractor,
         **kwargs,
     ) -> None:
-        r"""Initialize the environment.
+        r"""Initialize the SingleStepEnv.

         Args:
-            dataset: Dataset to sample questions from.
-            verifier: Verifier to check responses.
-            extractor: Extractor to process LLM responses.
-            **kwargs: Additional environment parameters.
+            dataset (Union[StaticDataset, BaseGenerator]): Dataset to sample
+                problems from.
+            verifier (BaseVerifier): Verifier used to evaluate LLM responses
+                against ground-truth answers.
+            **kwargs: Optional metadata or configuration values.
+
+        Notes:
+            This class assumes all interactions are single-step: one question,
+            one LLM response, one reward.
         """
         self.dataset = dataset
         self.verifier = verifier
-        self.extractor = extractor
         self._metadata = kwargs

         # State tracking
         self._is_setup: bool = False
-        self._state: Optional[DataPoint] = None
-        self._episode_ended: bool = False
+        self._states: List[DataPoint] = []
+        self._states_done: List[bool] = []
+        self.current_batch_size: int = 0

     async def setup(self) -> None:
-        r"""Set up the environment by initializing the verifier and extractor.
+        r"""Set up the environment by initializing the verifier.

         This method ensures that the environment is ready for interaction.
-        It sets up necessary components, including the verifier and extractor.
+        It sets up necessary components, including the verifier.

         Raises:
             Exception: If setup fails due to an internal error.
         """

         if self._is_setup:
+            logger.warning("Environment has already been set up")
             return

         try:
             await self.verifier.setup()
-            await self.extractor.setup()

             self._is_setup = True
             logger.info('Environment setup completed successfully')
@@ -104,7 +108,7 @@ class SingleStepEnv:
     async def close(self) -> None:
         r"""Clean up and close all resources used by the environment.

-        This method shuts down the verifier and extractor, resets the internal
+        This method shuts down the verifier, resets the internal
         state, and ensures that the environment is properly closed.

         Raises:
@@ -112,173 +116,249 @@ class SingleStepEnv:
         """

         if not self._is_setup:
+            logger.warning(
+                "Not closing environment - has not been set up yet."
+            )
             return

         try:
             self._is_setup = False
             await self.verifier.cleanup()
-            await self.extractor.cleanup()
-            self._state = None
-            self._episode_ended = False
+            self._states = []
+            self._states_done = []
             logger.info('Environment closed successfully')
         except Exception as e:
             logger.error(f'Failed to close environment: {e}')
             raise

-    async def reset(self) -> Observation:
-        r"""Reset the environment and start a new episode.
+    async def reset(
+        self, batch_size: int = 1, seed: Optional[int] = None
+    ) -> Union[Observation, List[Observation]]:
+        r"""Resets the environment and starts a new episode.

-        This method samples a new data point from the dataset and returns the
-        initial observation.
+        This method samples a new batch of data points from the dataset and
+        returns the corresponding initial observations.
+
+        If a seed is provided, a local random number generator is initialized
+        for deterministic sampling. The global random state is not affected.
+
+        Args:
+            batch_size (int): Number of data points to sample.
+                (default: :obj:`1`)
+            seed (Optional[int]): Seed for deterministic sampling. If None,
+                sampling is non-deterministic. (default: :obj:`None`)

         Returns:
-            Observation: The first observation of the new episode, including
-                the question.
+            Observation or List[Observation]: Initial observation(s) for the
+                episode.

         Raises:
-            Exception: If the environment is not set up properly.
+            RuntimeError: If called before all previous states are processed.
+            ValueError: If batch size exceeds dataset size.
+            TypeError: If the dataset is of an unsupported type.
         """

         if not self._is_setup:
+            logger.warning(
+                "reset() called on un-setup environment. Setting up..."
+            )
             await self.setup()

-        self._episode_ended = False
+        if self._batch_started() and not self._batch_done():
+            logger.error(
+                "Reset called before all states were processed. "
+                "Call step on remaining states first."
+            )
+            raise RuntimeError(
+                "reset() called before all states in batch were processed."
+            )

-        # Sample a datapoint
+        if seed is not None:
+            rng = random.Random(seed)
+        else:
+            rng = random.Random()

-        self._state = self.dataset.sample()
+        if isinstance(self.dataset, StaticDataset):
+            dataset_len = len(self.dataset)

-        observation = Observation(
-            question=self._state.question, context={}, metadata={}
-        )
+            if batch_size > dataset_len:
+                raise ValueError(
+                    f"Batch size {batch_size} is too large for dataset "
+                    f"of size {dataset_len}"
+                )
+
+            start_idx = rng.randint(0, dataset_len - batch_size)
+            idx_slice = slice(start_idx, start_idx + batch_size)
+            val = self.dataset[idx_slice]
+            self._states = [val] if isinstance(val, DataPoint) else val

-        return observation
+            self.current_batch_size = len(self._states)
+            self._states_done = [False] * self.current_batch_size

-    async def step(self, action: Action) -> StepResult:
-        r"""Take a step in the environment using the given action.
+            observations = [
+                Observation(question=sample.question, context={}, metadata={})
+                for sample in self._states
+            ]

-        This method processes the LLM response, extracts verifiable content,
-        verifies correctness, computes rewards, and ends the episode.
+            return observations[0] if batch_size == 1 else observations
+
+        elif isinstance(self.dataset, BaseGenerator):
+            raise NotImplementedError(
+                "Reset not yet implemented for BaseGenerator datasets."
+            )
+
+        else:
+            raise TypeError(f"Unsupported dataset type: {type(self.dataset)}")
+
+    async def step(
+        self, action: Union[Action, List[Action]]
+    ) -> Union[StepResult, List[StepResult]]:
+        r"""Process actions for a subset of states and update their
+        finished status.

         Args:
-            action (Action): The action containing the LLM response to
-                evaluate.
+            action: Single action or list of actions, where each action
+                contains an index indicating which state it corresponds to.
+                The index must be a valid position in the internal _states list
+                that was populated during the reset() call.
+

         Returns:
-            StepResult: Contains the next observation (placeholder), total
-                reward, reward breakdown, completion flag, and additional
-                information.
+            Union[StepResult, List[StepResult]]: StepResult or list of
+                StepResults for the processed states.

         Raises:
-            RuntimeError: If the environment is not set up, the episode has
-                ended, or there is no valid current observation.
+            RuntimeError: If environment isn't set up or episode has ended.
+            ValueError: If indices are invalid, duplicate, or correspond to
+                finished states.
         """
-
         if not self._is_setup:
             raise RuntimeError("Environment not set up. Call setup() first.")
-        if self._episode_ended:
-            raise RuntimeError("Episode has ended. Call reset() first.")
-        if self._state is None:
+        if self._batch_done():
+            raise RuntimeError(
+                "Episodes have ended for batch. Call reset() first."
+            )
+        if not self._states:
             raise RuntimeError("No current observation. Call reset() first.")

-        # extract verifiable part from llm response
-        extraction_result = await self.extractor.extract(action.llm_response)
+        # Normalize everything to list
+        actions = [action] if isinstance(action, Action) else action
+        indices = [act.index for act in actions]
+
+        if len(set(indices)) != len(indices):
+            raise ValueError("Duplicate state indices in actions.")
+        for idx in indices:
+            if idx < 0 or idx >= len(self._states):
+                raise ValueError(f"Invalid state index {idx}.")
+            if self._states_done[idx]:
+                raise ValueError(f"State at index {idx} is already finished.")

-        if not extraction_result:
-            raise RuntimeError(f"Couldn't extract from {action.llm_response}")
+        num_actions = len(actions)

-        # verify the extracted
-        verification_result = await self.verifier.verify(
-            VerifierInput(
-                llm_response=extraction_result,
-                ground_truth=self._state.final_answer,
+        if self.current_batch_size % num_actions != 0:
+            logger.warning(
+                f"Number of actions ({num_actions}) is not a divisor of "
+                f"total batch size ({self.current_batch_size})"
             )
-        )

-        # compute rewards
-        total_reward, rewards_dict = await self._compute_reward(
-            action, extraction_result, verification_result
+        proposed_solutions = [act.llm_response for act in actions]
+        ground_truths: List[str] = [
+            self._states[idx].final_answer for idx in indices
+        ]
+
+        verification_results = await self.verifier.verify_batch(
+            solutions=proposed_solutions,
+            ground_truths=ground_truths,  # type: ignore [arg-type]
+            raise_on_error=True,
         )

-        self._episode_ended = True
-
-        return StepResult(
-            observation=self.PLACEHOLDER_OBS,
-            reward=total_reward,
-            rewards_dict=rewards_dict,
-            done=True,
-            info={
-                "extraction_result": extraction_result,
-                "verification_result": verification_result,
-                "state": self._state,
-            },
+        total_rewards, rewards_dicts = await self._compute_reward_batch(
+            proposed_solutions, verification_results
         )

-    async def _compute_reward(
-        self,
-        action: Action,
-        extraction_result: str,
-        verification_result: VerificationResult,
-    ) -> Tuple[float, Dict[str, float]]:
-        r"""Compute reward scores based on verification results.
+        step_results = []
+        # TODO: batch this
+        for i, action in enumerate(actions):
+            idx = action.index
+            step_result = StepResult(
+                observation=self.PLACEHOLDER_OBS,
+                reward=total_rewards[i],
+                rewards_dict=rewards_dicts[i],
+                done=True,
+                info={
+                    "proposed_solution": proposed_solutions[i],
+                    "verification_result": verification_results[i],
+                    "state": self._states[idx],
+                },
+            )
+            step_results.append(step_result)
+            self._states_done[idx] = True

-        This method calculates the reward based on correctness and any
-        additional custom reward components.
+        return step_results[0] if len(step_results) == 1 else step_results
+
+    async def _compute_reward_batch(
+        self,
+        proposed_solutions: List[str],
+        verification_results: List[VerificationResult],
+    ) -> Tuple[List[float], List[Dict[str, float]]]:
+        r"""Compute rewards for a batch of proposed solutions based on
+        verification results.

         Args:
-            action (Action): The action taken in the environment.
-            extraction_result (str): The extracted verifiable content from the
-                LLM response.
-            verification_result (VerificationResult): The result of verifying
-                the extracted response.
+            proposed_solutions (List[str]): List of LLM-generated responses to
+                evaluate.
+            verification_results (List[VerificationResult]): List of
+                verification outcomes for each solution.

         Returns:
-            Tuple[float, Dict[str, float]]: A tuple containing:
-                - Total reward (float)
-                - Dictionary of individual reward components.
-
-        Raises:
-            Exception: If an error occurs while computing rewards.
+            Tuple containing:
+                - List of total rewards for each solution.
+                - List of reward component dictionaries for each solution.
         """
+        total_rewards = []
+        rewards_dicts = []

-        rewards: Dict[str, float] = {}
+        for solution, verification_result in zip(
+            proposed_solutions, verification_results
+        ):
+            rewards: Dict[str, float] = {}

-        rewards["correctness"] = (
-            self.ACCURACY_REWARD if verification_result.status else 0.0
-        )
+            rewards["correctness"] = (
+                self.ACCURACY_REWARD if verification_result.status else 0.0
+            )

-        further_rewards = await self._compute_custom_reward(
-            action, extraction_result, verification_result
-        )
+            further_rewards = await self._compute_custom_reward(
+                solution, verification_result
+            )
+            rewards = {**rewards, **further_rewards}

-        rewards = rewards | further_rewards
+            total_reward = sum(rewards.values())
+            total_rewards.append(total_reward)
+            rewards_dicts.append(rewards)

-        return sum(rewards.values()), rewards
+        return total_rewards, rewards_dicts

-    @abstractmethod
     async def _compute_custom_reward(
-        self,
-        action: Action,
-        extraction_result: str,
-        verification_result: VerificationResult,
+        self, proposed_solution: str, verification_result: VerificationResult
     ) -> Dict[str, float]:
-        r"""Compute additional custom reward components.
+        r"""Compute additional custom reward components for a single solution.

-        This method should be implemented by subclasses to define
-        domain-specific reward calculations.
+        To be overridden by subclasses for domain-specific rewards.

         Args:
-            action (Action): The action taken in the environment.
-            extraction_result (str): The extracted verifiable content from the
-                LLM response.
-            verification_result (VerificationResult): The result of verifying
-                the extracted response.
+            proposed_solution (str): The LLM-generated response.
+            verification_result (VerificationResult): The verification outcome.

         Returns:
-            Dict[str, float]: A dictionary mapping custom reward categories
-                to their values.
+            Dict[str, float]: Dictionary of custom reward components.
         """
-        pass
+        return {}
+
+    def _batch_done(self) -> bool:
+        return all(self._states_done)
+
+    def _batch_started(self) -> bool:
+        return any(self._states_done)

     @property
     def metadata(self) -> Dict[str, Any]:
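
The rewrite above changes the calling convention: reset() now takes a batch_size and optional seed, and step() accepts actions tagged with the index of the state they answer. A minimal usage sketch (construction of the dataset, verifier, and environment is assumed; the Action fields follow the diff, but the agent.answer helper is hypothetical):

import asyncio

from camel.environments.models import Action
from camel.environments.single_step import SingleStepEnv


async def rollout(env: SingleStepEnv, agent) -> None:
    # Sample a reproducible batch of four problems.
    observations = await env.reset(batch_size=4, seed=42)

    actions = []
    for i, obs in enumerate(observations):
        # `agent.answer` stands in for any LLM policy; the environment
        # only sees the response string and the state index.
        response = agent.answer(obs.question)
        actions.append(Action(index=i, llm_response=response))

    # step() verifies all responses in one verifier batch and marks
    # each state as done, after which reset() may be called again.
    for result in await env.step(actions):
        print(result.reward, result.rewards_dict)

    await env.close()
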
camel/extractors/__init__.py CHANGED
@@ -12,5 +12,20 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from .base import BaseExtractor, BaseExtractorStrategy
+from .python_strategies import (
+    BoxedStrategy,
+    PythonDictStrategy,
+    PythonListStrategy,
+    PythonSetStrategy,
+    PythonTupleStrategy,
+)

-__all__ = ["BaseExtractor", "BaseExtractorStrategy"]
+__all__ = [
+    "BaseExtractor",
+    "BaseExtractorStrategy",
+    "BoxedStrategy",
+    "PythonListStrategy",
+    "PythonDictStrategy",
+    "PythonSetStrategy",
+    "PythonTupleStrategy",
+]
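
These strategies plug into the extractor pipeline that SingleStepEnv no longer owns, so callers now run extraction themselves before verification. A sketch, assuming BaseExtractor still accepts a nested list of strategy stages and the async setup/extract/cleanup lifecycle seen in the old single_step.py code:

import asyncio

from camel.extractors import BaseExtractor, BoxedStrategy


async def main() -> None:
    # Assumed signature: each inner list is one pipeline stage.
    extractor = BaseExtractor(pipeline=[[BoxedStrategy()]])
    await extractor.setup()

    # Pull the \boxed{...} payload out of a model response.
    answer = await extractor.extract(r"The answer is \boxed{42}.")
    print(answer)  # expected: "42"

    await extractor.cleanup()


asyncio.run(main())
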
camel/memories/agent_memories.py CHANGED
@@ -18,7 +18,8 @@ from typing import List, Optional
 from camel.memories.base import AgentMemory, BaseContextCreator
 from camel.memories.blocks import ChatHistoryBlock, VectorDBBlock
 from camel.memories.records import ContextRecord, MemoryRecord
-from camel.storages import BaseKeyValueStorage, BaseVectorStorage
+from camel.storages.key_value_storages.base import BaseKeyValueStorage
+from camel.storages.vectordb_storages.base import BaseVectorStorage
 from camel.types import OpenAIBackendRole

camel/memories/blocks/chat_history_block.py CHANGED
@@ -16,7 +16,8 @@ from typing import List, Optional

 from camel.memories.base import MemoryBlock
 from camel.memories.records import ContextRecord, MemoryRecord
-from camel.storages import BaseKeyValueStorage, InMemoryKeyValueStorage
+from camel.storages.key_value_storages.base import BaseKeyValueStorage
+from camel.storages.key_value_storages.in_memory import InMemoryKeyValueStorage
 from camel.types import OpenAIBackendRole

camel/models/__init__.py CHANGED
@@ -33,6 +33,7 @@ from .ollama_model import OllamaModel
 from .openai_audio_models import OpenAIAudioModels
 from .openai_compatible_model import OpenAICompatibleModel
 from .openai_model import OpenAIModel
+from .openrouter_model import OpenRouterModel
 from .qwen_model import QwenModel
 from .reka_model import RekaModel
 from .samba_model import SambaModel
@@ -48,6 +49,7 @@ from .zhipuai_model import ZhipuAIModel
 __all__ = [
     'BaseModelBackend',
     'OpenAIModel',
+    'OpenRouterModel',
     'AzureOpenAIModel',
     'AnthropicModel',
     'MistralModel',
camel/models/gemini_model.py CHANGED
@@ -172,6 +172,24 @@ class GeminiModel(BaseModelBackend):
             for tool in tools:
                 function_dict = tool.get('function', {})
                 function_dict.pop("strict", None)
+
+                # Process parameters to remove anyOf
+                if 'parameters' in function_dict:
+                    params = function_dict['parameters']
+                    if 'properties' in params:
+                        for prop_name, prop_value in params[
+                            'properties'
+                        ].items():
+                            if 'anyOf' in prop_value:
+                                # Replace anyOf with the first type in the list
+                                first_type = prop_value['anyOf'][0]
+                                params['properties'][prop_name] = first_type
+                                # Preserve description if it exists
+                                if 'description' in prop_value:
+                                    params['properties'][prop_name][
+                                        'description'
+                                    ] = prop_value['description']
+
             request_config["tools"] = tools

         return self._client.chat.completions.create(
@@ -191,6 +209,24 @@ class GeminiModel(BaseModelBackend):
             for tool in tools:
                 function_dict = tool.get('function', {})
                 function_dict.pop("strict", None)
+
+                # Process parameters to remove anyOf
+                if 'parameters' in function_dict:
+                    params = function_dict['parameters']
+                    if 'properties' in params:
+                        for prop_name, prop_value in params[
+                            'properties'
+                        ].items():
+                            if 'anyOf' in prop_value:
+                                # Replace anyOf with the first type in the list
+                                first_type = prop_value['anyOf'][0]
+                                params['properties'][prop_name] = first_type
+                                # Preserve description if it exists
+                                if 'description' in prop_value:
+                                    params['properties'][prop_name][
+                                        'description'
+                                    ] = prop_value['description']
+
             request_config["tools"] = tools

         return await self._async_client.chat.completions.create(
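
The intent of the anyOf handling (duplicated across the sync and async paths) is easier to see on a concrete schema. A standalone sketch of the same transformation, where the flatten_any_of helper is illustrative and not part of the package:

def flatten_any_of(parameters: dict) -> dict:
    # Mirrors the diff: replace each anyOf property with its first
    # alternative and carry over the description if present.
    for name, prop in parameters.get("properties", {}).items():
        if "anyOf" in prop:
            first_type = prop["anyOf"][0]
            if "description" in prop:
                first_type["description"] = prop["description"]
            parameters["properties"][name] = first_type
    return parameters


params = {
    "properties": {
        "limit": {
            "anyOf": [{"type": "integer"}, {"type": "null"}],
            "description": "Maximum number of results.",
        }
    }
}
print(flatten_any_of(params))
# {'properties': {'limit': {'type': 'integer',
#                           'description': 'Maximum number of results.'}}}

Note the trade-off: an Optional parameter declared as anyOf[T, null] loses its null branch, so Gemini sees it as a plain required type T.
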
camel/models/groq_model.py CHANGED
@@ -195,7 +195,10 @@ class GroqModel(BaseModelBackend):

     @property
     def stream(self) -> bool:
-        r"""Returns whether the model supports streaming. But Groq API does
-        not support streaming.
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
         """
-        return False
+        return self.model_config_dict.get("stream", False)
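
With the hard-coded False removed, streaming for Groq is opt-in through the model config. A sketch of how a caller might enable it (the ModelType member is illustrative, and a GROQ_API_KEY is assumed to be set in the environment):

from camel.configs import GroqConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# The exact model type below is illustrative; any Groq model works.
model = ModelFactory.create(
    model_platform=ModelPlatformType.GROQ,
    model_type=ModelType.GROQ_LLAMA_3_3_70B,
    model_config_dict=GroqConfig(stream=True).as_dict(),
)
print(model.stream)  # True, now read from model_config_dict["stream"]
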
camel/models/model_factory.py CHANGED
@@ -29,6 +29,7 @@ from camel.models.nvidia_model import NvidiaModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.models.openai_model import OpenAIModel
+from camel.models.openrouter_model import OpenRouterModel
 from camel.models.qwen_model import QwenModel
 from camel.models.reka_model import RekaModel
 from camel.models.samba_model import SambaModel
@@ -119,6 +120,8 @@ class ModelFactory:
             model_class = AnthropicModel
         elif model_platform.is_groq and model_type.is_groq:
             model_class = GroqModel
+        elif model_platform.is_openrouter and model_type.is_openrouter:
+            model_class = OpenRouterModel
         elif model_platform.is_zhipuai and model_type.is_zhipuai:
             model_class = ZhipuAIModel
         elif model_platform.is_gemini and model_type.is_gemini:
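
The new branch mirrors the existing platform checks, so creating an OpenRouter-backed model should follow the usual factory pattern. A sketch (the ModelType member below is illustrative, since the concrete enum values live in the camel/types/enums.py hunk (+27) not shown here, and an OPENROUTER_API_KEY is assumed):

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Illustrative model type; see the camel/types/enums.py changes for the
# OpenRouter members actually added in 0.2.37.
model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENROUTER,
    model_type=ModelType.OPENROUTER_LLAMA_3_1_405B,
)
response = model.run([{"role": "user", "content": "Say hello."}])
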