camel-ai 0.2.34__py3-none-any.whl → 0.2.36__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Files changed (47)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +1 -1
  3. camel/agents/_utils.py +4 -4
  4. camel/agents/chat_agent.py +174 -29
  5. camel/configs/__init__.py +3 -0
  6. camel/configs/openai_config.py +20 -16
  7. camel/configs/openrouter_config.py +106 -0
  8. camel/datasets/base_generator.py +188 -27
  9. camel/datasets/few_shot_generator.py +2 -5
  10. camel/environments/single_step.py +1 -7
  11. camel/memories/agent_memories.py +49 -2
  12. camel/memories/base.py +23 -1
  13. camel/memories/blocks/chat_history_block.py +2 -1
  14. camel/memories/records.py +5 -0
  15. camel/models/__init__.py +2 -0
  16. camel/models/gemini_model.py +36 -0
  17. camel/models/groq_model.py +6 -3
  18. camel/models/model_factory.py +3 -0
  19. camel/models/openrouter_model.py +204 -0
  20. camel/models/stub_model.py +25 -0
  21. camel/retrievers/vector_retriever.py +12 -7
  22. camel/storages/__init__.py +2 -0
  23. camel/storages/key_value_storages/__init__.py +4 -1
  24. camel/storages/key_value_storages/json.py +3 -7
  25. camel/storages/key_value_storages/mem0_cloud.py +224 -0
  26. camel/storages/vectordb_storages/base.py +5 -1
  27. camel/storages/vectordb_storages/qdrant.py +3 -3
  28. camel/toolkits/__init__.py +2 -1
  29. camel/toolkits/browser_toolkit.py +43 -0
  30. camel/toolkits/code_execution.py +2 -1
  31. camel/toolkits/mcp_toolkit.py +30 -1
  32. camel/toolkits/memory_toolkit.py +129 -0
  33. camel/types/enums.py +24 -0
  34. camel/types/unified_model_type.py +5 -0
  35. camel/utils/chunker/__init__.py +22 -0
  36. camel/utils/chunker/base.py +24 -0
  37. camel/utils/chunker/code_chunker.py +193 -0
  38. camel/utils/chunker/uio_chunker.py +66 -0
  39. camel/utils/token_counting.py +133 -0
  40. camel/verifiers/__init__.py +1 -2
  41. camel/verifiers/base.py +133 -96
  42. camel/verifiers/models.py +0 -12
  43. camel/verifiers/python_verifier.py +25 -14
  44. {camel_ai-0.2.34.dist-info → camel_ai-0.2.36.dist-info}/METADATA +3 -1
  45. {camel_ai-0.2.34.dist-info → camel_ai-0.2.36.dist-info}/RECORD +47 -39
  46. {camel_ai-0.2.34.dist-info → camel_ai-0.2.36.dist-info}/WHEEL +0 -0
  47. {camel_ai-0.2.34.dist-info → camel_ai-0.2.36.dist-info}/licenses/LICENSE +0 -0
camel/datasets/base_generator.py CHANGED
@@ -13,10 +13,14 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import abc
+import asyncio
 import json
 import random
 from pathlib import Path
-from typing import List, Union
+from typing import Any, Dict, List, Union
+
+from pydantic import ValidationError
+from torch.utils.data import IterableDataset
 
 from camel.logger import get_logger
 
@@ -25,23 +29,47 @@ from .models import DataPoint
 logger = get_logger(__name__)
 
 
-class BaseGenerator(abc.ABC):
+class BaseGenerator(abc.ABC, IterableDataset):
     r"""Abstract base class for data generators.
 
     This class defines the interface for generating synthetic datapoints.
     Concrete implementations should provide specific generation strategies.
     """
 
-    def __init__(self, seed: int = 42, **kwargs):
+    def __init__(
+        self,
+        seed: int = 42,
+        cache: Union[str, Path, None] = None,
+        data_path: Union[str, Path, None] = None,
+        **kwargs,
+    ):
         r"""Initialize the base generator.
 
         Args:
             seed (int): Random seed for reproducibility. (default: :obj:`42`)
+            cache (Union[str, Path, None]): Optional path to save generated
+                datapoints during iteration. If None is provided, datapoints
+                will be discarded every 100 generations.
+            data_path (Union[str, Path, None]): Optional path to a JSONL file
+                to initialize the dataset from.
             **kwargs: Additional generator parameters.
         """
         self._rng = random.Random(seed)
+        self.cache = Path(cache) if cache else None
 
         self._data: List[DataPoint] = []
+        self._batch_to_save: List[DataPoint] = []
+
+        if data_path:
+            file_path = Path(data_path)
+            raw_data = self._init_from_jsonl(file_path)
+            try:
+                data_points = [DataPoint(**item) for item in raw_data]
+                self._data.extend(data_points)
+            except ValidationError as e:
+                raise ValueError(
+                    f"Failed to create DataPoint from JSONL data: {e}"
+                )
 
     @abc.abstractmethod
     async def generate_new(self, n: int, **kwargs) -> List[DataPoint]:
@@ -56,34 +84,112 @@ class BaseGenerator(abc.ABC):
         """
         pass
 
-    def __len__(self) -> int:
-        r"""Return the size of the generated dataset."""
-        return len(self._data)
+    def __aiter__(self):
+        r"""Async iterator that yields datapoints dynamically.
 
-    def __getitem__(self, idx: int) -> DataPoint:
-        r"""Retrieve a datapoint by index.
+        If a `data_path` was provided during initialization, those datapoints
+        are yielded first. When self._data is empty, 20 new datapoints
+        are generated. Every 100 yields, the batch is appended to the
+        JSONL file or discarded if `cache` is None.
 
-        Args:
-            idx (int): Index of the datapoint.
+        Yields:
+            DataPoint: A single datapoint.
+        """
 
-        Returns:
-            DataPoint: The datapoint corresponding to the given index.
+        async def generator():
+            while True:
+                if not self._data:
+                    new_datapoints = await self.generate_new(20)
+                    self._data.extend(new_datapoints)
+                datapoint = self._data.pop(0)
+                yield datapoint
+                self._batch_to_save.append(datapoint)
+                if len(self._batch_to_save) == 100:
+                    if self.cache:
+                        with self.cache.open("a", encoding="utf-8") as f:
+                            for dp in self._batch_to_save:
+                                json.dump(dp.to_dict(), f, ensure_ascii=False)
+                                f.write("\n")
+                    self._batch_to_save = []
 
-        Raises:
-            IndexError: If idx is out of bounds.
+        return generator()
+
+    def __iter__(self):
+        r"""Synchronous iterator for PyTorch IterableDataset compatibility.
+
+        If a `data_path` was provided during initialization, those datapoints
+        are yielded first. When self._data is empty, 20 new datapoints
+        are generated. Every 100 yields, the batch is appended to the
+        JSONL file or discarded if `cache` is None.
+
+        Yields:
+            DataPoint: A single datapoint.
         """
-        if idx < 0 or idx >= len(self._data):
-            raise IndexError(
-                f"Index {idx} out of bounds for dataset of "
-                f"size {len(self._data)}"
-            )
-        return self._data[idx]
+        try:
+            if asyncio.get_event_loop().is_running():
+                raise RuntimeError(
+                    "Cannot use synchronous iteration (__iter__) in an async "
+                    "context; use 'async for' with __aiter__ instead"
+                )
+        except RuntimeError as e:
+            if "no running event loop" not in str(e):
+                raise
+
+        while True:
+            if not self._data:
+                new_datapoints = asyncio.run(self.generate_new(20))
+                self._data.extend(new_datapoints)
+            datapoint = self._data.pop(0)
+            yield datapoint
+            self._batch_to_save.append(datapoint)
+            if len(self._batch_to_save) == 100:
+                if self.cache:
+                    with self.cache.open("a", encoding="utf-8") as f:
+                        for dp in self._batch_to_save:
+                            json.dump(dp.to_dict(), f, ensure_ascii=False)
+                            f.write("\n")
+                self._batch_to_save = []
 
     def sample(self) -> DataPoint:
-        if len(self._data) == 0:
-            raise RuntimeError("Dataset is empty, cannot sample.")
-        idx = self._rng.randint(0, len(self._data) - 1)
-        return self[idx]
+        r"""Returns the next datapoint from the current dataset
+        synchronously.
+
+        Raises:
+            RuntimeError: If called in an async context.
+
+        Returns:
+            DataPoint: The next DataPoint.
+
+        Note:
+            This method is intended for synchronous contexts.
+            Use 'async_sample' in asynchronous contexts to
+            avoid blocking or runtime errors.
+        """
+        try:
+            if asyncio.get_event_loop().is_running():
+                raise RuntimeError(
+                    "Cannot use synchronous sampling (sample) "
+                    "in an async context; use async_sample instead"
+                )
+        except RuntimeError as e:
+            if "no running event loop" not in str(e):
+                raise
+
+        return next(iter(self))
+
+    async def async_sample(self) -> DataPoint:
+        r"""Returns the next datapoint from the current dataset asynchronously.
+
+        Returns:
+            DataPoint: The next datapoint.
+
+        Note:
+            This method is intended for asynchronous contexts. Use 'sample'
+            in synchronous contexts.
+        """
+
+        async_iter = self.__aiter__()
+        return await async_iter.__anext__()
 
     def save_to_jsonl(self, file_path: Union[str, Path]) -> None:
         r"""Saves the generated datapoints to a JSONL (JSON Lines) file.
@@ -99,7 +205,7 @@ class BaseGenerator(abc.ABC):
 
         Notes:
             - Uses `self._data`, which contains the generated datapoints.
-            - Overwrites the file if it already exists.
+            - Appends to the file if it already exists.
             - Ensures compatibility with large datasets by using JSONL format.
         """
         if not self._data:
@@ -108,11 +214,66 @@
         file_path = Path(file_path)
 
         try:
-            with file_path.open("w", encoding="utf-8") as f:
+            with file_path.open("a", encoding="utf-8") as f:
                 for datapoint in self._data:
                     json.dump(datapoint.to_dict(), f, ensure_ascii=False)
-                    f.write("\n")  # Ensure each entry is on a new line
+                    f.write("\n")
             logger.info(f"Dataset saved successfully to {file_path}")
         except IOError as e:
             logger.error(f"Error writing to file {file_path}: {e}")
             raise
+
+    def flush(self, file_path: Union[str, Path]) -> None:
+        r"""Flush the current data to a JSONL file and clear the data.
+
+        Args:
+            file_path (Union[str, Path]): Path to save the JSONL file.
+
+        Notes:
+            - Uses `save_to_jsonl` to save `self._data`.
+        """
+
+        self.save_to_jsonl(file_path)
+        self._data = []
+        logger.info(f"Data flushed to {file_path} and cleared from the memory")
+
+    def _init_from_jsonl(self, file_path: Path) -> List[Dict[str, Any]]:
+        r"""Load and parse a dataset from a JSONL file.
+
+        Args:
+            file_path (Path): Path to the JSONL file.
+
+        Returns:
+            List[Dict[str, Any]]: A list of datapoint dictionaries.
+
+        Raises:
+            FileNotFoundError: If the specified JSONL file does not exist.
+            ValueError: If a line contains invalid JSON or is not a dictionary.
+        """
+        if not file_path.exists():
+            raise FileNotFoundError(f"JSONL file not found: {file_path}")
+
+        raw_data = []
+        logger.debug(f"Loading JSONL from {file_path}")
+        with file_path.open('r', encoding='utf-8') as f:
+            for line_number, line in enumerate(f, start=1):
+                line = line.strip()
+                if not line:
+                    continue  # Skip blank lines
+                try:
+                    record = json.loads(line)
+                except json.JSONDecodeError as e:
+                    raise ValueError(
+                        f"Invalid JSON on line {line_number} "
+                        f"in file {file_path}: {e}"
+                    )
+                if not isinstance(record, dict):
+                    raise ValueError(
+                        f"Expected a dictionary at line {line_number}, "
+                        f"got {type(record).__name__}"
+                    )
+                raw_data.append(record)
+        logger.info(
+            f"Successfully loaded {len(raw_data)} items from {file_path}"
+        )
+        return raw_data
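
Taken together, the base_generator.py changes make BaseGenerator a torch IterableDataset that streams datapoints (the old __len__/__getitem__ indexing API is gone, and the module now imports torch) and appends each batch of 100 yielded points to the cache file. A minimal usage sketch; the EchoGenerator subclass and the DataPoint field names (question, rationale, final_answer) are illustrative assumptions, not taken from this diff:

import asyncio
from typing import List

from camel.datasets.base_generator import BaseGenerator
from camel.datasets.models import DataPoint


class EchoGenerator(BaseGenerator):  # hypothetical subclass for illustration
    async def generate_new(self, n: int, **kwargs) -> List[DataPoint]:
        # A real generator would call a model; this one fabricates points.
        return [
            DataPoint(question=f"q{i}", rationale="r", final_answer="a")
            for i in range(n)
        ]


gen = EchoGenerator(seed=0, cache="cache.jsonl")  # batches of 100 appended here

point = gen.sample()  # sync path: triggers generate_new(20) via asyncio.run

async def take_one() -> DataPoint:
    return await gen.async_sample()  # async path: avoids event-loop conflicts

another = asyncio.run(take_one())
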
camel/datasets/few_shot_generator.py CHANGED
@@ -22,7 +22,6 @@ from camel.agents import ChatAgent
 from camel.logger import get_logger
 from camel.models.base_model import BaseModelBackend
 from camel.verifiers import BaseVerifier
-from camel.verifiers.models import VerifierInput
 
 from .base_generator import BaseGenerator
 from .models import DataPoint
@@ -203,10 +202,8 @@ class FewShotGenerator(BaseGenerator):
 
         try:
             verifier_response = await self.verifier.verify(
-                VerifierInput(
-                    llm_response=rationale,
-                    ground_truth=None,
-                )
+                solution=rationale,
+                ground_truth=None,
             )
             if not verifier_response or not verifier_response.result:
                 raise ValueError(
camel/environments/single_step.py CHANGED
@@ -23,9 +23,6 @@ from camel.verifiers.base import (
     BaseVerifier,
     VerificationResult,
 )
-from camel.verifiers.models import (
-    VerifierInput,
-)
 
 from .models import Action, Observation, StepResult
 
@@ -189,10 +186,7 @@ class SingleStepEnv:
 
         # verify the extracted
         verification_result = await self.verifier.verify(
-            VerifierInput(
-                llm_response=extraction_result,
-                ground_truth=self._state.final_answer,
-            )
+            solution=extraction_result, ground_truth=self._state.final_answer
         )
 
         # compute rewards
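
Both call sites track the same breaking change to BaseVerifier.verify (see camel/verifiers/base.py, +133 -96): the VerifierInput wrapper is removed and the solution and ground truth are passed as plain keyword arguments. Downstream callers would migrate roughly like this; verifier and response_text are placeholders:

# 0.2.34: wrapper object (removed together with camel.verifiers.models.VerifierInput)
# from camel.verifiers.models import VerifierInput
# result = await verifier.verify(
#     VerifierInput(llm_response=response_text, ground_truth="42")
# )

# 0.2.36: keyword arguments, as used by FewShotGenerator and SingleStepEnv above
result = await verifier.verify(solution=response_text, ground_truth="42")
if not result or not result.result:
    raise ValueError("Verification failed")
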
camel/memories/agent_memories.py CHANGED
@@ -18,7 +18,8 @@ from typing import List, Optional
 from camel.memories.base import AgentMemory, BaseContextCreator
 from camel.memories.blocks import ChatHistoryBlock, VectorDBBlock
 from camel.memories.records import ContextRecord, MemoryRecord
-from camel.storages import BaseKeyValueStorage, BaseVectorStorage
+from camel.storages.key_value_storages.base import BaseKeyValueStorage
+from camel.storages.vectordb_storages.base import BaseVectorStorage
 from camel.types import OpenAIBackendRole
 
 
@@ -33,6 +34,8 @@ class ChatHistoryMemory(AgentMemory):
         window_size (int, optional): The number of recent chat messages to
             retrieve. If not provided, the entire chat history will be
             retrieved. (default: :obj:`None`)
+        agent_id (str, optional): The ID of the agent associated with the chat
+            history.
     """
 
     def __init__(
@@ -40,6 +43,7 @@ class ChatHistoryMemory(AgentMemory):
         context_creator: BaseContextCreator,
         storage: Optional[BaseKeyValueStorage] = None,
         window_size: Optional[int] = None,
+        agent_id: Optional[str] = None,
     ) -> None:
         if window_size is not None and not isinstance(window_size, int):
             raise TypeError("`window_size` must be an integer or None.")
@@ -48,6 +52,15 @@ class ChatHistoryMemory(AgentMemory):
         self._context_creator = context_creator
         self._window_size = window_size
         self._chat_history_block = ChatHistoryBlock(storage=storage)
+        self._agent_id = agent_id
+
+    @property
+    def agent_id(self) -> Optional[str]:
+        return self._agent_id
+
+    @agent_id.setter
+    def agent_id(self, val: Optional[str]) -> None:
+        self._agent_id = val
 
     def retrieve(self) -> List[ContextRecord]:
         records = self._chat_history_block.retrieve(self._window_size)
@@ -63,6 +76,10 @@ class ChatHistoryMemory(AgentMemory):
         return records
 
     def write_records(self, records: List[MemoryRecord]) -> None:
+        for record in records:
+            # assign the agent_id to the record
+            if record.agent_id == "" and self.agent_id is not None:
+                record.agent_id = self.agent_id
         self._chat_history_block.write_records(records)
 
     def get_context_creator(self) -> BaseContextCreator:
@@ -84,6 +101,8 @@ class VectorDBMemory(AgentMemory):
             (default: :obj:`None`)
         retrieve_limit (int, optional): The maximum number of messages
             to be added into the context. (default: :obj:`3`)
+        agent_id (str, optional): The ID of the agent associated with
+            the messages stored in the vector database.
     """
 
     def __init__(
@@ -91,13 +110,23 @@ class VectorDBMemory(AgentMemory):
         context_creator: BaseContextCreator,
         storage: Optional[BaseVectorStorage] = None,
         retrieve_limit: int = 3,
+        agent_id: Optional[str] = None,
     ) -> None:
         self._context_creator = context_creator
         self._retrieve_limit = retrieve_limit
         self._vectordb_block = VectorDBBlock(storage=storage)
+        self._agent_id = agent_id
 
         self._current_topic: str = ""
 
+    @property
+    def agent_id(self) -> Optional[str]:
+        return self._agent_id
+
+    @agent_id.setter
+    def agent_id(self, val: Optional[str]) -> None:
+        self._agent_id = val
+
     def retrieve(self) -> List[ContextRecord]:
         return self._vectordb_block.retrieve(
             self._current_topic,
@@ -109,6 +138,11 @@ class VectorDBMemory(AgentMemory):
         for record in records:
             if record.role_at_backend == OpenAIBackendRole.USER:
                 self._current_topic = record.message.content
+
+            # assign the agent_id to the record
+            if record.agent_id == "" and self.agent_id is not None:
+                record.agent_id = self.agent_id
+
         self._vectordb_block.write_records(records)
 
     def get_context_creator(self) -> BaseContextCreator:
@@ -133,6 +167,8 @@ class LongtermAgentMemory(AgentMemory):
             (default: :obj:`None`)
         retrieve_limit (int, optional): The maximum number of messages
             to be added into the context. (default: :obj:`3`)
+        agent_id (str, optional): The ID of the agent associated with the chat
+            history and the messages stored in the vector database.
     """
 
     def __init__(
@@ -141,12 +177,22 @@ class LongtermAgentMemory(AgentMemory):
         chat_history_block: Optional[ChatHistoryBlock] = None,
         vector_db_block: Optional[VectorDBBlock] = None,
         retrieve_limit: int = 3,
+        agent_id: Optional[str] = None,
     ) -> None:
         self.chat_history_block = chat_history_block or ChatHistoryBlock()
         self.vector_db_block = vector_db_block or VectorDBBlock()
         self.retrieve_limit = retrieve_limit
         self._context_creator = context_creator
         self._current_topic: str = ""
+        self._agent_id = agent_id
+
+    @property
+    def agent_id(self) -> Optional[str]:
+        return self._agent_id
+
+    @agent_id.setter
+    def agent_id(self, val: Optional[str]) -> None:
+        self._agent_id = val
 
     def get_context_creator(self) -> BaseContextCreator:
         r"""Returns the context creator used by the memory.
@@ -166,7 +212,8 @@ class LongtermAgentMemory(AgentMemory):
         """
         chat_history = self.chat_history_block.retrieve()
         vector_db_retrieve = self.vector_db_block.retrieve(
-            self._current_topic, self.retrieve_limit
+            self._current_topic,
+            self.retrieve_limit,
         )
         return chat_history[:1] + vector_db_retrieve + chat_history[1:]
 
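All three memory classes now accept an agent_id, expose it through the new property pair, and stamp it onto any written record whose agent_id is still the empty default. A small sketch of that behavior; the context-creator wiring (ScoreBasedContextCreator plus OpenAITokenCounter) follows the usual camel pattern and should be adapted to your setup:

from camel.memories import ChatHistoryMemory, MemoryRecord, ScoreBasedContextCreator
from camel.messages import BaseMessage
from camel.types import ModelType, OpenAIBackendRole
from camel.utils import OpenAITokenCounter

memory = ChatHistoryMemory(
    context_creator=ScoreBasedContextCreator(
        token_counter=OpenAITokenCounter(ModelType.GPT_4O_MINI),
        token_limit=1024,
    ),
    agent_id="agent-007",
)

record = MemoryRecord(
    message=BaseMessage.make_user_message(role_name="user", content="hello"),
    role_at_backend=OpenAIBackendRole.USER,
)
memory.write_records([record])
assert record.agent_id == "agent-007"  # stamped because the default "" was empty
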
camel/memories/base.py CHANGED
@@ -13,7 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 from abc import ABC, abstractmethod
-from typing import List, Tuple
+from typing import List, Optional, Tuple
 
 from camel.memories.records import ContextRecord, MemoryRecord
 from camel.messages import OpenAIMessage
@@ -112,6 +112,16 @@ class AgentMemory(MemoryBlock, ABC):
         the memory records stored within the AgentMemory.
     """
 
+    @property
+    @abstractmethod
+    def agent_id(self) -> Optional[str]:
+        pass
+
+    @agent_id.setter
+    @abstractmethod
+    def agent_id(self, val: Optional[str]) -> None:
+        pass
+
     @abstractmethod
     def retrieve(self) -> List[ContextRecord]:
         r"""Get a record list from the memory for creating model context.
@@ -138,3 +148,15 @@ class AgentMemory(MemoryBlock, ABC):
             context in OpenAIMessage format and the total token count.
         """
         return self.get_context_creator().create_context(self.retrieve())
+
+    def __repr__(self) -> str:
+        r"""Returns a string representation of the AgentMemory.
+
+        Returns:
+            str: A string in the format 'ClassName(agent_id=<id>)'
+                if agent_id exists, otherwise just 'ClassName()'.
+        """
+        agent_id = getattr(self, '_agent_id', None)
+        if agent_id:
+            return f"{self.__class__.__name__}(agent_id='{agent_id}')"
+        return f"{self.__class__.__name__}()"
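
Since agent_id is now an abstract property on AgentMemory, custom memories defined outside the package must implement the getter/setter pair or they will fail to instantiate. A minimal sketch of a conforming subclass; ListMemory is hypothetical, and the ContextRecord(memory_record=..., score=...) construction reflects the camel API as commonly used:

from typing import List, Optional

from camel.memories.base import AgentMemory, BaseContextCreator
from camel.memories.records import ContextRecord, MemoryRecord


class ListMemory(AgentMemory):  # hypothetical in-memory implementation
    def __init__(self, context_creator: BaseContextCreator) -> None:
        self._context_creator = context_creator
        self._records: List[MemoryRecord] = []
        self._agent_id: Optional[str] = None

    # New in 0.2.36: subclasses must provide this property pair.
    @property
    def agent_id(self) -> Optional[str]:
        return self._agent_id

    @agent_id.setter
    def agent_id(self, val: Optional[str]) -> None:
        self._agent_id = val

    def retrieve(self) -> List[ContextRecord]:
        return [ContextRecord(memory_record=r, score=1.0) for r in self._records]

    def write_records(self, records: List[MemoryRecord]) -> None:
        self._records.extend(records)

    def get_context_creator(self) -> BaseContextCreator:
        return self._context_creator

    def clear(self) -> None:
        self._records.clear()
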
camel/memories/blocks/chat_history_block.py CHANGED
@@ -16,7 +16,8 @@ from typing import List, Optional
 
 from camel.memories.base import MemoryBlock
 from camel.memories.records import ContextRecord, MemoryRecord
-from camel.storages import BaseKeyValueStorage, InMemoryKeyValueStorage
+from camel.storages.key_value_storages.base import BaseKeyValueStorage
+from camel.storages.key_value_storages.in_memory import InMemoryKeyValueStorage
 from camel.types import OpenAIBackendRole
 
 
camel/memories/records.py CHANGED
@@ -39,6 +39,8 @@ class MemoryRecord(BaseModel):
             key-value pairs that provide more information. If not given, it
             will be an empty `Dict`.
         timestamp (float, optional): The timestamp when the record was created.
+        agent_id (str): The identifier of the agent associated with this
+            memory.
     """
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -50,6 +52,7 @@ class MemoryRecord(BaseModel):
     timestamp: float = Field(
         default_factory=lambda: datetime.now(timezone.utc).timestamp()
    )
+    agent_id: str = Field(default="")
 
     _MESSAGE_TYPES: ClassVar[dict] = {
         "BaseMessage": BaseMessage,
@@ -73,6 +76,7 @@ class MemoryRecord(BaseModel):
             role_at_backend=record_dict["role_at_backend"],
             extra_info=record_dict["extra_info"],
             timestamp=record_dict["timestamp"],
+            agent_id=record_dict["agent_id"],
         )
 
     def to_dict(self) -> Dict[str, Any]:
@@ -88,6 +92,7 @@ class MemoryRecord(BaseModel):
             "role_at_backend": self.role_at_backend,
             "extra_info": self.extra_info,
             "timestamp": self.timestamp,
+            "agent_id": self.agent_id,
         }
 
     def to_openai_message(self) -> OpenAIMessage:
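
agent_id now round-trips through to_dict/from_dict. One migration caveat is visible in the diff: from_dict reads record_dict["agent_id"] with no fallback, so record dictionaries serialized by 0.2.34 will raise KeyError unless the key is backfilled first. A sketch; record and load_legacy_dict are placeholders:

from camel.memories.records import MemoryRecord

d = record.to_dict()                  # `record` is any MemoryRecord; includes "agent_id"
restored = MemoryRecord.from_dict(d)  # round-trips the new field
assert restored.agent_id == record.agent_id

legacy = load_legacy_dict()           # hypothetical 0.2.34-era dict without the key
legacy.setdefault("agent_id", "")     # backfill to avoid KeyError in from_dict
restored_legacy = MemoryRecord.from_dict(legacy)
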
camel/models/__init__.py CHANGED
@@ -33,6 +33,7 @@ from .ollama_model import OllamaModel
 from .openai_audio_models import OpenAIAudioModels
 from .openai_compatible_model import OpenAICompatibleModel
 from .openai_model import OpenAIModel
+from .openrouter_model import OpenRouterModel
 from .qwen_model import QwenModel
 from .reka_model import RekaModel
 from .samba_model import SambaModel
@@ -48,6 +49,7 @@ from .zhipuai_model import ZhipuAIModel
 __all__ = [
     'BaseModelBackend',
     'OpenAIModel',
+    'OpenRouterModel',
     'AzureOpenAIModel',
     'AnthropicModel',
     'MistralModel',
camel/models/gemini_model.py CHANGED
@@ -172,6 +172,24 @@ class GeminiModel(BaseModelBackend):
             for tool in tools:
                 function_dict = tool.get('function', {})
                 function_dict.pop("strict", None)
+
+                # Process parameters to remove anyOf
+                if 'parameters' in function_dict:
+                    params = function_dict['parameters']
+                    if 'properties' in params:
+                        for prop_name, prop_value in params[
+                            'properties'
+                        ].items():
+                            if 'anyOf' in prop_value:
+                                # Replace anyOf with the first type in the list
+                                first_type = prop_value['anyOf'][0]
+                                params['properties'][prop_name] = first_type
+                                # Preserve description if it exists
+                                if 'description' in prop_value:
+                                    params['properties'][prop_name][
+                                        'description'
+                                    ] = prop_value['description']
+
             request_config["tools"] = tools
 
         return self._client.chat.completions.create(
@@ -191,6 +209,24 @@ class GeminiModel(BaseModelBackend):
             for tool in tools:
                 function_dict = tool.get('function', {})
                 function_dict.pop("strict", None)
+
+                # Process parameters to remove anyOf
+                if 'parameters' in function_dict:
+                    params = function_dict['parameters']
+                    if 'properties' in params:
+                        for prop_name, prop_value in params[
+                            'properties'
+                        ].items():
+                            if 'anyOf' in prop_value:
+                                # Replace anyOf with the first type in the list
+                                first_type = prop_value['anyOf'][0]
+                                params['properties'][prop_name] = first_type
+                                # Preserve description if it exists
+                                if 'description' in prop_value:
+                                    params['properties'][prop_name][
+                                        'description'
+                                    ] = prop_value['description']
+
             request_config["tools"] = tools
 
         return await self._async_client.chat.completions.create(
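
Gemini's function-calling endpoint does not accept JSON-Schema anyOf, so both the sync and async request paths now rewrite each such property, keeping only the first alternative plus its description; the block is duplicated verbatim in both paths, and the discarded alternatives (for example the null arm of an Optional parameter) are lost. Roughly, for a property generated from an Optional[int] parameter:

# Schema emitted by Pydantic for an Optional[int] tool parameter:
before = {
    "anyOf": [{"type": "integer"}, {"type": "null"}],
    "description": "Maximum number of results",
}

# After the flattening above: the first alternative wins, description survives.
after = {"type": "integer", "description": "Maximum number of results"}
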
camel/models/groq_model.py CHANGED
@@ -195,7 +195,10 @@ class GroqModel(BaseModelBackend):
 
     @property
     def stream(self) -> bool:
-        r"""Returns whether the model supports streaming. But Groq API does
-        not support streaming.
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
         """
-        return False
+        return self.model_config_dict.get("stream", False)
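
GroqModel no longer hard-codes stream to False; it now honors the stream key of the model config. A sketch of opting in; GroqConfig is camel's existing Groq config class, while the exact ModelType member name here is an assumption:

from camel.configs import GroqConfig
from camel.models import GroqModel
from camel.types import ModelType

model = GroqModel(
    model_type=ModelType.GROQ_LLAMA_3_3_70B,  # assumed member name
    model_config_dict=GroqConfig(stream=True).as_dict(),
)
assert model.stream  # was unconditionally False in 0.2.34
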
camel/models/model_factory.py CHANGED
@@ -29,6 +29,7 @@ from camel.models.nvidia_model import NvidiaModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.models.openai_model import OpenAIModel
+from camel.models.openrouter_model import OpenRouterModel
 from camel.models.qwen_model import QwenModel
 from camel.models.reka_model import RekaModel
 from camel.models.samba_model import SambaModel
@@ -119,6 +120,8 @@ class ModelFactory:
             model_class = AnthropicModel
         elif model_platform.is_groq and model_type.is_groq:
             model_class = GroqModel
+        elif model_platform.is_openrouter and model_type.is_openrouter:
+            model_class = OpenRouterModel
         elif model_platform.is_zhipuai and model_type.is_zhipuai:
             model_class = ZhipuAIModel
         elif model_platform.is_gemini and model_type.is_gemini:
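
Combined with the new members added in camel/types/enums.py (+24 lines), the factory can now build OpenRouter backends. A sketch; the enum member names below are inferred, not confirmed by this diff:

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENROUTER,     # assumed member name
    model_type=ModelType.OPENROUTER_LLAMA_3_1_405B,  # assumed member name
    model_config_dict={"temperature": 0.2},
)
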