camel-ai 0.2.22__py3-none-any.whl → 0.2.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (110)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +41 -0
  3. camel/agents/_utils.py +188 -0
  4. camel/agents/chat_agent.py +570 -965
  5. camel/agents/knowledge_graph_agent.py +7 -1
  6. camel/agents/multi_hop_generator_agent.py +1 -1
  7. camel/configs/base_config.py +10 -13
  8. camel/configs/deepseek_config.py +4 -30
  9. camel/configs/gemini_config.py +5 -31
  10. camel/configs/openai_config.py +14 -32
  11. camel/configs/qwen_config.py +36 -36
  12. camel/datagen/self_improving_cot.py +81 -3
  13. camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
  14. camel/datagen/self_instruct/self_instruct.py +52 -3
  15. camel/datasets/__init__.py +28 -0
  16. camel/datasets/base.py +969 -0
  17. camel/environments/__init__.py +16 -0
  18. camel/environments/base.py +503 -0
  19. camel/extractors/__init__.py +16 -0
  20. camel/extractors/base.py +263 -0
  21. camel/memories/agent_memories.py +16 -1
  22. camel/memories/blocks/chat_history_block.py +10 -2
  23. camel/memories/blocks/vectordb_block.py +1 -0
  24. camel/memories/context_creators/score_based.py +20 -3
  25. camel/memories/records.py +10 -0
  26. camel/messages/base.py +8 -8
  27. camel/models/__init__.py +2 -0
  28. camel/models/_utils.py +57 -0
  29. camel/models/aiml_model.py +48 -17
  30. camel/models/anthropic_model.py +41 -3
  31. camel/models/azure_openai_model.py +39 -3
  32. camel/models/base_audio_model.py +92 -0
  33. camel/models/base_model.py +88 -13
  34. camel/models/cohere_model.py +88 -11
  35. camel/models/deepseek_model.py +107 -45
  36. camel/models/fish_audio_model.py +18 -8
  37. camel/models/gemini_model.py +133 -15
  38. camel/models/groq_model.py +72 -10
  39. camel/models/internlm_model.py +14 -3
  40. camel/models/litellm_model.py +9 -2
  41. camel/models/mistral_model.py +42 -5
  42. camel/models/model_manager.py +57 -3
  43. camel/models/moonshot_model.py +33 -4
  44. camel/models/nemotron_model.py +32 -3
  45. camel/models/nvidia_model.py +43 -3
  46. camel/models/ollama_model.py +139 -17
  47. camel/models/openai_audio_models.py +87 -2
  48. camel/models/openai_compatible_model.py +37 -3
  49. camel/models/openai_model.py +158 -46
  50. camel/models/qwen_model.py +61 -4
  51. camel/models/reka_model.py +53 -3
  52. camel/models/samba_model.py +209 -4
  53. camel/models/sglang_model.py +153 -14
  54. camel/models/siliconflow_model.py +16 -3
  55. camel/models/stub_model.py +46 -4
  56. camel/models/togetherai_model.py +38 -3
  57. camel/models/vllm_model.py +37 -3
  58. camel/models/yi_model.py +36 -3
  59. camel/models/zhipuai_model.py +38 -3
  60. camel/retrievers/__init__.py +3 -0
  61. camel/retrievers/hybrid_retrival.py +237 -0
  62. camel/toolkits/__init__.py +15 -1
  63. camel/toolkits/arxiv_toolkit.py +2 -1
  64. camel/toolkits/ask_news_toolkit.py +4 -2
  65. camel/toolkits/audio_analysis_toolkit.py +238 -0
  66. camel/toolkits/base.py +22 -3
  67. camel/toolkits/code_execution.py +2 -0
  68. camel/toolkits/dappier_toolkit.py +2 -1
  69. camel/toolkits/data_commons_toolkit.py +38 -12
  70. camel/toolkits/excel_toolkit.py +172 -0
  71. camel/toolkits/function_tool.py +13 -0
  72. camel/toolkits/github_toolkit.py +5 -1
  73. camel/toolkits/google_maps_toolkit.py +2 -1
  74. camel/toolkits/google_scholar_toolkit.py +2 -0
  75. camel/toolkits/human_toolkit.py +0 -3
  76. camel/toolkits/image_analysis_toolkit.py +202 -0
  77. camel/toolkits/linkedin_toolkit.py +3 -2
  78. camel/toolkits/meshy_toolkit.py +3 -2
  79. camel/toolkits/mineru_toolkit.py +2 -2
  80. camel/toolkits/networkx_toolkit.py +240 -0
  81. camel/toolkits/notion_toolkit.py +2 -0
  82. camel/toolkits/openbb_toolkit.py +3 -2
  83. camel/toolkits/page_script.js +376 -0
  84. camel/toolkits/reddit_toolkit.py +11 -3
  85. camel/toolkits/retrieval_toolkit.py +6 -1
  86. camel/toolkits/semantic_scholar_toolkit.py +2 -1
  87. camel/toolkits/stripe_toolkit.py +8 -2
  88. camel/toolkits/sympy_toolkit.py +6 -1
  89. camel/toolkits/video_analysis_toolkit.py +407 -0
  90. camel/toolkits/{video_toolkit.py → video_download_toolkit.py} +21 -25
  91. camel/toolkits/web_toolkit.py +1307 -0
  92. camel/toolkits/whatsapp_toolkit.py +3 -2
  93. camel/toolkits/zapier_toolkit.py +191 -0
  94. camel/types/__init__.py +2 -2
  95. camel/types/agents/__init__.py +16 -0
  96. camel/types/agents/tool_calling_record.py +52 -0
  97. camel/types/enums.py +3 -0
  98. camel/types/openai_types.py +16 -14
  99. camel/utils/__init__.py +2 -1
  100. camel/utils/async_func.py +2 -2
  101. camel/utils/commons.py +114 -1
  102. camel/verifiers/__init__.py +23 -0
  103. camel/verifiers/base.py +340 -0
  104. camel/verifiers/models.py +82 -0
  105. camel/verifiers/python_verifier.py +202 -0
  106. camel_ai-0.2.23.dist-info/METADATA +671 -0
  107. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/RECORD +122 -97
  108. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/WHEEL +1 -1
  109. camel_ai-0.2.22.dist-info/METADATA +0 -527
  110. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info/licenses}/LICENSE +0 -0
camel/extractors/base.py ADDED
@@ -0,0 +1,263 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ from abc import ABC, abstractmethod
+ from types import TracebackType
+ from typing import Any, Dict, Optional, Type
+
+ from typing_extensions import Self
+
+ from camel.logger import get_logger
+ from camel.utils import BatchProcessor
+
+ logger = get_logger(__name__)
+
+
+ class BaseExtractor(ABC):
+     r"""Base class for all response extractors.
+
+     An extractor takes the response and extracts the relevant parts,
+     converting them into a format that the verifier can handle.
+     Implements async context manager protocol for proper resource management.
+     """
+
+     def __init__(
+         self,
+         cache_templates: bool = True,
+         max_cache_size: int = 1000,
+         extraction_timeout: float = 30.0,
+         batch_size: int = 10,
+         monitoring_interval: float = 5.0,
+         cpu_threshold: float = 80.0,
+         memory_threshold: float = 85.0,
+         **kwargs,
+     ):
+         r"""Initialize the extractor.
+
+         Args:
+             cache_templates (bool): Whether to cache extraction templates.
+                 (default: :obj:`True`)
+             max_cache_size (int): Maximum number of templates to cache.
+                 (default: :obj:`1000`)
+             extraction_timeout (float): Maximum time for extraction in seconds.
+                 (default: :obj:`30.0`)
+             batch_size (int): Size of batches for parallel extraction.
+                 (default: :obj:`10`)
+             monitoring_interval (float): Interval in seconds between resource
+                 checks. (default: :obj:`5.0`)
+             cpu_threshold (float): CPU usage percentage threshold for scaling
+                 down. (default: :obj:`80.0`)
+             memory_threshold (float): Memory usage percentage threshold for
+                 scaling down. (default: :obj:`85.0`)
+             **kwargs: Additional extractor parameters.
+
+         Raises:
+             ValueError: If invalid parameter values are provided
+         """
+         # Store all parameters in metadata dict for compatibility
+         self._metadata = {
+             'cache_templates': cache_templates,
+             'max_cache_size': max_cache_size,
+             'extraction_timeout': extraction_timeout,
+             'batch_size': batch_size,
+             'monitoring_interval': monitoring_interval,
+             'cpu_threshold': cpu_threshold,
+             'memory_threshold': memory_threshold,
+             **kwargs,
+         }
+
+         self._is_setup = False
+         self._cache: Dict[str, Any] = {}
+         self._batch_processor: Optional[BatchProcessor] = None
+
+         # Store configuration parameters
+         self._cache_templates = cache_templates
+         self._max_cache_size = max_cache_size
+         self._extraction_timeout = extraction_timeout
+         self._batch_size = batch_size
+         self._monitoring_interval = monitoring_interval
+         self._cpu_threshold = cpu_threshold
+         self._memory_threshold = memory_threshold
+
+     async def setup(self) -> None:
+         r"""Set up the extractor with necessary resources.
+
+         This method:
+         1. Initializes template cache if enabled
+         2. Sets up any parallel processing resources
+         3. Validates extraction patterns
+
+         Raises:
+             RuntimeError: If initialization fails
+         """
+         if self._is_setup:
+             logger.debug(f"{self.__class__.__name__} already initialized")
+             return
+
+         try:
+             # Initialize template cache if enabled
+             if self._cache_templates:
+                 self._template_cache: Dict[str, Any] = {}
+
+             # Set up batch processing if needed
+             if self._batch_size > 1:
+                 self._batch_processor = BatchProcessor(
+                     initial_batch_size=self._batch_size,
+                     monitoring_interval=self._monitoring_interval,
+                     cpu_threshold=self._cpu_threshold,
+                     memory_threshold=self._memory_threshold,
+                 )
+
+             self._is_setup = True
+             logger.info(f"{self.__class__.__name__} initialized successfully")
+
+         except Exception as e:
+             error_msg = f"Error during {self.__class__.__name__} setup: {e}"
+             logger.error(error_msg)
+             await self.cleanup()
+             raise RuntimeError(error_msg) from e
+
+     async def cleanup(self) -> None:
+         r"""Clean up extractor resources.
+
+         This method handles cleanup of resources and resets the extractor
+         state.
+         It ensures:
+         1. All resources are properly released
+         2. Template cache is cleared
+         3. Parallel processing resources are shutdown
+         4. State is reset to initial
+         5. Cleanup happens even if errors occur
+
+         Raises:
+             RuntimeError: If cleanup fails (after resetting initialization
+                 state).
+         """
+         if not self._is_setup:
+             logger.debug(
+                 f"{self.__class__.__name__} not initialized, skipping cleanup"
+             )
+             return
+
+         errors = []
+         try:
+             # Clear template cache
+             if hasattr(self, '_template_cache'):
+                 try:
+                     self._template_cache.clear()
+                 except Exception as e:
+                     errors.append(f"Failed to clear template cache: {e}")
+
+             # Shutdown parallel processing
+             if self._batch_processor is not None:
+                 try:
+                     # Get final performance metrics before cleanup
+                     metrics = self._batch_processor.get_performance_metrics()
+                     logger.info(f"Batch processor final metrics: {metrics}")
+                 except Exception as e:
+                     errors.append(
+                         f"Failed to get batch processor metrics: {e}"
+                     )
+
+             # Preserve init config in metadata
+             self._metadata = {
+                 'cache_templates': self._cache_templates,
+                 'max_cache_size': self._max_cache_size,
+                 'extraction_timeout': self._extraction_timeout,
+                 'batch_size': self._batch_size,
+             }
+
+             if not errors:
+                 logger.info(
+                     f"{self.__class__.__name__} cleaned up successfully"
+                 )
+
+         except Exception as e:
+             errors.append(f"Unexpected error during cleanup: {e}")
+
+         finally:
+             # Always mark as uninitialized, even if cleanup fails
+             self._is_setup = False
+             self._batch_processor = None
+
+             if errors:
+                 error_msg = (
+                     f"Errors during {self.__class__.__name__} cleanup: "
+                     f"{'; '.join(errors)}"
+                 )
+                 logger.error(error_msg)
+                 raise RuntimeError(error_msg)
+
+     async def __aenter__(self) -> Self:
+         r"""Async context manager entry.
+
+         Returns:
+             Self reference for context manager usage.
+         """
+         await self.setup()
+         return self
+
+     async def __aexit__(
+         self,
+         exc_type: Optional[Type[BaseException]],
+         exc_val: Optional[BaseException],
+         exc_tb: Optional[TracebackType],
+     ) -> None:
+         r"""Async context manager exit.
+
+         Args:
+             exc_type (Optional[Type[BaseException]]): Exception type if an
+                 error occurred.
+             exc_val (Optional[BaseException]): Exception value if an error
+                 occurred.
+             exc_tb (Optional[TracebackType]): Exception traceback if an error
+                 occurred.
+         """
+         await self.cleanup()
+
+     @abstractmethod
+     async def extract(
+         self, response: str, context: Optional[Dict[str, Any]] = None
+     ) -> str:
+         r"""Extract relevant parts from a response.
+
+         Extracts:
+         1. Final answer or output
+         2. Chain of thought reasoning steps
+         3. Difficulty assessment
+
+         Args:
+             response (str): Raw response from agent generation.
+             context (Optional[Dict[str, Any]]): Optional context for
+                 extraction like:
+                 - final_answer
+                 - rationale
+                 - complexity
+
+         Returns:
+             str: Extracted content string.
+
+         Raises:
+             ValueError: If response is empty or invalid.
+             NotImplementedError: If no implementation is provided.
+             RuntimeError: If extractor is not initialized.
+         """
+         if not self._is_setup:
+             raise RuntimeError(
+                 f"{self.__class__.__name__} must be initialized "
+                 "before extraction"
+             )
+         if not response or not response.strip():
+             raise ValueError("Empty or whitespace-only response")
+         raise NotImplementedError("Subclasses must implement extract()")
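The new `BaseExtractor` is designed to be subclassed and driven through its async context manager. A minimal usage sketch follows, assuming the new `camel.extractors` package re-exports `BaseExtractor`; the `BoxedAnswerExtractor` subclass and its regex are hypothetical illustrations, not part of this release.

import asyncio
import re
from typing import Any, Dict, Optional

from camel.extractors import BaseExtractor


class BoxedAnswerExtractor(BaseExtractor):
    r"""Hypothetical extractor pulling \boxed{...} answers from a response."""

    async def extract(
        self, response: str, context: Optional[Dict[str, Any]] = None
    ) -> str:
        # Mirror the base class guards rather than calling super().extract(),
        # which unconditionally raises NotImplementedError.
        if not self._is_setup:
            raise RuntimeError("Extractor must be initialized before extraction")
        if not response or not response.strip():
            raise ValueError("Empty or whitespace-only response")
        match = re.search(r"\\boxed\{(.*?)\}", response)
        return match.group(1) if match else response.strip()


async def main() -> None:
    # __aenter__ runs setup(); __aexit__ runs cleanup() on exit.
    # batch_size=1 skips BatchProcessor creation during setup().
    async with BoxedAnswerExtractor(batch_size=1) as extractor:
        print(await extractor.extract(r"The answer is \boxed{42}."))


asyncio.run(main())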
camel/memories/agent_memories.py CHANGED
@@ -12,6 +12,7 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
+ import warnings
  from typing import List, Optional
 
  from camel.memories.base import AgentMemory, BaseContextCreator
@@ -49,7 +50,17 @@ class ChatHistoryMemory(AgentMemory):
          self._chat_history_block = ChatHistoryBlock(storage=storage)
 
      def retrieve(self) -> List[ContextRecord]:
-         return self._chat_history_block.retrieve(self._window_size)
+         records = self._chat_history_block.retrieve(self._window_size)
+         if self._window_size is not None and len(records) == self._window_size:
+             warnings.warn(
+                 f"Chat history window size limit ({self._window_size}) "
+                 f"reached. Some earlier messages will not be included in "
+                 f"the context. Consider increasing window_size if you need "
+                 f"a longer context.",
+                 UserWarning,
+                 stacklevel=2,
+             )
+         return records
 
      def write_records(self, records: List[MemoryRecord]) -> None:
          self._chat_history_block.write_records(records)
@@ -103,6 +114,10 @@ class VectorDBMemory(AgentMemory):
      def get_context_creator(self) -> BaseContextCreator:
          return self._context_creator
 
+     def clear(self) -> None:
+         r"""Removes all records from the vector database memory."""
+         self._vectordb_block.clear()
+
 
  class LongtermAgentMemory(AgentMemory):
      r"""An implementation of the :obj:`AgentMemory` abstract base class for
camel/memories/blocks/chat_history_block.py CHANGED
@@ -85,13 +85,21 @@ class ChatHistoryBlock(MemoryBlock):
              if record.role_at_backend == OpenAIBackendRole.SYSTEM:
                  # System messages are always kept.
                  output_records.append(
-                     ContextRecord(memory_record=record, score=1.0)
+                     ContextRecord(
+                         memory_record=record,
+                         score=1.0,
+                         timestamp=record.timestamp,
+                     )
                  )
              else:
                  # Other messages' score drops down gradually
                  score *= self.keep_rate
                  output_records.append(
-                     ContextRecord(memory_record=record, score=score)
+                     ContextRecord(
+                         memory_record=record,
+                         score=score,
+                         timestamp=record.timestamp,
+                     )
                  )
 
          output_records.reverse()
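For reference, the scoring scheme around the new `timestamp` field is unchanged: walking from the newest message backwards, each non-system message's score is multiplied by `keep_rate`, while system messages stay at 1.0. A toy illustration of the decay with `keep_rate=0.9`:

keep_rate = 0.9
score = 1.0
scores = []
for _ in range(4):  # four non-system messages, newest first
    score *= keep_rate
    scores.append(round(score, 4))
print(scores)  # [0.9, 0.81, 0.729, 0.6561]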
camel/memories/blocks/vectordb_block.py CHANGED
@@ -74,6 +74,7 @@ class VectorDBBlock(MemoryBlock):
          ContextRecord(
              memory_record=MemoryRecord.from_dict(result.record.payload),
              score=result.similarity,
+             timestamp=result.record.payload['timestamp'],
          )
          for result in results
          if result.record.payload is not None
camel/memories/context_creators/score_based.py CHANGED
@@ -15,11 +15,14 @@ from typing import List, Tuple
 
  from pydantic import BaseModel
 
+ from camel.logger import get_logger
  from camel.memories.base import BaseContextCreator
  from camel.memories.records import ContextRecord
  from camel.messages import OpenAIMessage
  from camel.utils import BaseTokenCounter
 
+ logger = get_logger(__name__)
+
 
  class _ContextUnit(BaseModel):
      idx: int
@@ -101,18 +104,30 @@ class ScoreBasedContextCreator(BaseContextCreator):
          # If not exceed token limit, simply return
          total_tokens = sum([unit.num_tokens for unit in context_units])
          if total_tokens <= self.token_limit:
+             context_units = sorted(
+                 context_units,
+                 key=lambda unit: (unit.record.timestamp, unit.record.score),
+             )
              return self._create_output(context_units)
 
+         # Log warning about token limit being exceeded
+         logger.warning(
+             f"Token limit reached ({total_tokens} > {self.token_limit}). "
+             f"Some messages will be pruned from memory to meet the limit."
+         )
+
          # Sort by score
          context_units = sorted(
-             context_units, key=lambda unit: unit.record.score
+             context_units,
+             key=lambda unit: (unit.record.timestamp, unit.record.score),
          )
 
          # Remove the least score messages until total token number is smaller
          # than token limit
          truncate_idx = None
          for i, unit in enumerate(context_units):
-             if unit.record.score == 1:
+             if i == len(context_units) - 1:
+                 # If we reach the end of the list and still exceed the token
                  raise RuntimeError(
                      "Cannot create context: exceed token limit.", total_tokens
                  )
@@ -135,7 +150,9 @@ class ScoreBasedContextCreator(BaseContextCreator):
          for output, specifically a list of OpenAIMessages and an integer
          representing the total token count.
          """
-         context_units = sorted(context_units, key=lambda unit: unit.idx)
+         context_units = sorted(
+             context_units, key=lambda unit: unit.record.timestamp
+         )
          return [
              unit.record.memory_record.to_openai_message()
              for unit in context_units
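Taken together, these hunks change the pruning strategy: units are now ordered by `(timestamp, score)` rather than score alone, dropped from the front until the total fits, and emitted in timestamp order. A simplified standalone sketch of that idea, with plain tuples standing in for `_ContextUnit` (the real code instead raises `RuntimeError` when even the final unit still exceeds the limit):

units = [  # (timestamp, score, num_tokens)
    (1.0, 0.73, 300),
    (2.0, 0.81, 500),
    (3.0, 0.90, 400),
    (4.0, 1.00, 200),
]
token_limit = 700

units.sort(key=lambda u: (u[0], u[1]))  # oldest / lowest-scoring first
total = sum(u[2] for u in units)        # 1400, over the limit
pruned = list(units)
while pruned and total > token_limit:
    total -= pruned.pop(0)[2]           # drop from the front
print(pruned, total)  # [(3.0, 0.9, 400), (4.0, 1.0, 200)] 600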
camel/memories/records.py CHANGED
@@ -13,6 +13,7 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
  from dataclasses import asdict
+ from datetime import datetime, timezone
  from typing import Any, ClassVar, Dict
  from uuid import UUID, uuid4
 
@@ -37,6 +38,7 @@ class MemoryRecord(BaseModel):
          extra_info (Dict[str, str], optional): A dictionary of additional
              key-value pairs that provide more information. If not given, it
              will be an empty `Dict`.
+         timestamp (float, optional): The timestamp when the record was created.
      """
 
      model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -45,6 +47,9 @@ class MemoryRecord(BaseModel):
      role_at_backend: OpenAIBackendRole
      uuid: UUID = Field(default_factory=uuid4)
      extra_info: Dict[str, str] = Field(default_factory=dict)
+     timestamp: float = Field(
+         default_factory=lambda: datetime.now(timezone.utc).timestamp()
+     )
 
      _MESSAGE_TYPES: ClassVar[dict] = {
          "BaseMessage": BaseMessage,
@@ -67,6 +72,7 @@ class MemoryRecord(BaseModel):
              message=reconstructed_message,
              role_at_backend=record_dict["role_at_backend"],
              extra_info=record_dict["extra_info"],
+             timestamp=record_dict["timestamp"],
          )
 
      def to_dict(self) -> Dict[str, Any]:
@@ -81,6 +87,7 @@ class MemoryRecord(BaseModel):
          },
          "role_at_backend": self.role_at_backend,
          "extra_info": self.extra_info,
+         "timestamp": self.timestamp,
      }
 
      def to_openai_message(self) -> OpenAIMessage:
@@ -93,3 +100,6 @@ class ContextRecord(BaseModel):
 
      memory_record: MemoryRecord
      score: float
+     timestamp: float = Field(
+         default_factory=lambda: datetime.now(timezone.utc).timestamp()
+     )
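A quick sketch of the new field in use: `timestamp` defaults to the UTC creation time and, per the `to_dict`/`from_dict` hunks above, now survives a serialization round trip (constructor signatures as in camel 0.2.x):

from camel.memories import MemoryRecord
from camel.messages import BaseMessage
from camel.types import OpenAIBackendRole

record = MemoryRecord(
    message=BaseMessage.make_user_message(role_name="user", content="hello"),
    role_at_backend=OpenAIBackendRole.USER,
)
restored = MemoryRecord.from_dict(record.to_dict())
assert restored.timestamp == record.timestamp  # preserved by the new field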
camel/messages/base.py CHANGED
@@ -15,7 +15,7 @@ import base64
  import io
  import re
  from dataclasses import dataclass
- from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union
+ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
 
  import numpy as np
  from PIL import Image
@@ -72,7 +72,7 @@ class BaseMessage:
      image_list: Optional[List[Image.Image]] = None
      image_detail: Literal["auto", "low", "high"] = "auto"
      video_detail: Literal["auto", "low", "high"] = "low"
-     parsed: Optional[Union[Type[BaseModel], dict]] = None
+     parsed: Optional[Union[BaseModel, dict]] = None
 
      @classmethod
      def make_user_message(
@@ -416,8 +416,8 @@ class BaseMessage:
          Returns:
              OpenAIUserMessage: The converted :obj:`OpenAIUserMessage` object.
          """
-         hybird_content: List[Any] = []
-         hybird_content.append(
+         hybrid_content: List[Any] = []
+         hybrid_content.append(
              {
                  "type": "text",
                  "text": self.content,
@@ -445,7 +445,7 @@ class BaseMessage:
                  "utf-8"
              )
              image_prefix = f"data:image/{image_type};base64,"
-             hybird_content.append(
+             hybrid_content.append(
                  {
                      "type": "image_url",
                      "image_url": {
@@ -504,12 +504,12 @@ class BaseMessage:
                  },
              }
 
-             hybird_content.append(item)
+             hybrid_content.append(item)
 
-         if len(hybird_content) > 1:
+         if len(hybrid_content) > 1:
              return {
                  "role": "user",
-                 "content": hybird_content,
+                 "content": hybrid_content,
              }
          # This return just for str message
          else:
camel/models/__init__.py CHANGED
@@ -14,6 +14,7 @@
  from .aiml_model import AIMLModel
  from .anthropic_model import AnthropicModel
  from .azure_openai_model import AzureOpenAIModel
+ from .base_audio_model import BaseAudioModel
  from .base_model import BaseModelBackend
  from .cohere_model import CohereModel
  from .deepseek_model import DeepSeekModel
@@ -74,4 +75,5 @@ __all__ = [
      'InternLMModel',
      'MoonshotModel',
      'AIMLModel',
+     'BaseAudioModel',
  ]
camel/models/_utils.py ADDED
@@ -0,0 +1,57 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ import textwrap
+ from typing import Optional, Type
+
+ from pydantic import BaseModel
+
+ from camel.messages import OpenAIMessage
+
+
+ def try_modify_message_with_format(
+     message: OpenAIMessage,
+     response_format: Optional[Type[BaseModel]],
+ ) -> None:
+     r"""Modifies the content of the message to include the instruction of using
+     the response format.
+
+     The message will not be modified in the following cases:
+     - response_format is None
+     - message content is not a string
+     - message role is assistant
+
+     Args:
+         response_format (Optional[Type[BaseModel]]): The Pydantic model class.
+         message (OpenAIMessage): The message to be modified.
+     """
+     if response_format is None:
+         return
+
+     if not isinstance(message["content"], str):
+         return
+
+     if message["role"] == "assistant":
+         return
+
+     json_schema = response_format.model_json_schema()
+     updated_prompt = textwrap.dedent(
+         f"""\
+         {message["content"]}
+
+         Please generate a JSON response adhering to the following JSON schema:
+         {json_schema}
+         Make sure the JSON response is valid and matches the EXACT structure defined in the schema. Your result should ONLY be a valid json object, WITHOUT ANY OTHER TEXT OR COMMENTS.
+         """  # noqa: E501
+     )
+     message["content"] = updated_prompt
camel/models/aiml_model.py CHANGED
@@ -12,12 +12,14 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import os
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union
 
- from openai import OpenAI, Stream
+ from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+ from pydantic import BaseModel
 
  from camel.configs import AIML_API_PARAMS, AIMLConfig
  from camel.messages import OpenAIMessage
+ from camel.models._utils import try_modify_message_with_format
  from camel.models.base_model import BaseModelBackend
  from camel.types import (
      ChatCompletion,
@@ -52,11 +54,7 @@ class AIMLModel(BaseModelBackend):
          (default: :obj:`None`)
      """
 
-     @api_keys_required(
-         [
-             ("api_key", 'AIML_API_KEY'),
-         ]
-     )
+     @api_keys_required([("api_key", "AIML_API_KEY")])
      def __init__(
          self,
          model_type: Union[ModelType, str],
@@ -81,12 +79,34 @@ class AIMLModel(BaseModelBackend):
          api_key=self._api_key,
          base_url=self._url,
      )
+         self._async_client = AsyncOpenAI(
+             timeout=180,
+             max_retries=3,
+             api_key=self._api_key,
+             base_url=self._url,
+         )
 
-     def run(
+     def _prepare_request(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Dict[str, Any]:
+         request_config = self.model_config_dict.copy()
+         if tools:
+             request_config["tools"] = tools
+         if response_format:
+             # AIML API does not natively support response format
+             try_modify_message_with_format(messages[-1], response_format)
+         return request_config
+
+     def _run(
          self,
          messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
      ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-         r"""Runs inference of OpenAI chat completion.
+         r"""Runs inference of AIML chat completion.
 
          Args:
              messages (List[OpenAIMessage]): Message list with the chat history
@@ -97,15 +117,26 @@ class AIMLModel(BaseModelBackend):
              `ChatCompletion` in the non-stream mode, or
              `Stream[ChatCompletionChunk]` in the stream mode.
          """
-         # Process model configuration parameters
-         model_config = self.model_config_dict.copy()
-
-         # Handle special case for tools parameter
-         if model_config.get('tools') is None:
-             model_config['tools'] = []
+         request_config = self._prepare_request(
+             messages, response_format, tools
+         )
 
          response = self._client.chat.completions.create(
-             messages=messages, model=self.model_type, **model_config
+             messages=messages, model=self.model_type, **request_config
+         )
+         return response
+
+     async def _arun(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+         request_config = self._prepare_request(
+             messages, response_format, tools
+         )
+         response = await self._async_client.chat.completions.create(
+             messages=messages, model=self.model_type, **request_config
          )
          return response
 
@@ -144,4 +175,4 @@ class AIMLModel(BaseModelBackend):
      Returns:
          bool: Whether the model is in stream mode.
      """
-     return self.model_config_dict.get('stream', False)
+     return self.model_config_dict.get("stream", False)
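The `_run`/`_arun` split here mirrors the pattern applied across the model backends in this release. A hedged sketch of the async path, assuming the reworked `BaseModelBackend` (see base_model.py in the file list) exposes a public `arun()` wrapper over `_arun()`, and that the model name below is one the AIML API serves; `AIML_API_KEY` must be set:

import asyncio

from camel.models import ModelFactory
from camel.types import ModelPlatformType


async def main() -> None:
    model = ModelFactory.create(
        model_platform=ModelPlatformType.AIML,
        model_type="gpt-4o-mini",  # assumed model name, for illustration only
    )
    # arun() awaits _arun() on the AsyncOpenAI client added above.
    response = await model.arun([{"role": "user", "content": "Hello"}])
    print(response.choices[0].message.content)


asyncio.run(main())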