camel-ai 0.2.21__py3-none-any.whl → 0.2.23__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release. This version of camel-ai might be problematic.

Files changed (116)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +41 -0
  3. camel/agents/_utils.py +188 -0
  4. camel/agents/chat_agent.py +570 -965
  5. camel/agents/knowledge_graph_agent.py +7 -1
  6. camel/agents/multi_hop_generator_agent.py +1 -1
  7. camel/configs/base_config.py +10 -13
  8. camel/configs/deepseek_config.py +4 -30
  9. camel/configs/gemini_config.py +5 -31
  10. camel/configs/openai_config.py +14 -32
  11. camel/configs/qwen_config.py +36 -36
  12. camel/datagen/self_improving_cot.py +81 -3
  13. camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
  14. camel/datagen/self_instruct/self_instruct.py +53 -4
  15. camel/datasets/__init__.py +28 -0
  16. camel/datasets/base.py +969 -0
  17. camel/embeddings/openai_embedding.py +10 -1
  18. camel/environments/__init__.py +16 -0
  19. camel/environments/base.py +503 -0
  20. camel/extractors/__init__.py +16 -0
  21. camel/extractors/base.py +263 -0
  22. camel/interpreters/docker/Dockerfile +12 -0
  23. camel/interpreters/docker_interpreter.py +19 -1
  24. camel/interpreters/subprocess_interpreter.py +42 -17
  25. camel/loaders/__init__.py +2 -0
  26. camel/loaders/mineru_extractor.py +250 -0
  27. camel/memories/agent_memories.py +16 -1
  28. camel/memories/blocks/chat_history_block.py +10 -2
  29. camel/memories/blocks/vectordb_block.py +1 -0
  30. camel/memories/context_creators/score_based.py +20 -3
  31. camel/memories/records.py +10 -0
  32. camel/messages/base.py +8 -8
  33. camel/models/__init__.py +2 -0
  34. camel/models/_utils.py +57 -0
  35. camel/models/aiml_model.py +48 -17
  36. camel/models/anthropic_model.py +41 -3
  37. camel/models/azure_openai_model.py +39 -3
  38. camel/models/base_audio_model.py +92 -0
  39. camel/models/base_model.py +132 -4
  40. camel/models/cohere_model.py +88 -11
  41. camel/models/deepseek_model.py +107 -63
  42. camel/models/fish_audio_model.py +18 -8
  43. camel/models/gemini_model.py +133 -15
  44. camel/models/groq_model.py +72 -10
  45. camel/models/internlm_model.py +14 -3
  46. camel/models/litellm_model.py +9 -2
  47. camel/models/mistral_model.py +42 -5
  48. camel/models/model_manager.py +57 -3
  49. camel/models/moonshot_model.py +33 -4
  50. camel/models/nemotron_model.py +32 -3
  51. camel/models/nvidia_model.py +43 -3
  52. camel/models/ollama_model.py +139 -17
  53. camel/models/openai_audio_models.py +87 -2
  54. camel/models/openai_compatible_model.py +37 -3
  55. camel/models/openai_model.py +158 -46
  56. camel/models/qwen_model.py +61 -4
  57. camel/models/reka_model.py +53 -3
  58. camel/models/samba_model.py +209 -4
  59. camel/models/sglang_model.py +153 -14
  60. camel/models/siliconflow_model.py +16 -3
  61. camel/models/stub_model.py +46 -4
  62. camel/models/togetherai_model.py +38 -3
  63. camel/models/vllm_model.py +37 -3
  64. camel/models/yi_model.py +36 -3
  65. camel/models/zhipuai_model.py +38 -3
  66. camel/retrievers/__init__.py +3 -0
  67. camel/retrievers/hybrid_retrival.py +237 -0
  68. camel/toolkits/__init__.py +20 -3
  69. camel/toolkits/arxiv_toolkit.py +2 -1
  70. camel/toolkits/ask_news_toolkit.py +4 -2
  71. camel/toolkits/audio_analysis_toolkit.py +238 -0
  72. camel/toolkits/base.py +22 -3
  73. camel/toolkits/code_execution.py +2 -0
  74. camel/toolkits/dappier_toolkit.py +2 -1
  75. camel/toolkits/data_commons_toolkit.py +38 -12
  76. camel/toolkits/excel_toolkit.py +172 -0
  77. camel/toolkits/function_tool.py +13 -0
  78. camel/toolkits/github_toolkit.py +5 -1
  79. camel/toolkits/google_maps_toolkit.py +2 -1
  80. camel/toolkits/google_scholar_toolkit.py +2 -0
  81. camel/toolkits/human_toolkit.py +0 -3
  82. camel/toolkits/image_analysis_toolkit.py +202 -0
  83. camel/toolkits/linkedin_toolkit.py +3 -2
  84. camel/toolkits/meshy_toolkit.py +3 -2
  85. camel/toolkits/mineru_toolkit.py +178 -0
  86. camel/toolkits/networkx_toolkit.py +240 -0
  87. camel/toolkits/notion_toolkit.py +2 -0
  88. camel/toolkits/openbb_toolkit.py +3 -2
  89. camel/toolkits/page_script.js +376 -0
  90. camel/toolkits/reddit_toolkit.py +11 -3
  91. camel/toolkits/retrieval_toolkit.py +6 -1
  92. camel/toolkits/semantic_scholar_toolkit.py +2 -1
  93. camel/toolkits/stripe_toolkit.py +8 -2
  94. camel/toolkits/sympy_toolkit.py +44 -1
  95. camel/toolkits/video_analysis_toolkit.py +407 -0
  96. camel/toolkits/{video_toolkit.py → video_download_toolkit.py} +21 -25
  97. camel/toolkits/web_toolkit.py +1307 -0
  98. camel/toolkits/whatsapp_toolkit.py +3 -2
  99. camel/toolkits/zapier_toolkit.py +191 -0
  100. camel/types/__init__.py +2 -2
  101. camel/types/agents/__init__.py +16 -0
  102. camel/types/agents/tool_calling_record.py +52 -0
  103. camel/types/enums.py +3 -0
  104. camel/types/openai_types.py +16 -14
  105. camel/utils/__init__.py +2 -1
  106. camel/utils/async_func.py +2 -2
  107. camel/utils/commons.py +114 -1
  108. camel/verifiers/__init__.py +23 -0
  109. camel/verifiers/base.py +340 -0
  110. camel/verifiers/models.py +82 -0
  111. camel/verifiers/python_verifier.py +202 -0
  112. camel_ai-0.2.23.dist-info/METADATA +671 -0
  113. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23.dist-info}/RECORD +127 -99
  114. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23.dist-info}/WHEEL +1 -1
  115. camel_ai-0.2.21.dist-info/METADATA +0 -528
  116. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23.dist-info/licenses}/LICENSE +0 -0
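A pattern recurs throughout the model-backend hunks below: each backend renames its public `run` method to `_run`, adds an async `_arun` counterpart backed by a second `AsyncOpenAI` client, and accepts optional `response_format` and `tools` parameters. A minimal sketch of the shape these backends converge on; `DemoBackend` is hypothetical and not part of camel-ai, but the client construction and call shape mirror the hunks below:

```python
# Sketch of the sync/async backend pair introduced in 0.2.23 (illustrative).
from typing import Any, Dict, List, Optional, Type

from openai import AsyncOpenAI, OpenAI
from pydantic import BaseModel


class DemoBackend:
    def __init__(self, api_key: str, url: str, model_type: str,
                 model_config_dict: Dict[str, Any]) -> None:
        self.model_type = model_type
        self.model_config_dict = model_config_dict
        # Paired sync and async clients, as each backend now constructs.
        self._client = OpenAI(
            timeout=180, max_retries=3, api_key=api_key, base_url=url
        )
        self._async_client = AsyncOpenAI(
            timeout=180, max_retries=3, api_key=api_key, base_url=url
        )

    def _run(
        self,
        messages: List[Dict[str, Any]],  # OpenAIMessage-style dicts
        response_format: Optional[Type[BaseModel]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ):
        # Blocking path: delegate to the sync client.
        return self._client.chat.completions.create(
            messages=messages, model=self.model_type, **self.model_config_dict
        )

    async def _arun(
        self,
        messages: List[Dict[str, Any]],
        response_format: Optional[Type[BaseModel]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ):
        # Async path: the same call through the AsyncOpenAI client.
        return await self._async_client.chat.completions.create(
            messages=messages, model=self.model_type, **self.model_config_dict
        )
```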
camel/models/sglang_model.py CHANGED
@@ -12,11 +12,13 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import logging
+ import subprocess
  import threading
  import time
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import OpenAI, Stream
+ from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+ from pydantic import BaseModel

  from camel.configs import SGLANG_API_PARAMS, SGLangConfig
  from camel.messages import OpenAIMessage
@@ -85,13 +87,14 @@ class SGLangModel(BaseModelBackend):
  api_key="Set-but-ignored", # required but ignored
  base_url=self._url,
  )
+ self._async_client = AsyncOpenAI(
+ timeout=180,
+ max_retries=3,
+ api_key="Set-but-ignored", # required but ignored
+ base_url=self._url,
+ )

  def _start_server(self) -> None:
- from sglang.utils import ( # type: ignore[import-untyped]
- execute_shell_command,
- wait_for_server,
- )
-
  try:
  if not self._url:
  cmd = (
@@ -101,10 +104,10 @@ class SGLangModel(BaseModelBackend):
  f"--host 0.0.0.0"
  )

- server_process = execute_shell_command(cmd)
- wait_for_server("http://localhost:30000")
+ server_process = _execute_shell_command(cmd)
+ _wait_for_server("http://localhost:30000")
  self._url = "http://127.0.0.1:30000/v1"
- self.server_process = server_process
+ self.server_process = server_process # type: ignore[assignment]
  # Start the inactivity monitor in a background thread
  self._inactivity_thread = threading.Thread(
  target=self._monitor_inactivity, daemon=True
@@ -131,8 +134,6 @@ class SGLangModel(BaseModelBackend):
  r"""Monitor whether the server process has been inactive for over 10
  minutes.
  """
- from sglang.utils import terminate_process
-
  while True:
  # Check every 10 seconds
  time.sleep(10)
@@ -143,7 +144,7 @@ class SGLangModel(BaseModelBackend):
  time.time() - self.last_run_time > 600
  ):
  if self.server_process:
- terminate_process(self.server_process)
+ _terminate_process(self.server_process)
  self.server_process = None
  self._client = None # Invalidate the client
  logging.info(
@@ -178,9 +179,49 @@ class SGLangModel(BaseModelBackend):
  "input into SGLang model backend."
  )

- def run(
+ async def _arun(
  self,
  messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
+ ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ r"""Runs inference of OpenAI chat completion.
+
+ Args:
+ messages (List[OpenAIMessage]): Message list with the chat history
+ in OpenAI API format.
+
+ Returns:
+ Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ `ChatCompletion` in the non-stream mode, or
+ `AsyncStream[ChatCompletionChunk]` in the stream mode.
+ """
+
+ # Ensure server is running
+ self._ensure_server_running()
+
+ with self._lock:
+ # Update last run time
+ self.last_run_time = time.time()
+
+ if self._client is None:
+ raise RuntimeError(
+ "Client is not initialized. Ensure the server is running."
+ )
+
+ response = await self._async_client.chat.completions.create(
+ messages=messages,
+ model=self.model_type,
+ **self.model_config_dict,
+ )
+
+ return response
+
+ def _run(
+ self,
+ messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
  r"""Runs inference of OpenAI chat completion.

@@ -223,3 +264,101 @@ class SGLangModel(BaseModelBackend):
  bool: Whether the model is in stream mode.
  """
  return self.model_config_dict.get('stream', False)
+
+
+ # Below are helper functions from sglang.utils
+ def _terminate_process(process):
+ _kill_process_tree(process.pid)
+
+
+ def _kill_process_tree(
+ parent_pid, include_parent: bool = True, skip_pid: Optional[int] = None
+ ):
+ r"""Kill the process and all its child processes."""
+ import os
+ import signal
+
+ import psutil
+
+ if parent_pid is None:
+ parent_pid = os.getpid()
+ include_parent = False
+
+ try:
+ itself = psutil.Process(parent_pid)
+ except psutil.NoSuchProcess:
+ return
+
+ children = itself.children(recursive=True)
+ for child in children:
+ if child.pid == skip_pid:
+ continue
+ try:
+ child.kill()
+ except psutil.NoSuchProcess:
+ pass
+
+ if include_parent:
+ try:
+ itself.kill()
+
+ # Sometime processes cannot be killed with SIGKILL
+ # so we send an additional signal to kill them.
+ itself.send_signal(signal.SIGQUIT)
+ except psutil.NoSuchProcess:
+ pass
+
+
+ def _execute_shell_command(command: str) -> subprocess.Popen:
+ r"""Execute a shell command and return the process handle
+
+ Args:
+ command: Shell command as a string (can include \\ line continuations)
+ Returns:
+ subprocess.Popen: Process handle
+ """
+ import subprocess
+
+ # Replace \ newline with space and split
+ command = command.replace("\\\n", " ").replace("\\", " ")
+ parts = command.split()
+
+ return subprocess.Popen(parts, text=True, stderr=subprocess.STDOUT)
+
+
+ def _wait_for_server(base_url: str, timeout: Optional[int] = None) -> None:
+ r"""Wait for the server to be ready by polling the /v1/models endpoint.
+
+ Args:
+ base_url: The base URL of the server
+ timeout: Maximum time to wait in seconds. None means wait forever.
+ """
+ import requests
+
+ start_time = time.time()
+ while True:
+ try:
+ response = requests.get(
+ f"{base_url}/v1/models",
+ headers={"Authorization": "Bearer None"},
+ )
+ if response.status_code == 200:
+ time.sleep(5)
+ print(
+ """\n
+ NOTE: Typically, the server runs in a separate terminal.
+ In this notebook, we run the server and notebook code
+ together, so their outputs are combined.
+ To improve clarity, the server logs are displayed in the
+ original black color, while the notebook outputs are
+ highlighted in blue.
+ """
+ )
+ break
+
+ if timeout and time.time() - start_time > timeout:
+ raise TimeoutError(
+ "Server did not become ready within timeout period"
+ )
+ except requests.exceptions.RequestException:
+ time.sleep(1)
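The vendored `_wait_for_server` helper above polls `GET {base_url}/v1/models` until it returns 200. A runnable sketch of that contract against a throwaway local stub (the stub handler and port 30001 are illustrative; `_wait_for_server` is assumed in scope, and `requests` must be installed, as the helper itself requires):

```python
# Stand up a tiny HTTP server that answers /v1/models, then poll it the
# way _wait_for_server does.
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer


class ModelsStub(BaseHTTPRequestHandler):
    def do_GET(self):
        # Only the readiness endpoint returns 200, as a real server would.
        status = 200 if self.path == "/v1/models" else 404
        self.send_response(status)
        self.end_headers()
        self.wfile.write(b'{"data": []}')

    def log_message(self, *args):
        pass  # keep the demo output quiet


server = HTTPServer(("127.0.0.1", 30001), ModelsStub)
threading.Thread(target=server.serve_forever, daemon=True).start()

_wait_for_server("http://127.0.0.1:30001", timeout=10)  # returns after a 200
server.shutdown()
```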
camel/models/siliconflow_model.py CHANGED
@@ -12,9 +12,10 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import os
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import OpenAI, Stream
+ from openai import AsyncStream, OpenAI, Stream
+ from pydantic import BaseModel

  from camel.configs import SILICONFLOW_API_PARAMS, SiliconFlowConfig
  from camel.messages import OpenAIMessage
@@ -82,9 +83,11 @@ class SiliconFlowModel(BaseModelBackend):
  base_url=self._url,
  )

- def run(
+ def _run(
  self,
  messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
  r"""Runs inference of SiliconFlow chat completion.

@@ -104,6 +107,16 @@ class SiliconFlowModel(BaseModelBackend):
  )
  return response

+ async def _arun(
+ self,
+ messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
+ ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ raise NotImplementedError(
+ "SiliconFlow does not support async inference."
+ )
+
  @property
  def token_counter(self) -> BaseTokenCounter:
  r"""Initialize the token counter for the model backend.
camel/models/stub_model.py CHANGED
@@ -12,9 +12,10 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import time
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import Stream
+ from openai import AsyncStream, Stream
+ from pydantic import BaseModel

  from camel.messages import OpenAIMessage
  from camel.models import BaseModelBackend
@@ -74,8 +75,49 @@ class StubModel(BaseModelBackend):
  self._token_counter = StubTokenCounter()
  return self._token_counter

- def run(
- self, messages: List[OpenAIMessage]
+ async def _arun(
+ self,
+ messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
+ ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ r"""Run fake inference by returning a fixed string.
+ All arguments are unused for the dummy model.
+
+ Returns:
+ Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ The response from the dummy model.
+ """
+ ARBITRARY_STRING = "Lorem Ipsum"
+ response: ChatCompletion = ChatCompletion(
+ id="stub_model_id",
+ model="stub",
+ object="chat.completion",
+ created=int(time.time()),
+ choices=[
+ Choice(
+ finish_reason="stop",
+ index=0,
+ message=ChatCompletionMessage(
+ content=ARBITRARY_STRING,
+ role="assistant",
+ ),
+ logprobs=None,
+ )
+ ],
+ usage=CompletionUsage(
+ completion_tokens=10,
+ prompt_tokens=10,
+ total_tokens=20,
+ ),
+ )
+ return response
+
+ def _run(
+ self,
+ messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
  r"""Run fake inference by returning a fixed string.
  All arguments are unused for the dummy model.
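The stub's canned reply is assembled from OpenAI's own response types, which camel re-exports via `camel.types`. The same object can be built directly from the `openai` package, which makes the shape easy to inspect; a sketch mirroring the hunk above:

```python
# Rebuild the stub's canned ChatCompletion from the upstream openai types,
# then read it back the way a caller would.
import time

from openai.types.chat.chat_completion import ChatCompletion, Choice
from openai.types.chat.chat_completion_message import ChatCompletionMessage
from openai.types.completion_usage import CompletionUsage

completion = ChatCompletion(
    id="stub_model_id",
    model="stub",
    object="chat.completion",
    created=int(time.time()),
    choices=[
        Choice(
            finish_reason="stop",
            index=0,
            message=ChatCompletionMessage(content="Lorem Ipsum", role="assistant"),
            logprobs=None,
        )
    ],
    usage=CompletionUsage(completion_tokens=10, prompt_tokens=10, total_tokens=20),
)

assert completion.choices[0].message.content == "Lorem Ipsum"
assert completion.usage is not None and completion.usage.total_tokens == 20
```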
camel/models/togetherai_model.py CHANGED
@@ -13,9 +13,10 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

  import os
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import OpenAI, Stream
+ from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+ from pydantic import BaseModel

  from camel.configs import TOGETHERAI_API_PARAMS, TogetherAIConfig
  from camel.messages import OpenAIMessage
@@ -82,10 +83,44 @@ class TogetherAIModel(BaseModelBackend):
  api_key=self._api_key,
  base_url=self._url,
  )
+ self._async_client = AsyncOpenAI(
+ timeout=180,
+ max_retries=3,
+ api_key=self._api_key,
+ base_url=self._url,
+ )
+
+ async def _arun(
+ self,
+ messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
+ ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ r"""Runs inference of OpenAI chat completion.
+
+ Args:
+ messages (List[OpenAIMessage]): Message list with the chat history
+ in OpenAI API format.
+
+ Returns:
+ Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ `ChatCompletion` in the non-stream mode, or
+ `AsyncStream[ChatCompletionChunk]` in the stream mode.
+ """
+ # Use OpenAI cilent as interface call Together AI
+ # Reference: https://docs.together.ai/docs/openai-api-compatibility
+ response = await self._async_client.chat.completions.create(
+ messages=messages,
+ model=self.model_type,
+ **self.model_config_dict,
+ )
+ return response

- def run(
+ def _run(
  self,
  messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
  r"""Runs inference of OpenAI chat completion.

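The TogetherAI backend leans on Together's OpenAI-compatible endpoint (per the reference comment in the hunk). Outside camel, the equivalent direct call looks roughly like this; the base URL and model name are taken as assumptions from Together's public docs, not from this diff:

```python
# Direct AsyncOpenAI call against Together's OpenAI-compatible endpoint,
# mirroring what TogetherAIModel._arun does internally. base_url and the
# model name are assumptions; verify against
# https://docs.together.ai/docs/openai-api-compatibility.
import asyncio
import os

from openai import AsyncOpenAI


async def main() -> None:
    client = AsyncOpenAI(
        timeout=180,
        max_retries=3,
        api_key=os.environ["TOGETHER_API_KEY"],
        base_url="https://api.together.xyz/v1",
    )
    response = await client.chat.completions.create(
        messages=[{"role": "user", "content": "Say hi"}],
        model="meta-llama/Llama-3-8b-chat-hf",  # hypothetical choice
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```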
camel/models/vllm_model.py CHANGED
@@ -13,9 +13,10 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import os
  import subprocess
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import OpenAI, Stream
+ from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+ from pydantic import BaseModel

  from camel.configs import VLLM_API_PARAMS, VLLMConfig
  from camel.messages import OpenAIMessage
@@ -77,6 +78,12 @@ class VLLMModel(BaseModelBackend):
  api_key="EMPTY", # required but ignored
  base_url=self._url,
  )
+ self._async_client = AsyncOpenAI(
+ timeout=180,
+ max_retries=3,
+ api_key="EMPTY", # required but ignored
+ base_url=self._url,
+ )

  def _start_server(self) -> None:
  r"""Starts the vllm server in a subprocess."""
@@ -121,9 +128,36 @@ class VLLMModel(BaseModelBackend):
  "input into vLLM model backend."
  )

- def run(
+ async def _arun(
+ self,
+ messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
+ ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ r"""Runs inference of OpenAI chat completion.
+
+ Args:
+ messages (List[OpenAIMessage]): Message list with the chat history
+ in OpenAI API format.
+
+ Returns:
+ Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ `ChatCompletion` in the non-stream mode, or
+ `AsyncStream[ChatCompletionChunk]` in the stream mode.
+ """
+
+ response = await self._async_client.chat.completions.create(
+ messages=messages,
+ model=self.model_type,
+ **self.model_config_dict,
+ )
+ return response
+
+ def _run(
  self,
  messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
  r"""Runs inference of OpenAI chat completion.

camel/models/yi_model.py CHANGED
@@ -13,9 +13,10 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

  import os
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import OpenAI, Stream
+ from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+ from pydantic import BaseModel

  from camel.configs import YI_API_PARAMS, YiConfig
  from camel.messages import OpenAIMessage
@@ -80,10 +81,42 @@ class YiModel(BaseModelBackend):
  api_key=self._api_key,
  base_url=self._url,
  )
+ self._async_client = AsyncOpenAI(
+ timeout=180,
+ max_retries=3,
+ api_key=self._api_key,
+ base_url=self._url,
+ )
+
+ async def _arun(
+ self,
+ messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
+ ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ r"""Runs inference of Yi chat completion.
+
+ Args:
+ messages (List[OpenAIMessage]): Message list with the chat history
+ in OpenAI API format.
+
+ Returns:
+ Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ `ChatCompletion` in the non-stream mode, or
+ `AsyncStream[ChatCompletionChunk]` in the stream mode.
+ """
+ response = await self._async_client.chat.completions.create(
+ messages=messages,
+ model=self.model_type,
+ **self.model_config_dict,
+ )
+ return response

- def run(
+ def _run(
  self,
  messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
  r"""Runs inference of Yi chat completion.

camel/models/zhipuai_model.py CHANGED
@@ -13,9 +13,10 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

  import os
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import OpenAI, Stream
+ from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+ from pydantic import BaseModel

  from camel.configs import ZHIPUAI_API_PARAMS, ZhipuAIConfig
  from camel.messages import OpenAIMessage
@@ -80,10 +81,44 @@ class ZhipuAIModel(BaseModelBackend):
  api_key=self._api_key,
  base_url=self._url,
  )
+ self._async_client = AsyncOpenAI(
+ timeout=180,
+ max_retries=3,
+ api_key=self._api_key,
+ base_url=self._url,
+ )
+
+ async def _arun(
+ self,
+ messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
+ ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ r"""Runs inference of OpenAI chat completion.
+
+ Args:
+ messages (List[OpenAIMessage]): Message list with the chat history
+ in OpenAI API format.
+
+ Returns:
+ Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+ `ChatCompletion` in the non-stream mode, or
+ `AsyncStream[ChatCompletionChunk]` in the stream mode.
+ """
+ # Use OpenAI cilent as interface call ZhipuAI
+ # Reference: https://open.bigmodel.cn/dev/api#openai_sdk
+ response = await self._async_client.chat.completions.create(
+ messages=messages,
+ model=self.model_type,
+ **self.model_config_dict,
+ )
+ return response

- def run(
+ def _run(
  self,
  messages: List[OpenAIMessage],
+ response_format: Optional[Type[BaseModel]] = None,
+ tools: Optional[List[Dict[str, Any]]] = None,
  ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
  r"""Runs inference of OpenAI chat completion.

camel/retrievers/__init__.py CHANGED
@@ -11,11 +11,13 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # ruff: noqa: I001
  from .auto_retriever import AutoRetriever
  from .base import BaseRetriever
  from .bm25_retriever import BM25Retriever
  from .cohere_rerank_retriever import CohereRerankRetriever
  from .vector_retriever import VectorRetriever
+ from .hybrid_retrival import HybridRetriever

  __all__ = [
  'BaseRetriever',
@@ -23,4 +25,5 @@ __all__ = [
  'AutoRetriever',
  'BM25Retriever',
  'CohereRerankRetriever',
+ 'HybridRetriever',
  ]
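With the re-export in place, `HybridRetriever` becomes importable from the package root (note the `hybrid_retrival` module spelling, kept as-is from the diff). A minimal smoke test; the class's constructor arguments are not shown in this diff, so none are passed here:

```python
# Verify the new 0.2.23 re-export; the class lives in the hybrid_retrival
# module added in this release.
from camel.retrievers import HybridRetriever

print(HybridRetriever.__module__)  # expected: camel.retrievers.hybrid_retrival
```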