camel-ai 0.2.10__py3-none-any.whl → 0.2.12__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registry, and is provided for informational purposes only.

Files changed (76)
  1. camel/__init__.py +6 -1
  2. camel/agents/chat_agent.py +87 -6
  3. camel/agents/deductive_reasoner_agent.py +4 -1
  4. camel/benchmarks/__init__.py +18 -0
  5. camel/benchmarks/base.py +152 -0
  6. camel/benchmarks/gaia.py +478 -0
  7. camel/configs/__init__.py +6 -0
  8. camel/configs/mistral_config.py +0 -3
  9. camel/configs/nvidia_config.py +70 -0
  10. camel/configs/ollama_config.py +4 -2
  11. camel/configs/sglang_config.py +71 -0
  12. camel/configs/vllm_config.py +10 -1
  13. camel/data_collector/__init__.py +19 -0
  14. camel/data_collector/alpaca_collector.py +127 -0
  15. camel/data_collector/base.py +211 -0
  16. camel/data_collector/sharegpt_collector.py +205 -0
  17. camel/datahubs/__init__.py +23 -0
  18. camel/datahubs/base.py +136 -0
  19. camel/datahubs/huggingface.py +433 -0
  20. camel/datahubs/models.py +22 -0
  21. camel/embeddings/vlm_embedding.py +4 -1
  22. camel/interpreters/__init__.py +2 -0
  23. camel/interpreters/docker_interpreter.py +7 -2
  24. camel/interpreters/e2b_interpreter.py +136 -0
  25. camel/interpreters/subprocess_interpreter.py +7 -2
  26. camel/loaders/__init__.py +3 -1
  27. camel/loaders/base_io.py +41 -41
  28. camel/loaders/firecrawl_reader.py +0 -3
  29. camel/logger.py +112 -0
  30. camel/messages/__init__.py +3 -1
  31. camel/messages/base.py +10 -7
  32. camel/messages/conversion/__init__.py +3 -1
  33. camel/messages/conversion/alpaca.py +122 -0
  34. camel/models/__init__.py +7 -0
  35. camel/models/anthropic_model.py +14 -4
  36. camel/models/base_model.py +28 -0
  37. camel/models/groq_model.py +1 -1
  38. camel/models/model_factory.py +6 -0
  39. camel/models/model_manager.py +212 -0
  40. camel/models/nvidia_model.py +141 -0
  41. camel/models/ollama_model.py +12 -0
  42. camel/models/openai_model.py +0 -25
  43. camel/models/reward/__init__.py +22 -0
  44. camel/models/reward/base_reward_model.py +58 -0
  45. camel/models/reward/evaluator.py +63 -0
  46. camel/models/reward/nemotron_model.py +112 -0
  47. camel/models/sglang_model.py +225 -0
  48. camel/models/vllm_model.py +1 -1
  49. camel/personas/persona_hub.py +2 -2
  50. camel/retrievers/vector_retriever.py +22 -5
  51. camel/schemas/openai_converter.py +2 -2
  52. camel/societies/babyagi_playing.py +4 -1
  53. camel/societies/workforce/role_playing_worker.py +2 -2
  54. camel/societies/workforce/single_agent_worker.py +2 -2
  55. camel/societies/workforce/workforce.py +3 -3
  56. camel/storages/object_storages/amazon_s3.py +2 -2
  57. camel/storages/object_storages/azure_blob.py +2 -2
  58. camel/storages/object_storages/google_cloud.py +2 -2
  59. camel/toolkits/__init__.py +5 -0
  60. camel/toolkits/code_execution.py +42 -4
  61. camel/toolkits/function_tool.py +41 -0
  62. camel/toolkits/human_toolkit.py +1 -0
  63. camel/toolkits/math_toolkit.py +47 -16
  64. camel/toolkits/meshy_toolkit.py +185 -0
  65. camel/toolkits/search_toolkit.py +154 -2
  66. camel/toolkits/stripe_toolkit.py +273 -0
  67. camel/toolkits/twitter_toolkit.py +3 -0
  68. camel/types/__init__.py +2 -0
  69. camel/types/enums.py +68 -10
  70. camel/utils/commons.py +22 -5
  71. camel/utils/token_counting.py +26 -11
  72. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/METADATA +13 -6
  73. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/RECORD +76 -51
  74. /camel/messages/conversion/{models.py → conversation_models.py} +0 -0
  75. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/LICENSE +0 -0
  76. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/WHEEL +0 -0
camel/models/reward/nemotron_model.py (new file)
@@ -0,0 +1,112 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Dict, List, Optional, Union
+
+from openai import OpenAI
+
+from camel.models.reward import BaseRewardModel
+from camel.types import ChatCompletion, ModelType
+from camel.utils import api_keys_required
+
+
+class NemotronRewardModel(BaseRewardModel):
+    r"""Reward model based on the Nemotron model with OpenAI compatibility.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        api_key (Optional[str], optional): The API key for authenticating
+            with the model service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the model service.
+
+    Note:
+        The Nemotron model does not support model config.
+    """
+
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+    ) -> None:
+        url = url or os.environ.get(
+            "NVIDIA_API_BASE_URL", "https://integrate.api.nvidia.com/v1"
+        )
+        api_key = api_key or os.environ.get("NVIDIA_API_KEY")
+        super().__init__(model_type, api_key, url)
+        self._client = OpenAI(
+            timeout=60,
+            max_retries=3,
+            base_url=self.url,
+            api_key=self.api_key,
+        )
+
+    @api_keys_required("NVIDIA_API_KEY")
+    def evaluate(self, messages: List[Dict[str, str]]) -> Dict[str, float]:
+        r"""Evaluate the messages using the Nemotron model.
+
+        Args:
+            messages (List[Dict[str, str]]): A list of messages where each
+                message is a dictionary format.
+
+        Returns:
+            Dict[str, float]: A dictionary mapping score types to their
+                values.
+        """
+        response = self._client.chat.completions.create(
+            messages=messages,  # type: ignore[arg-type]
+            model=self.model_type,
+        )
+        scores = self._parse_scores(response)
+        return scores
+
+    def get_scores_types(self) -> List[str]:
+        r"""Get the list of score types that the reward model can return.
+
+        Returns:
+            List[str]: A list of score types that the reward model can return.
+        """
+        return [
+            "helpfulness",
+            "correctness",
+            "coherence",
+            "complexity",
+            "verbosity",
+        ]
+
+    def _parse_scores(self, response: ChatCompletion) -> Dict[str, float]:
+        r"""Parse the scores from the response.
+
+        Args:
+            response (ChatCompletion): A ChatCompletion object with the scores.
+
+        Returns:
+            Dict[str, float]: A dictionary mapping score types to their values.
+        """
+        try:
+            choices = response.choices
+            logprobs = (
+                choices[0].logprobs.content
+                if choices and choices[0].logprobs
+                else None
+            )
+            scores = (
+                {entry.token: entry.logprob for entry in logprobs if entry}
+                if logprobs
+                else {}
+            )
+            return scores
+        except Exception as e:
+            raise ValueError(f"Failed to parse scores: {e}")
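For orientation, a minimal usage sketch. This is hypothetical: it assumes NemotronRewardModel is exported from camel.models.reward (consistent with the new reward/__init__.py above) and that the NVIDIA-hosted reward model is addressed by its API catalog name, which may differ from the ModelType enum entry.

import os

from camel.models.reward import NemotronRewardModel

# Assumes NVIDIA_API_KEY is set in the environment; the model identifier
# below is the NVIDIA API catalog name and is illustrative only.
reward_model = NemotronRewardModel(
    model_type="nvidia/llama-3.1-nemotron-70b-reward",
)
scores = reward_model.evaluate(
    [
        {"role": "user", "content": "What is 2 + 2?"},
        {"role": "assistant", "content": "2 + 2 equals 4."},
    ]
)
print(scores)  # e.g. {'helpfulness': ..., 'correctness': ..., ...}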
camel/models/sglang_model.py (new file)
@@ -0,0 +1,225 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import logging
+import threading
+import time
+from typing import Any, Dict, List, Optional, Union
+
+from openai import OpenAI, Stream
+
+from camel.configs import SGLANG_API_PARAMS, SGLangConfig
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import BaseTokenCounter, OpenAITokenCounter
+
+
+class SGLangModel(BaseModelBackend):
+    r"""SGLang service interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`SGLangConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the model service. SGLang doesn't need API key, it would be
+            ignored if set. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the model service. If not
+            provided, :obj:`"http://127.0.0.1:30000/v1"` will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+
+    Reference: https://sgl-project.github.io/backend/openai_api_completions.html
+    """
+
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = SGLangConfig().as_dict()
+
+        self.server_process = None
+        self.last_run_time: Optional[float] = (
+            None  # Will be set when the server starts
+        )
+        self._lock = threading.Lock()
+        self._inactivity_thread: Optional[threading.Thread] = None
+
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
+
+        self._client = None
+
+        if self._url:
+            # Initialize the client if an existing URL is provided
+            self._client = OpenAI(
+                timeout=60,
+                max_retries=3,
+                api_key="Set-but-ignored",  # required but ignored
+                base_url=self._url,
+            )
+
+    def _start_server(self) -> None:
+        from sglang.utils import (  # type: ignore[import-untyped]
+            execute_shell_command,
+            wait_for_server,
+        )
+
+        try:
+            if not self._url:
+                cmd = (
+                    f"python -m sglang.launch_server "
+                    f"--model-path {self.model_type} "
+                    f"--port 30000 "
+                    f"--host 0.0.0.0"
+                )
+
+                server_process = execute_shell_command(cmd)
+                wait_for_server("http://localhost:30000")
+                self._url = "http://127.0.0.1:30000/v1"
+                self.server_process = server_process
+                # Start the inactivity monitor in a background thread
+                self._inactivity_thread = threading.Thread(
+                    target=self._monitor_inactivity, daemon=True
+                )
+                self._inactivity_thread.start()
+            self.last_run_time = time.time()
+            # Initialize the client after the server starts
+            self._client = OpenAI(
+                timeout=60,
+                max_retries=3,
+                api_key="Set-but-ignored",  # required but ignored
+                base_url=self._url,
+            )
+        except Exception as e:
+            raise RuntimeError(f"Failed to start SGLang server: {e}") from e
+
+    def _ensure_server_running(self) -> None:
+        r"""Ensures that the server is running. If not, starts the server."""
+        with self._lock:
+            if self.server_process is None:
+                self._start_server()
+
+    def _monitor_inactivity(self):
+        r"""Monitor whether the server process has been inactive for over 10
+        minutes.
+        """
+        from sglang.utils import terminate_process
+
+        while True:
+            # Check every 10 seconds
+            time.sleep(10)
+            # Over 10 minutes
+            with self._lock:
+                # Over 10 minutes
+                if self.last_run_time and (
+                    time.time() - self.last_run_time > 600
+                ):
+                    if self.server_process:
+                        terminate_process(self.server_process)
+                        self.server_process = None
+                        self._client = None  # Invalidate the client
+                        logging.info(
+                            "Server process terminated due to inactivity."
+                        )
+                    break
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            BaseTokenCounter: The token counter following the model's
+                tokenization style.
+        """
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to SGLang API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to OpenAI API.
+        """
+        for param in self.model_config_dict:
+            if param not in SGLANG_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into SGLang model backend."
+                )
+
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+
+        # Ensure server is running
+        self._ensure_server_running()
+
+        with self._lock:
+            # Update last run time
+            self.last_run_time = time.time()
+
+        if self._client is None:
+            raise RuntimeError(
+                "Client is not initialized. Ensure the server is running."
+            )
+
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+
+        return response
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
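A minimal sketch of driving this backend. Hypothetical: it assumes ModelPlatformType gains an SGLANG member (consistent with the enums.py and model_factory.py changes listed above) and a machine able to serve the checkpoint; with no `url`, the backend launches its own server on port 30000 and tears it down after 10 minutes of inactivity.

from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Assumed enum member and factory routing; model name is illustrative.
model = ModelFactory.create(
    model_platform=ModelPlatformType.SGLANG,
    model_type="meta-llama/Llama-3.1-8B-Instruct",
    model_config_dict={"temperature": 0.0},
)
response = model.run([{"role": "user", "content": "Say hello."}])
print(response.choices[0].message.content)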
camel/models/vllm_model.py
@@ -74,7 +74,7 @@ class VLLMModel(BaseModelBackend):
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
-            api_key="Set-but-ignored",  # required but ignored
+            api_key="EMPTY",  # required but ignored
             base_url=self._url,
         )
 
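"EMPTY" is the placeholder API key used in vLLM's own OpenAI-compatible server examples, so the client now follows the upstream convention instead of a CAMEL-specific sentinel.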
camel/personas/persona_hub.py
@@ -11,7 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-import ast
+import json
 import re
 import uuid
 from functools import lru_cache
@@ -130,7 +130,7 @@ class PersonaHub:
                 text_to_persona_prompt_instruction,
                 response_format=PersonaResponse,  # type: ignore[arg-type]
             )
-            parsed_content = ast.literal_eval(response.msg.content)
+            parsed_content = json.loads(response.msg.content)
            persona.name = parsed_content["persona_name"]
             persona.description = parsed_content["persona_description"]
         except Exception as e:
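This ast.literal_eval-to-json.loads switch recurs in the workforce and worker classes below. The two parsers accept different grammars, so the change is behavioral, not cosmetic: structured output from the model is JSON, and JSON literals are not Python literals. A quick illustration:

import ast
import json

payload = '{"persona_name": "Ada", "is_active": true}'

print(json.loads(payload))  # fine: `true` is a valid JSON literal
ast.literal_eval(payload)   # raises ValueError: `true` is not a Python literal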
camel/retrievers/vector_retriever.py
@@ -77,6 +77,7 @@ class VectorRetriever(BaseRetriever):
         embed_batch: int = 50,
         should_chunk: bool = True,
         extra_info: Optional[dict] = None,
+        metadata_filename: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
         r"""Processes content from local file path, remote URL, string
@@ -96,6 +97,8 @@ class VectorRetriever(BaseRetriever):
                 otherwise skip chunking. Defaults to True.
             extra_info (Optional[dict]): Extra information to be added
                 to the payload. Defaults to None.
+            metadata_filename (Optional[str]): The metadata filename to be
+                used for storing metadata. Defaults to None.
             **kwargs (Any): Additional keyword arguments for content parsing.
         """
         from unstructured.documents.elements import Element
@@ -103,18 +106,32 @@ class VectorRetriever(BaseRetriever):
         if isinstance(content, Element):
             elements = [content]
         elif isinstance(content, IOBase):
-            elements = self.uio.parse_bytes(file=content, **kwargs) or []
+            elements = (
+                self.uio.parse_bytes(
+                    file=content, metadata_filename=metadata_filename, **kwargs
+                )
+                or []
+            )
         elif isinstance(content, str):
             # Check if the content is URL
             parsed_url = urlparse(content)
             is_url = all([parsed_url.scheme, parsed_url.netloc])
             if is_url or os.path.exists(content):
                 elements = (
-                    self.uio.parse_file_or_url(input_path=content, **kwargs)
+                    self.uio.parse_file_or_url(
+                        input_path=content,
+                        metadata_filename=metadata_filename,
+                        **kwargs,
+                    )
                     or []
                 )
             else:
-                elements = [self.uio.create_element_from_text(text=content)]
+                elements = [
+                    self.uio.create_element_from_text(
+                        text=content,
+                        filename=metadata_filename,
+                    )
+                ]
 
         if not elements:
             warnings.warn(
@@ -156,13 +173,12 @@ class VectorRetriever(BaseRetriever):
             chunk_metadata = {"metadata": chunk.metadata.to_dict()}
             # Remove the 'orig_elements' key if it exists
             chunk_metadata["metadata"].pop("orig_elements", "")
-            extra_info = extra_info or {}
+            chunk_metadata["extra_info"] = extra_info or {}
             chunk_text = {"text": str(chunk)}
             combined_dict = {
                 **content_path_info,
                 **chunk_metadata,
                 **chunk_text,
-                **extra_info,
             }
 
             records.append(
@@ -233,6 +249,7 @@ class VectorRetriever(BaseRetriever):
                     'content path', ''
                 ),
                 'metadata': result.record.payload.get('metadata', {}),
+                'extra_info': result.record.payload.get('extra_info', {}),
                 'text': result.record.payload.get('text', ''),
             }
             formatted_results.append(result_dict)
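A sketch of the two new knobs together. Hypothetical setup: it assumes a default-constructed VectorRetriever with its bundled embedding model and storage backend.

from camel.retrievers import VectorRetriever

vr = VectorRetriever()
vr.process(
    content="CAMEL is a multi-agent framework.",
    metadata_filename="notes.txt",        # forwarded to the unstructured parsers
    extra_info={"source": "unit-test"},   # now stored under its own 'extra_info' key
)
results = vr.query(query="What is CAMEL?")
print(results[0]["extra_info"])  # -> {'source': 'unit-test'}

Note the payload change: extra_info used to be splatted into the top-level record dict; it now lives under a dedicated 'extra_info' key, and query results surface it under the same key.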
camel/schemas/openai_converter.py
@@ -28,8 +28,8 @@ from camel.utils import (
 from .base import BaseConverter
 
 DEFAULT_CONVERTER_PROMPTS = """
-    Extract key entities and attributes from the provided text,
-    and convert them into a structured JSON format.
+    Extract key entities and attributes from the user
+    provided text, and convert them into a structured JSON format.
 """
 
 
camel/societies/babyagi_playing.py
@@ -22,10 +22,13 @@ from camel.agents import (
 )
 from camel.agents.chat_agent import ChatAgentResponse
 from camel.generators import SystemMessageGenerator
+from camel.logger import get_logger
 from camel.messages import BaseMessage
 from camel.prompts import TextPrompt
 from camel.types import RoleType, TaskType
 
+logger = get_logger(__name__)
+
 
 class BabyAGI:
     r"""The BabyAGI Agent adapted from `"Task-driven Autonomous Agent"
@@ -261,7 +264,7 @@ class BabyAGI:
             )
             self.subtasks = deque(prioritized_subtask_list)
         else:
-            print("no new tasks")
+            logger.info("no new tasks")
         assistant_response.info['task_name'] = task_name
         assistant_response.info['subtasks'] = list(self.subtasks)
         if not self.subtasks:
camel/societies/workforce/role_playing_worker.py
@@ -13,7 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-import ast
+import json
 from typing import Dict, List, Optional
 
 from colorama import Fore
@@ -173,7 +173,7 @@ class RolePlayingWorker(Worker):
             content=prompt,
         )
         response = self.summarize_agent.step(req, response_format=TaskResult)
-        result_dict = ast.literal_eval(response.msg.content)
+        result_dict = json.loads(response.msg.content)
         task_result = TaskResult(**result_dict)
         task.result = task_result.content
 
camel/societies/workforce/single_agent_worker.py
@@ -13,7 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-import ast
+import json
 from typing import Any, List
 
 from colorama import Fore
@@ -87,7 +87,7 @@ class SingleAgentWorker(Worker):
 
         print(f"======\n{Fore.GREEN}Reply from {self}:{Fore.RESET}")
 
-        result_dict = ast.literal_eval(response.msg.content)
+        result_dict = json.loads(response.msg.content)
         task_result = TaskResult(**result_dict)
 
         color = Fore.RED if task_result.failed else Fore.GREEN
camel/societies/workforce/workforce.py
@@ -13,8 +13,8 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-import ast
 import asyncio
+import json
 import logging
 from collections import deque
 from typing import Deque, Dict, List, Optional
@@ -289,7 +289,7 @@ class Workforce(BaseNode):
         response = self.coordinator_agent.step(
             req, response_format=TaskAssignResult
         )
-        result_dict = ast.literal_eval(response.msg.content)
+        result_dict = json.loads(response.msg.content)
         task_assign_result = TaskAssignResult(**result_dict)
         return task_assign_result.assignee_id
 
@@ -320,7 +320,7 @@ class Workforce(BaseNode):
             content=prompt,
         )
         response = self.coordinator_agent.step(req, response_format=WorkerConf)
-        result_dict = ast.literal_eval(response.msg.content)
+        result_dict = json.loads(response.msg.content)
         new_node_conf = WorkerConf(**result_dict)
 
         new_agent = self._create_new_agent(
camel/storages/object_storages/amazon_s3.py
@@ -17,7 +17,7 @@ from pathlib import Path, PurePath
 from typing import Optional, Tuple
 from warnings import warn
 
-from camel.loaders import File
+from camel.loaders import File, create_file_from_raw_bytes
 from camel.storages.object_storages.base import BaseObjectStorage
 
 
@@ -156,7 +156,7 @@ class AmazonS3Storage(BaseObjectStorage):
             Bucket=self._bucket_name, Key=file_key
         )
         raw_bytes = response["Body"].read()
-        return File.create_file_from_raw_bytes(raw_bytes, filename)
+        return create_file_from_raw_bytes(raw_bytes, filename)
 
     def _upload_file(
         self, local_file_path: Path, remote_file_key: str
camel/storages/object_storages/azure_blob.py
@@ -16,7 +16,7 @@ from pathlib import Path, PurePath
 from typing import Optional, Tuple
 from warnings import warn
 
-from camel.loaders import File
+from camel.loaders import File, create_file_from_raw_bytes
 from camel.storages.object_storages.base import BaseObjectStorage
 
 
@@ -123,7 +123,7 @@ class AzureBlobStorage(BaseObjectStorage):
             File: The object from the container.
         """
         raw_bytes = self._client.download_blob(file_key).readall()
-        file = File.create_file_from_raw_bytes(raw_bytes, filename)
+        file = create_file_from_raw_bytes(raw_bytes, filename)
         return file
 
     def _upload_file(
camel/storages/object_storages/google_cloud.py
@@ -15,7 +15,7 @@ from pathlib import Path, PurePath
 from typing import Tuple
 from warnings import warn
 
-from camel.loaders import File
+from camel.loaders import File, create_file_from_raw_bytes
 from camel.storages.object_storages.base import BaseObjectStorage
 
 
@@ -111,7 +111,7 @@ class GoogleCloudStorage(BaseObjectStorage):
             File: The object from the S3 bucket.
         """
         raw_bytes = self._client.get_blob(file_key).download_as_bytes()
-        return File.create_file_from_raw_bytes(raw_bytes, filename)
+        return create_file_from_raw_bytes(raw_bytes, filename)
 
     def _upload_file(
         self, local_file_path: Path, remote_file_key: str
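The same mechanical change lands in all three object-storage backends: the former File.create_file_from_raw_bytes classmethod is replaced by a module-level factory imported from camel.loaders (matching the +3/-1 change to camel/loaders/__init__.py in the file list). A hedged sketch of the call, assuming the factory dispatches on the filename extension to build the matching File subclass:

from camel.loaders import create_file_from_raw_bytes

# Assumed behavior: picks the File subclass from the ".txt" extension
# and wraps the raw bytes in it.
file = create_file_from_raw_bytes(b"hello world", "notes.txt")
print(file.name)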
camel/toolkits/__init__.py
@@ -27,6 +27,8 @@ from .dalle_toolkit import DalleToolkit
 from .ask_news_toolkit import AskNewsToolkit, AsyncAskNewsToolkit
 from .linkedin_toolkit import LinkedInToolkit
 from .reddit_toolkit import RedditToolkit
+from .meshy_toolkit import MeshyToolkit
+
 from .base import BaseToolkit
 from .google_maps_toolkit import GoogleMapsToolkit
 from .code_execution import CodeExecutionToolkit
@@ -39,6 +41,7 @@ from .open_api_toolkit import OpenAPIToolkit
 from .retrieval_toolkit import RetrievalToolkit
 from .notion_toolkit import NotionToolkit
 from .human_toolkit import HumanToolkit
+from .stripe_toolkit import StripeToolkit
 from .video_toolkit import VideoDownloaderToolkit
 
 __all__ = [
@@ -68,4 +71,6 @@ __all__ = [
     'ArxivToolkit',
     'HumanToolkit',
     'VideoDownloaderToolkit',
+    'StripeToolkit',
+    'MeshyToolkit',
 ]
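Both new toolkits are exported through __all__, so they plug into agents the same way as the existing ones. A hedged sketch following the usual BaseToolkit convention (it assumes StripeToolkit reads its Stripe credentials from the environment, as the other API-backed toolkits do):

from camel.toolkits import StripeToolkit

# Expose the Stripe operations to a ChatAgent as function tools.
tools = StripeToolkit().get_tools()
for tool in tools:
    print(tool.get_function_name())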