camel-ai 0.2.10__py3-none-any.whl → 0.2.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: the registry flags this version of camel-ai as potentially problematic.

Files changed (36)
  1. camel/__init__.py +6 -1
  2. camel/agents/chat_agent.py +74 -5
  3. camel/agents/deductive_reasoner_agent.py +4 -1
  4. camel/configs/__init__.py +3 -0
  5. camel/configs/mistral_config.py +0 -3
  6. camel/configs/nvidia_config.py +70 -0
  7. camel/configs/vllm_config.py +10 -1
  8. camel/embeddings/vlm_embedding.py +4 -1
  9. camel/interpreters/docker_interpreter.py +7 -2
  10. camel/interpreters/subprocess_interpreter.py +7 -2
  11. camel/loaders/firecrawl_reader.py +0 -3
  12. camel/logger.py +112 -0
  13. camel/messages/__init__.py +1 -1
  14. camel/messages/base.py +10 -7
  15. camel/messages/conversion/__init__.py +3 -1
  16. camel/messages/conversion/alpaca.py +122 -0
  17. camel/models/__init__.py +5 -0
  18. camel/models/model_factory.py +3 -0
  19. camel/models/model_manager.py +212 -0
  20. camel/models/nvidia_model.py +141 -0
  21. camel/models/openai_model.py +1 -0
  22. camel/retrievers/vector_retriever.py +22 -5
  23. camel/societies/babyagi_playing.py +4 -1
  24. camel/toolkits/__init__.py +3 -0
  25. camel/toolkits/code_execution.py +38 -4
  26. camel/toolkits/human_toolkit.py +1 -0
  27. camel/toolkits/meshy_toolkit.py +185 -0
  28. camel/toolkits/twitter_toolkit.py +3 -0
  29. camel/types/enums.py +41 -8
  30. camel/utils/commons.py +22 -5
  31. camel/utils/token_counting.py +4 -1
  32. {camel_ai-0.2.10.dist-info → camel_ai-0.2.11.dist-info}/METADATA +2 -2
  33. {camel_ai-0.2.10.dist-info → camel_ai-0.2.11.dist-info}/RECORD +36 -30
  34. /camel/messages/conversion/{models.py → conversation_models.py} +0 -0
  35. {camel_ai-0.2.10.dist-info → camel_ai-0.2.11.dist-info}/LICENSE +0 -0
  36. {camel_ai-0.2.10.dist-info → camel_ai-0.2.11.dist-info}/WHEEL +0 -0
camel/messages/conversion/alpaca.py ADDED
@@ -0,0 +1,122 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ import re
+
+ from pydantic import BaseModel, Field, field_validator
+
+
+ class AlpacaItem(BaseModel):
+     r"""Represents an instruction-response item in the Alpaca format.
+
+     Appropriate for both cases where the input field is empty or populated.
+     Provides parsing from string format using the class method from_string().
+
+     Args:
+         instruction (str): The instruction/question/prompt
+         input (str): Input context or examples (put empty string if none)
+         output (str): The response/answer to the instruction
+     """
+
+     instruction: str = Field(description="The instruction/question/prompt")
+     input: str = Field(
+         description="Optional context or input for the task."
+         " For example, when the instruction is \"Summarize the "
+         "following article\", the input is the article."
+     )
+     output: str = Field(description="The response/answer to the instruction")
+
+     @field_validator('instruction', 'output')
+     def no_section_markers(cls, value: str) -> str:
+         r"""Ensures fields don't contain section markers like
+         '### Response:'.
+         """
+         if (
+             '### Response' in value
+             or '### Instruction' in value
+             or '### Input' in value
+         ):
+             raise ValueError("Field cannot contain section markers")
+         return value.strip()
+
+     @classmethod
+     def from_string(cls, text: str) -> "AlpacaItem":
+         r"""Creates an AlpacaItem from a formatted string.
+
+         Args:
+             text: String in either of these formats:
+                 With input:
+                 ### Instruction:
+                 {instruction}
+                 ### Input:
+                 {input}
+                 ### Response:
+                 {response}
+
+                 Without input:
+                 ### Instruction:
+                 {instruction}
+                 ### Response:
+                 {response}
+
+         Returns:
+             AlpacaItem: Parsed instance
+
+         Raises:
+             ValueError: text doesn't match expected format or sections missing
+         """
+         # Strip and standardize newlines
+         text = text.strip().replace('\r\n', '\n')
+
+         # Try to extract sections using regex
+         instruction_match = re.search(
+             r'###\s*Instruction:\s*\n(.+?)(?=\n###|\Z)', text, re.DOTALL
+         )
+         input_match = re.search(
+             r'###\s*Input:\s*\n(.+?)(?=\n###|\Z)', text, re.DOTALL
+         )
+         response_match = re.search(
+             r'###\s*Response:\s*\n(.+?)(?=\n###|\Z)', text, re.DOTALL
+         )
+
+         if not instruction_match or not response_match:
+             raise ValueError(
+                 "Text must contain '### Instruction:'"
+                 " and '### Response:' sections"
+             )
+
+         return cls(
+             instruction=instruction_match.group(1).strip(),
+             input=input_match.group(1).strip() if input_match else "",
+             output=response_match.group(1).strip(),
+         )
+
+     def to_string(self) -> str:
+         r"""Converts the AlpacaItem to its string representation.
+
+         Returns:
+             str: Formatted string representation with section markers
+         """
+         return "\n".join(
+             [
+                 "### Instruction:",
+                 self.instruction,
+                 "",
+                 "### Input:",
+                 self.input,
+                 "",
+                 "### Response:",
+                 self.output,
+             ]
+         )
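
For orientation, a minimal round trip with the new class might look like the sketch below. The sample strings are invented, and the import path assumes AlpacaItem is re-exported from camel.messages.conversion, as the changed conversion/__init__.py suggests:

from camel.messages.conversion import AlpacaItem

text = (
    "### Instruction:\n"
    "Summarize the following article\n"
    "\n"
    "### Input:\n"
    "CAMEL is a multi-agent framework.\n"
    "\n"
    "### Response:\n"
    "CAMEL lets multiple agents cooperate on tasks."
)

item = AlpacaItem.from_string(text)
assert item.input == "CAMEL is a multi-agent framework."

# to_string() always emits all three section markers, so the
# round trip is stable even when `input` is an empty string.
print(item.to_string())
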
camel/models/__init__.py CHANGED
@@ -21,7 +21,9 @@ from .groq_model import GroqModel
  from .litellm_model import LiteLLMModel
  from .mistral_model import MistralModel
  from .model_factory import ModelFactory
+ from .model_manager import ModelManager, ModelProcessingError
  from .nemotron_model import NemotronModel
+ from .nvidia_model import NvidiaModel
  from .ollama_model import OllamaModel
  from .openai_audio_models import OpenAIAudioModels
  from .openai_compatible_model import OpenAICompatibleModel
@@ -46,9 +48,11 @@ __all__ = [
      'ZhipuAIModel',
      'CohereModel',
      'ModelFactory',
+     'ModelManager',
      'LiteLLMModel',
      'OpenAIAudioModels',
      'NemotronModel',
+     'NvidiaModel',
      'OllamaModel',
      'VLLMModel',
      'GeminiModel',
@@ -58,5 +62,6 @@ __all__ = [
      'TogetherAIModel',
      'YiModel',
      'QwenModel',
+     'ModelProcessingError',
      'DeepSeekModel',
  ]
camel/models/model_factory.py CHANGED
@@ -22,6 +22,7 @@ from camel.models.gemini_model import GeminiModel
  from camel.models.groq_model import GroqModel
  from camel.models.litellm_model import LiteLLMModel
  from camel.models.mistral_model import MistralModel
+ from camel.models.nvidia_model import NvidiaModel
  from camel.models.ollama_model import OllamaModel
  from camel.models.openai_compatible_model import OpenAICompatibleModel
  from camel.models.openai_model import OpenAIModel
@@ -93,6 +94,8 @@ class ModelFactory:
              model_class = TogetherAIModel
          elif model_platform.is_litellm:
              model_class = LiteLLMModel
+         elif model_platform.is_nvidia:
+             model_class = NvidiaModel

          elif model_platform.is_openai and model_type.is_openai:
              model_class = OpenAIModel
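
Taken together with the enums.py changes (+41 -8), this wires a new NVIDIA platform into the factory. A hedged sketch of creating an NVIDIA-backed model; the exact enum member name and the model id are assumptions inferred from the new `is_nvidia` check and the nvidia_config.py/nvidia_model.py additions:

from camel.models import ModelFactory
from camel.types import ModelPlatformType

# NVIDIA serves an OpenAI-compatible endpoint, so the model type
# can be passed as a plain string id.
model = ModelFactory.create(
    model_platform=ModelPlatformType.NVIDIA,
    model_type="meta/llama-3.1-8b-instruct",
)
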
camel/models/model_manager.py ADDED
@@ -0,0 +1,212 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ import logging
+ from itertools import cycle
+ from random import choice
+ from typing import (
+     Any,
+     Callable,
+     Dict,
+     List,
+     Union,
+ )
+
+ from openai import Stream
+
+ from camel.messages import OpenAIMessage
+ from camel.models.base_model import BaseModelBackend
+ from camel.types import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     UnifiedModelType,
+ )
+ from camel.utils import BaseTokenCounter
+
+ logger = logging.getLogger(__name__)
+
+
+ class ModelProcessingError(Exception):
+     r"""Raised when an error occurs during model processing."""
+
+     pass
+
+
+ class ModelManager:
+     r"""ModelManager chooses a model from the provided list.
+     Models are picked according to the defined strategy.
+
+     Args:
+         models (Union[BaseModelBackend, List[BaseModelBackend]]):
+             model backend or list of model backends
+             (e.g., model instances, APIs)
+         scheduling_strategy (str): name of function that defines how
+             to select the next model. (default: :str:`round_robin`)
+     """
+
+     def __init__(
+         self,
+         models: Union[BaseModelBackend, List[BaseModelBackend]],
+         scheduling_strategy: str = "round_robin",
+     ):
+         if isinstance(models, list):
+             self.models = models
+         else:
+             self.models = [models]
+         self.models_cycle = cycle(self.models)
+         self.current_model = self.models[0]
+
+         # Set the scheduling strategy; default is round-robin
+         try:
+             self.scheduling_strategy = getattr(self, scheduling_strategy)
+         except AttributeError:
+             logger.warning(
+                 f"Provided strategy: {scheduling_strategy} is not "
+                 f"implemented. Using default 'round_robin'."
+             )
+             self.scheduling_strategy = self.round_robin
+
+     @property
+     def model_type(self) -> UnifiedModelType:
+         r"""Return type of the current model.
+
+         Returns:
+             Union[ModelType, str]: Current model type.
+         """
+         return self.current_model.model_type
+
+     @property
+     def model_config_dict(self) -> Dict[str, Any]:
+         r"""Return model_config_dict of the current model.
+
+         Returns:
+             Dict[str, Any]: Config dictionary of the current model.
+         """
+         return self.current_model.model_config_dict
+
+     @model_config_dict.setter
+     def model_config_dict(self, model_config_dict: Dict[str, Any]):
+         r"""Set model_config_dict to the current model.
+
+         Args:
+             model_config_dict (Dict[str, Any]): Config dictionary to be set at
+                 current model.
+         """
+         self.current_model.model_config_dict = model_config_dict
+
+     @property
+     def current_model_index(self) -> int:
+         r"""Return the index of current model in self.models list.
+
+         Returns:
+             int: index of current model in given list of models.
+         """
+         return self.models.index(self.current_model)
+
+     @property
+     def token_limit(self):
+         r"""Returns the maximum token limit for current model.
+
+         This method retrieves the maximum token limit either from the
+         `model_config_dict` or from the model's default token limit.
+
+         Returns:
+             int: The maximum token limit for the given model.
+         """
+         return self.current_model.token_limit
+
+     @property
+     def token_counter(self) -> BaseTokenCounter:
+         r"""Return token_counter of the current model.
+
+         Returns:
+             BaseTokenCounter: The token counter following the model's
+                 tokenization style.
+         """
+         return self.current_model.token_counter
+
+     def add_strategy(self, name: str, strategy_fn: Callable):
+         r"""Add a scheduling strategy method provided by the user in case
+         none of the existing strategies fits.
+         When a custom strategy is provided, it will be set as the
+         "self.scheduling_strategy" attribute.
+
+         Args:
+             name (str): The name of the strategy.
+             strategy_fn (Callable): The scheduling strategy function.
+         """
+         if not callable(strategy_fn):
+             raise ValueError("strategy_fn must be a callable function.")
+         setattr(self, name, strategy_fn.__get__(self))
+         self.scheduling_strategy = getattr(self, name)
+         logger.info(f"Custom strategy '{name}' added.")
+
+     # Strategies
+     def round_robin(self) -> BaseModelBackend:
+         r"""Return models one by one in simple round-robin fashion.
+
+         Returns:
+             BaseModelBackend for processing incoming messages.
+         """
+         return next(self.models_cycle)
+
+     def always_first(self) -> BaseModelBackend:
+         r"""Always return the first model from self.models.
+
+         Returns:
+             BaseModelBackend for processing incoming messages.
+         """
+         return self.models[0]
+
+     def random_model(self) -> BaseModelBackend:
+         r"""Return a random model from the self.models list.
+
+         Returns:
+             BaseModelBackend for processing incoming messages.
+         """
+         return choice(self.models)
+
+     def run(
+         self, messages: List[OpenAIMessage]
+     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+         r"""Process a list of messages by selecting a model based on
+         the scheduling strategy.
+         Sends the entire list of messages to the selected model,
+         and returns a single response.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat
+                 history in OpenAI API format.
+
+         Returns:
+             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `Stream[ChatCompletionChunk]` in the stream mode.
+         """
+         self.current_model = self.scheduling_strategy()
+
+         # Pass all messages to the selected model and get the response
+         try:
+             response = self.current_model.run(messages)
+         except Exception as exc:
+             logger.error(f"Error processing with model: {self.current_model}")
+             if self.scheduling_strategy == self.always_first:
+                 self.scheduling_strategy = self.round_robin
+                 logger.warning(
+                     "The scheduling strategy has been changed to 'round_robin'"
+                 )
+                 # Skip already used one
+                 self.current_model = self.scheduling_strategy()
+             raise exc
+         return response
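
A rough usage sketch of the new manager; both backends and the message literal are illustrative:

from camel.models import ModelFactory, ModelManager
from camel.types import ModelPlatformType, ModelType

# Two interchangeable backends; round_robin alternates between
# them on every run() call.
manager = ModelManager(
    models=[
        ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O_MINI,
        ),
        ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
        ),
    ],
    scheduling_strategy="round_robin",
)

response = manager.run([{"role": "user", "content": "Hello!"}])

Note the failover behavior in run(): if the selected backend raises while the strategy is always_first, the manager switches to round_robin and advances past the failed model before re-raising.
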
camel/models/nvidia_model.py ADDED
@@ -0,0 +1,141 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ import os
+ from typing import Any, Dict, List, Optional, Union
+
+ from openai import OpenAI, Stream
+ from openai.types.chat import (
+     ChatCompletion,
+     ChatCompletionChunk,
+ )
+
+ from camel.configs import NVIDIA_API_PARAMS, NvidiaConfig
+ from camel.messages import OpenAIMessage
+ from camel.models import BaseModelBackend
+ from camel.types import ModelType
+ from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
+
+
+ class NvidiaModel(BaseModelBackend):
+     r"""NVIDIA API in a unified BaseModelBackend interface.
+
+     Args:
+         model_type (Union[ModelType, str]): Model for which a backend is
+             created, one of NVIDIA series.
+         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+             that will be fed into :obj:`openai.ChatCompletion.create()`. If
+             :obj:`None`, :obj:`NvidiaConfig().as_dict()` will be used.
+             (default: :obj:`None`)
+         api_key (Optional[str], optional): The API key for authenticating with
+             the NVIDIA service. (default: :obj:`None`)
+         url (Optional[str], optional): The URL to the NVIDIA service.
+             (default: :obj:`None`)
+         token_counter (Optional[BaseTokenCounter], optional): Token counter to
+             use for the model. If not provided, :obj:`OpenAITokenCounter(
+             ModelType.GPT_4O_MINI)` will be used.
+             (default: :obj:`None`)
+     """
+
+     def __init__(
+         self,
+         model_type: Union[ModelType, str],
+         model_config_dict: Optional[Dict[str, Any]] = None,
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+     ) -> None:
+         if model_config_dict is None:
+             model_config_dict = NvidiaConfig().as_dict()
+         api_key = api_key or os.environ.get("NVIDIA_API_KEY")
+         url = url or os.environ.get(
+             "NVIDIA_API_BASE_URL", "https://integrate.api.nvidia.com/v1"
+         )
+         super().__init__(
+             model_type, model_config_dict, api_key, url, token_counter
+         )
+         self._client = OpenAI(
+             timeout=60,
+             max_retries=3,
+             api_key=self._api_key,
+             base_url=self._url,
+         )
+
+     @api_keys_required("NVIDIA_API_KEY")
+     def run(
+         self,
+         messages: List[OpenAIMessage],
+     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+         r"""Runs inference of NVIDIA chat completion.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+
+         Returns:
+             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `Stream[ChatCompletionChunk]` in the stream mode.
+         """
+         # Remove tool-related parameters if no tools are specified
+         config = dict(self.model_config_dict)
+         if not config.get('tools'):  # None or empty list
+             config.pop('tools', None)
+             config.pop('tool_choice', None)
+
+         response = self._client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+             **config,
+         )
+         return response
+
+     @property
+     def token_counter(self) -> BaseTokenCounter:
+         r"""Initialize the token counter for the model backend.
+
+         Returns:
+             OpenAITokenCounter: The token counter following the model's
+                 tokenization style.
+         """
+         if not self._token_counter:
+             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+         return self._token_counter
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any
+         unexpected arguments to NVIDIA API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+                 unexpected arguments to NVIDIA API.
+         """
+         for param in self.model_config_dict:
+             if param not in NVIDIA_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into NVIDIA model backend."
+                 )
+
+     @property
+     def stream(self) -> bool:
+         r"""Returns whether the model is in stream mode, which sends partial
+         results each time.
+
+         Returns:
+             bool: Whether the model is in stream mode.
+         """
+         return self.model_config_dict.get('stream', False)
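
Direct construction also works; a sketch assuming NVIDIA_API_KEY is exported in the environment and using an illustrative model id:

from camel.models.nvidia_model import NvidiaModel

# Falls back to NvidiaConfig().as_dict() and the public
# https://integrate.api.nvidia.com/v1 endpoint when not overridden.
model = NvidiaModel(model_type="meta/llama-3.1-70b-instruct")

completion = model.run(
    [{"role": "user", "content": "One sentence about CAMEL-AI."}]
)
print(completion.choices[0].message.content)
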
camel/models/openai_model.py CHANGED
@@ -158,6 +158,7 @@ class OpenAIModel(BaseModelBackend):
                  "role": response.choices[0].message.role,
                  "content": response.choices[0].message.content,
                  "tool_calls": response.choices[0].message.tool_calls,
+                 "parsed": response.choices[0].message.parsed,
              },
              finish_reason=response.choices[0].finish_reason,
          )
camel/retrievers/vector_retriever.py CHANGED
@@ -77,6 +77,7 @@ class VectorRetriever(BaseRetriever):
          embed_batch: int = 50,
          should_chunk: bool = True,
          extra_info: Optional[dict] = None,
+         metadata_filename: Optional[str] = None,
          **kwargs: Any,
      ) -> None:
          r"""Processes content from local file path, remote URL, string
@@ -96,6 +97,8 @@ class VectorRetriever(BaseRetriever):
                  otherwise skip chunking. Defaults to True.
              extra_info (Optional[dict]): Extra information to be added
                  to the payload. Defaults to None.
+             metadata_filename (Optional[str]): The metadata filename to be
+                 used for storing metadata. Defaults to None.
              **kwargs (Any): Additional keyword arguments for content parsing.
          """
          from unstructured.documents.elements import Element
@@ -103,18 +106,32 @@ class VectorRetriever(BaseRetriever):
          if isinstance(content, Element):
              elements = [content]
          elif isinstance(content, IOBase):
-             elements = self.uio.parse_bytes(file=content, **kwargs) or []
+             elements = (
+                 self.uio.parse_bytes(
+                     file=content, metadata_filename=metadata_filename, **kwargs
+                 )
+                 or []
+             )
          elif isinstance(content, str):
              # Check if the content is URL
              parsed_url = urlparse(content)
              is_url = all([parsed_url.scheme, parsed_url.netloc])
              if is_url or os.path.exists(content):
                  elements = (
-                     self.uio.parse_file_or_url(input_path=content, **kwargs)
+                     self.uio.parse_file_or_url(
+                         input_path=content,
+                         metadata_filename=metadata_filename,
+                         **kwargs,
+                     )
                      or []
                  )
              else:
-                 elements = [self.uio.create_element_from_text(text=content)]
+                 elements = [
+                     self.uio.create_element_from_text(
+                         text=content,
+                         filename=metadata_filename,
+                     )
+                 ]

          if not elements:
              warnings.warn(
@@ -156,13 +173,12 @@ class VectorRetriever(BaseRetriever):
              chunk_metadata = {"metadata": chunk.metadata.to_dict()}
              # Remove the 'orig_elements' key if it exists
              chunk_metadata["metadata"].pop("orig_elements", "")
-             extra_info = extra_info or {}
+             chunk_metadata["extra_info"] = extra_info or {}
              chunk_text = {"text": str(chunk)}
              combined_dict = {
                  **content_path_info,
                  **chunk_metadata,
                  **chunk_text,
-                 **extra_info,
              }

              records.append(
@@ -233,6 +249,7 @@ class VectorRetriever(BaseRetriever):
                      'content path', ''
                  ),
                  'metadata': result.record.payload.get('metadata', {}),
+                 'extra_info': result.record.payload.get('extra_info', {}),
                  'text': result.record.payload.get('text', ''),
              }
              formatted_results.append(result_dict)
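
A sketch of how the new parameters surface in practice. The retriever setup is omitted, and the method names `process`/`query` plus all argument values are assumptions based on the hunks above:

from io import BytesIO

# `retriever` is an already-configured VectorRetriever instance.
retriever.process(
    content=BytesIO(b"raw bytes with no inherent filename"),
    metadata_filename="report.pdf",  # hints the parser at the file type
    extra_info={"source": "quarterly-report"},
)

# extra_info now lives under its own payload key rather than being
# merged into the top-level dict, and query results echo it back.
results = retriever.query("revenue growth", top_k=3)
print(results[0]["extra_info"])
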
camel/societies/babyagi_playing.py CHANGED
@@ -22,10 +22,13 @@ from camel.agents import (
  )
  from camel.agents.chat_agent import ChatAgentResponse
  from camel.generators import SystemMessageGenerator
+ from camel.logger import get_logger
  from camel.messages import BaseMessage
  from camel.prompts import TextPrompt
  from camel.types import RoleType, TaskType

+ logger = get_logger(__name__)
+

  class BabyAGI:
      r"""The BabyAGI Agent adapted from `"Task-driven Autonomous Agent"
@@ -261,7 +264,7 @@ class BabyAGI:
              )
              self.subtasks = deque(prioritized_subtask_list)
          else:
-             print("no new tasks")
+             logger.info("no new tasks")
          assistant_response.info['task_name'] = task_name
          assistant_response.info['subtasks'] = list(self.subtasks)
          if not self.subtasks:
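
This leans on the new camel/logger.py module (+112 lines). A minimal sketch of the pattern, assuming get_logger builds on the standard logging package (only the get_logger import is confirmed by the diff):

import logging

from camel.logger import get_logger

# Standard logging configuration should still apply if the module
# wraps the stdlib logging package.
logging.basicConfig(level=logging.INFO)

logger = get_logger(__name__)
logger.info("no new tasks")  # replaces the bare print() call
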
camel/toolkits/__init__.py CHANGED
@@ -27,6 +27,8 @@ from .dalle_toolkit import DalleToolkit
  from .ask_news_toolkit import AskNewsToolkit, AsyncAskNewsToolkit
  from .linkedin_toolkit import LinkedInToolkit
  from .reddit_toolkit import RedditToolkit
+ from .meshy_toolkit import MeshyToolkit
+
  from .base import BaseToolkit
  from .google_maps_toolkit import GoogleMapsToolkit
  from .code_execution import CodeExecutionToolkit
@@ -68,4 +70,5 @@ __all__ = [
      'ArxivToolkit',
      'HumanToolkit',
      'VideoDownloaderToolkit',
+     'MeshyToolkit',
  ]