camel-ai 0.2.10__py3-none-any.whl → 0.2.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of camel-ai was flagged by the registry as a potentially problematic release.
- camel/__init__.py +6 -1
- camel/agents/chat_agent.py +87 -6
- camel/agents/deductive_reasoner_agent.py +4 -1
- camel/benchmarks/__init__.py +18 -0
- camel/benchmarks/base.py +152 -0
- camel/benchmarks/gaia.py +478 -0
- camel/configs/__init__.py +6 -0
- camel/configs/mistral_config.py +0 -3
- camel/configs/nvidia_config.py +70 -0
- camel/configs/ollama_config.py +4 -2
- camel/configs/sglang_config.py +71 -0
- camel/configs/vllm_config.py +10 -1
- camel/data_collector/__init__.py +19 -0
- camel/data_collector/alpaca_collector.py +127 -0
- camel/data_collector/base.py +211 -0
- camel/data_collector/sharegpt_collector.py +205 -0
- camel/datahubs/__init__.py +23 -0
- camel/datahubs/base.py +136 -0
- camel/datahubs/huggingface.py +433 -0
- camel/datahubs/models.py +22 -0
- camel/embeddings/vlm_embedding.py +4 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker_interpreter.py +7 -2
- camel/interpreters/e2b_interpreter.py +136 -0
- camel/interpreters/subprocess_interpreter.py +7 -2
- camel/loaders/__init__.py +3 -1
- camel/loaders/base_io.py +41 -41
- camel/loaders/firecrawl_reader.py +0 -3
- camel/logger.py +112 -0
- camel/messages/__init__.py +3 -1
- camel/messages/base.py +10 -7
- camel/messages/conversion/__init__.py +3 -1
- camel/messages/conversion/alpaca.py +122 -0
- camel/models/__init__.py +7 -0
- camel/models/anthropic_model.py +14 -4
- camel/models/base_model.py +28 -0
- camel/models/groq_model.py +1 -1
- camel/models/model_factory.py +6 -0
- camel/models/model_manager.py +212 -0
- camel/models/nvidia_model.py +141 -0
- camel/models/ollama_model.py +12 -0
- camel/models/openai_model.py +0 -25
- camel/models/reward/__init__.py +22 -0
- camel/models/reward/base_reward_model.py +58 -0
- camel/models/reward/evaluator.py +63 -0
- camel/models/reward/nemotron_model.py +112 -0
- camel/models/sglang_model.py +225 -0
- camel/models/vllm_model.py +1 -1
- camel/personas/persona_hub.py +2 -2
- camel/retrievers/vector_retriever.py +22 -5
- camel/schemas/openai_converter.py +2 -2
- camel/societies/babyagi_playing.py +4 -1
- camel/societies/workforce/role_playing_worker.py +2 -2
- camel/societies/workforce/single_agent_worker.py +2 -2
- camel/societies/workforce/workforce.py +3 -3
- camel/storages/object_storages/amazon_s3.py +2 -2
- camel/storages/object_storages/azure_blob.py +2 -2
- camel/storages/object_storages/google_cloud.py +2 -2
- camel/toolkits/__init__.py +5 -0
- camel/toolkits/code_execution.py +42 -4
- camel/toolkits/function_tool.py +41 -0
- camel/toolkits/human_toolkit.py +1 -0
- camel/toolkits/math_toolkit.py +47 -16
- camel/toolkits/meshy_toolkit.py +185 -0
- camel/toolkits/search_toolkit.py +154 -2
- camel/toolkits/stripe_toolkit.py +273 -0
- camel/toolkits/twitter_toolkit.py +3 -0
- camel/types/__init__.py +2 -0
- camel/types/enums.py +68 -10
- camel/utils/commons.py +22 -5
- camel/utils/token_counting.py +26 -11
- {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/METADATA +13 -6
- {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/RECORD +76 -51
- /camel/messages/conversion/{models.py → conversation_models.py} +0 -0
- {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/WHEEL +0 -0
camel/interpreters/e2b_interpreter.py
ADDED

@@ -0,0 +1,136 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, ClassVar, Dict, List, Optional
+
+from camel.interpreters.base import BaseInterpreter
+from camel.interpreters.interpreter_error import InterpreterError
+from camel.logger import get_logger
+from camel.utils import api_keys_required
+
+logger = get_logger(__name__)
+
+
+class E2BInterpreter(BaseInterpreter):
+    r"""E2B Code Interpreter implementation.
+
+    Args:
+        require_confirm (bool, optional): If True, prompt user before running
+            code strings for security. (default: :obj:`True`)
+    """
+
+    _CODE_TYPE_MAPPING: ClassVar[Dict[str, Optional[str]]] = {
+        "python": None,
+        "py3": None,
+        "python3": None,
+        "py": None,
+        "shell": "bash",
+        "bash": "bash",
+        "sh": "bash",
+        "java": "java",
+        "javascript": "js",
+        "r": "r",
+    }
+
+    @api_keys_required("E2B_API_KEY")
+    def __init__(
+        self,
+        require_confirm: bool = True,
+    ) -> None:
+        from e2b_code_interpreter import Sandbox
+
+        self.require_confirm = require_confirm
+        self._sandbox = Sandbox(api_key=os.environ.get("E2B_API_KEY"))
+
+    def __del__(self) -> None:
+        r"""Destructor for the E2BInterpreter class.
+
+        This method ensures that the e2b sandbox is killed when the
+        interpreter is deleted.
+        """
+        if (
+            hasattr(self, '_sandbox')
+            and self._sandbox is not None
+            and self._sandbox.is_running()
+        ):
+            self._sandbox.kill()
+
+    def run(
+        self,
+        code: str,
+        code_type: str,
+    ) -> str:
+        r"""Executes the given code in the e2b sandbox.
+
+        Args:
+            code (str): The code string to execute.
+            code_type (str): The type of code to execute (e.g., 'python',
+                'bash').
+
+        Returns:
+            str: The string representation of the output of the executed code.
+
+        Raises:
+            InterpreterError: If the `code_type` is not supported or if any
+                runtime error occurs during the execution of the code.
+        """
+        if code_type not in self._CODE_TYPE_MAPPING:
+            raise InterpreterError(
+                f"Unsupported code type {code_type}. "
+                f"`{self.__class__.__name__}` only supports "
+                f"{', '.join(list(self._CODE_TYPE_MAPPING.keys()))}."
+            )
+        # Print code for security checking
+        if self.require_confirm:
+            logger.info(
+                f"The following {code_type} code will run on your "
+                "e2b sandbox: {code}"
+            )
+            while True:
+                choice = input("Running code? [Y/n]:").lower()
+                if choice in ["y", "yes", "ye"]:
+                    break
+                elif choice not in ["no", "n"]:
+                    continue
+                raise InterpreterError(
+                    "Execution halted: User opted not to run the code. "
+                    "This choice stops the current operation and any "
+                    "further code execution."
+                )
+
+        if self._CODE_TYPE_MAPPING[code_type] is None:
+            execution = self._sandbox.run_code(code)
+        else:
+            execution = self._sandbox.run_code(
+                code=code, language=self._CODE_TYPE_MAPPING[code_type]
+            )
+
+        if execution.text and execution.text.lower() != "none":
+            return execution.text
+
+        if execution.logs:
+            if execution.logs.stdout:
+                return ",".join(execution.logs.stdout)
+            elif execution.logs.stderr:
+                return ",".join(execution.logs.stderr)
+
+        return str(execution.error)
+
+    def supported_code_types(self) -> List[str]:
+        r"""Provides supported code types by the interpreter."""
+        return list(self._CODE_TYPE_MAPPING.keys())
+
+    def update_action_space(self, action_space: Dict[str, Any]) -> None:
+        r"""Updates action space for *python* interpreter"""
+        raise RuntimeError("E2B doesn't support " "`action_space`.")

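For orientation, here is a minimal usage sketch of the new E2BInterpreter based only on the interface shown above. It assumes the optional e2b_code_interpreter dependency is installed and that E2B_API_KEY is set in the environment (the constructor reads it via os.environ); the code snippet passed to run() is arbitrary.

from camel.interpreters.e2b_interpreter import E2BInterpreter

# Assumes E2B_API_KEY is exported and `e2b_code_interpreter` is installed.
interpreter = E2BInterpreter(require_confirm=False)  # skip the interactive [Y/n] prompt

print(interpreter.supported_code_types())
# -> ['python', 'py3', 'python3', 'py', 'shell', 'bash', 'sh', 'java', 'javascript', 'r']

result = interpreter.run("print(40 + 2)", code_type="python")
print(result)  # stdout lines are joined with "," per run()'s fallback logic
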
camel/interpreters/subprocess_interpreter.py
CHANGED

@@ -22,6 +22,9 @@ from colorama import Fore
 
 from camel.interpreters.base import BaseInterpreter
 from camel.interpreters.interpreter_error import InterpreterError
+from camel.logger import get_logger
+
+logger = get_logger(__name__)
 
 
 class SubprocessInterpreter(BaseInterpreter):
@@ -141,8 +144,10 @@ class SubprocessInterpreter(BaseInterpreter):
 
         # Print code for security checking
         if self.require_confirm:
-
-
+            logger.info(
+                f"The following {code_type} code will run on your "
+                "computer: {code}"
+            )
             while True:
                 choice = input("Running code? [Y/n]:").lower()
                 if choice in ["y", "yes", "ye", ""]:

camel/loaders/__init__.py
CHANGED
@@ -13,7 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 from .apify_reader import Apify
-from .base_io import File
+from .base_io import File, create_file, create_file_from_raw_bytes
 from .chunkr_reader import ChunkrReader
 from .firecrawl_reader import Firecrawl
 from .jina_url_reader import JinaURLReader
@@ -21,6 +21,8 @@ from .unstructured_io import UnstructuredIO
 
 __all__ = [
     'File',
+    'create_file',
+    'create_file_from_raw_bytes',
     'UnstructuredIO',
     'JinaURLReader',
     'Firecrawl',

camel/loaders/base_io.py
CHANGED
@@ -22,6 +22,47 @@ from typing import Any, Dict, List, Optional
 from camel.utils import dependencies_required
 
 
+def create_file(file: BytesIO, filename: str) -> "File":
+    r"""Reads an uploaded file and returns a File object.
+
+    Args:
+        file (BytesIO): A BytesIO object representing the contents of the
+            file.
+        filename (str): The name of the file.
+
+    Returns:
+        File: A File object.
+    """
+    ext_to_cls = {
+        "docx": DocxFile,
+        "pdf": PdfFile,
+        "txt": TxtFile,
+        "json": JsonFile,
+        "html": HtmlFile,
+    }
+
+    ext = filename.split(".")[-1].lower()
+    if ext not in ext_to_cls:
+        raise NotImplementedError(f"File type {ext} not supported")
+
+    out_file = ext_to_cls[ext].from_bytes(file, filename)
+    return out_file
+
+
+def create_file_from_raw_bytes(raw_bytes: bytes, filename: str) -> "File":
+    r"""Reads raw bytes and returns a File object.
+
+    Args:
+        raw_bytes (bytes): The raw bytes content of the file.
+        filename (str): The name of the file.
+
+    Returns:
+        File: A File object.
+    """
+    file = BytesIO(raw_bytes)
+    return create_file(file, filename)
+
+
 class File(ABC):
     r"""Represents an uploaded file comprised of Documents.
 
@@ -79,47 +120,6 @@ class File(ABC):
         file = BytesIO(raw_bytes)
         return cls.from_bytes(file, filename)
 
-    @staticmethod
-    def create_file(file: BytesIO, filename: str) -> "File":
-        r"""Reads an uploaded file and returns a File object.
-
-        Args:
-            file (BytesIO): A BytesIO object representing the contents of the
-                file.
-            filename (str): The name of the file.
-
-        Returns:
-            File: A File object.
-        """
-        ext_to_cls = {
-            "docx": DocxFile,
-            "pdf": PdfFile,
-            "txt": TxtFile,
-            "json": JsonFile,
-            "html": HtmlFile,
-        }
-
-        ext = filename.split(".")[-1].lower()
-        if ext not in ext_to_cls:
-            raise NotImplementedError(f"File type {ext} not supported")
-
-        out_file = ext_to_cls[ext].from_bytes(file, filename)
-        return out_file
-
-    @staticmethod
-    def create_file_from_raw_bytes(raw_bytes: bytes, filename: str) -> "File":
-        r"""Reads raw bytes and returns a File object.
-
-        Args:
-            raw_bytes (bytes): The raw bytes content of the file.
-            filename (str): The name of the file.
-
-        Returns:
-            File: A File object.
-        """
-        file = BytesIO(raw_bytes)
-        return File.create_file(file, filename)
-
     def __repr__(self) -> str:
         return (
             f"File(name={self.name}, id={self.file_id}, "

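In short, the two helpers moved from static methods on File to module-level functions and are re-exported from camel.loaders (see the __init__.py hunk earlier). A small sketch of the intended call pattern, using a hypothetical local text file:

from camel.loaders import create_file_from_raw_bytes

# "notes.txt" is a placeholder path; docx, pdf, txt, json and html are the
# extensions handled by the helpers above.
with open("notes.txt", "rb") as f:
    file_obj = create_file_from_raw_bytes(f.read(), "notes.txt")

print(file_obj)  # e.g. File(name=notes.txt, id=..., ...)
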
camel/loaders/firecrawl_reader.py
CHANGED

@@ -17,8 +17,6 @@ from typing import Any, Dict, Optional
 
 from pydantic import BaseModel
 
-from camel.utils import api_keys_required
-
 
 class Firecrawl:
     r"""Firecrawl allows you to turn entire websites into LLM-ready markdown.
@@ -32,7 +30,6 @@ class Firecrawl:
         https://docs.firecrawl.dev/introduction
     """
 
-    @api_keys_required("FIRECRAWL_API_KEY")
     def __init__(
         self,
         api_key: Optional[str] = None,

camel/logger.py
ADDED
@@ -0,0 +1,112 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import logging
+import os
+import sys
+
+# Create a private logger
+_logger = logging.getLogger('camel')
+
+
+def _configure_library_logging():
+    if os.environ.get('CAMEL_LOGGING_DISABLED', 'False').lower() == 'true':
+        return
+
+    if not logging.root.handlers and not _logger.handlers:
+        logging.basicConfig(
+            level=os.environ.get('LOGLEVEL', 'INFO').upper(),
+            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+            stream=sys.stdout,
+        )
+        logging.setLoggerClass(logging.Logger)
+        _logger.info("Camel library logging has been configured.")
+    else:
+        _logger.debug("Existing logger configuration found, using that.")
+
+
+def disable_logging():
+    r"""Disable all logging for the Camel library.
+
+    This function sets the log level to a value higher than CRITICAL,
+    effectively disabling all log messages, and adds a NullHandler to
+    suppress any potential warnings about no handlers being found.
+    """
+    os.environ['CAMEL_LOGGING_DISABLED'] = 'true'
+    _logger.setLevel(logging.CRITICAL + 1)
+    # Avoid adding multiple NullHandlers
+    if not any(
+        isinstance(handler, logging.NullHandler)
+        for handler in _logger.handlers
+    ):
+        _logger.addHandler(logging.NullHandler())
+    _logger.debug("Logging has been disabled.")
+
+
+def enable_logging():
+    r"""Enable logging for the Camel library.
+
+    This function re-enables logging if it was previously disabled,
+    and configures the library logging using the default settings.
+    If the logging is already configured,
+    this function does not change its configuration.
+    """
+    os.environ['CAMEL_LOGGING_DISABLED'] = 'false'
+    _configure_library_logging()
+
+
+def set_log_level(level):
+    r"""Set the logging level for the Camel library.
+
+    Args:
+        level (Union[str, int]): The logging level to set. This can be a string
+            (e.g., 'INFO') or a logging level constant (e.g., logging.INFO,
+            logging.DEBUG).
+            See https://docs.python.org/3/library/logging.html#levels
+
+    Raises:
+        ValueError: If the provided level is not a valid logging level.
+    """
+    valid_levels = ['NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
+    if isinstance(level, str):
+        if level.upper() not in valid_levels:
+            raise ValueError(
+                f"Invalid logging level."
+                f" Choose from: {', '.join(valid_levels)}"
+            )
+        level = level.upper()
+    elif not isinstance(level, int):
+        raise ValueError(
+            "Logging level must be an option from the logging module."
+        )
+
+    _logger.setLevel(level)
+    _logger.debug(f"Logging level set to: {logging.getLevelName(level)}")
+
+
+def get_logger(name):
+    r"""Get a logger with the specified name, prefixed with 'camel.'.
+
+    Args:
+        name (str): The name to be appended to 'camel.' to create the logger.
+
+    Returns:
+        logging.Logger: A logger instance with the name 'camel.{name}'.
+    """
+    return logging.getLogger(f'camel.{name}')
+
+
+# Lazy configuration: Only configure logging if explicitly enabled.
+if os.environ.get('CAMEL_LOGGING_DISABLED', 'False').strip().lower() != 'true':
+    _configure_library_logging()

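A short sketch of how the new logging controls are meant to be called from application code; everything below uses only the functions and environment variables defined in this module.

from camel.logger import disable_logging, enable_logging, get_logger, set_log_level

enable_logging()           # sets CAMEL_LOGGING_DISABLED=false and configures handlers if none exist
set_log_level("DEBUG")     # accepts a level name or a logging constant such as logging.DEBUG

log = get_logger("example")  # actual logger name becomes 'camel.example'
log.debug("now visible at DEBUG level")

disable_logging()          # silences the whole 'camel' logger hierarchy

The LOGLEVEL environment variable controls the default level picked up by _configure_library_logging(), and CAMEL_LOGGING_DISABLED=true prevents the library from configuring logging at import time.
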
camel/messages/__init__.py
CHANGED
@@ -20,10 +20,11 @@ from camel.types import (
 )
 
 from .conversion import (
+    AlpacaItem,
     HermesFunctionFormatter,
     ShareGPTMessage,
 )
-from .conversion.
+from .conversion.conversation_models import (
     ShareGPTConversation,
 )
 from .conversion.sharegpt.function_call_formatter import (
@@ -52,4 +53,5 @@ __all__ = [
     'ShareGPTMessage',
     'BaseMessage',
     'FunctionCallingMessage',
+    'AlpacaItem',
 ]

camel/messages/base.py
CHANGED
@@ -15,10 +15,11 @@ import base64
 import io
 import re
 from dataclasses import dataclass
-from typing import Any, Dict, List, Literal, Optional, Tuple, Union
+from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union
 
 import numpy as np
 from PIL import Image
+from pydantic import BaseModel
 
 from camel.messages import (
     FunctionCallFormatter,
@@ -51,24 +52,27 @@ class BaseMessage:
             for the message.
         content (str): The content of the message.
         video_bytes (Optional[bytes]): Optional bytes of a video associated
-            with the message.
+            with the message. (default::obj:`None`)
         image_list (Optional[List[Image.Image]]): Optional list of PIL Image
-            objects associated with the message.
+            objects associated with the message. (default::obj:`None`)
         image_detail (Literal["auto", "low", "high"]): Detail level of the
-            images associated with the message.
+            images associated with the message. (default::obj:`auto`)
         video_detail (Literal["auto", "low", "high"]): Detail level of the
-            videos associated with the message.
+            videos associated with the message. (default::obj:`low`)
+        parsed: Optional[Union[Type[BaseModel], dict]]: Optional object which
+            is parsed from the content. (default::obj:`None`)
     """
 
     role_name: str
     role_type: RoleType
-    meta_dict: Optional[Dict[str,
+    meta_dict: Optional[Dict[str, Any]]
     content: str
 
     video_bytes: Optional[bytes] = None
     image_list: Optional[List[Image.Image]] = None
     image_detail: Literal["auto", "low", "high"] = "auto"
     video_detail: Literal["auto", "low", "high"] = "low"
+    parsed: Optional[Union[Type[BaseModel], dict]] = None
 
     @classmethod
     def make_user_message(
@@ -419,7 +423,6 @@ class BaseMessage:
                     "text": self.content,
                 }
             )
-
         if self.image_list and len(self.image_list) > 0:
             for image in self.image_list:
                 if image.format is None:

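The new parsed field simply travels with the message. A minimal construction sketch, using only the field names and RoleType shown in this diff; the weather dict is an arbitrary illustration of a structured object derived from content:

from camel.messages import BaseMessage
from camel.types import RoleType

msg = BaseMessage(
    role_name="assistant",
    role_type=RoleType.ASSISTANT,
    meta_dict=None,
    content='{"city": "Paris", "temp_c": 21.0}',
    parsed={"city": "Paris", "temp_c": 21.0},  # new in 0.2.12, defaults to None
)
print(msg.parsed)
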
camel/messages/conversion/__init__.py
CHANGED

@@ -12,7 +12,8 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
-from .
+from .alpaca import AlpacaItem
+from .conversation_models import (
     ShareGPTConversation,
     ShareGPTMessage,
     ToolCall,
@@ -24,6 +25,7 @@ __all__ = [
     'ShareGPTMessage',
     'ShareGPTConversation',
     'HermesFunctionFormatter',
+    'AlpacaItem',
     'ToolCall',
     'ToolResponse',
 ]

camel/messages/conversion/alpaca.py
ADDED

@@ -0,0 +1,122 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import re
+
+from pydantic import BaseModel, Field, field_validator
+
+
+class AlpacaItem(BaseModel):
+    r"""Represents an instruction-response item in the Alpaca format.
+
+    Appropripate for both cases where input field is empty, or populated.
+    Provides parsing from string format using the class method from_string().
+
+    Args:
+        instruction (str): The instruction/question/prompt
+        input (str): Input context or examples (put empty string if none)
+        output (str): The response/answer to the instruction
+    """
+
+    instruction: str = Field(description="The instruction/question/prompt")
+    input: str = Field(
+        description="Optional context or input for the task."
+        " For example, when the instruction is \"Summarize the "
+        "following article\", the input is the article."
+    )
+    output: str = Field(description="The response/answer to the instruction")
+
+    @field_validator('instruction', 'output')
+    def no_section_markers(cls, value: str) -> str:
+        r"""Ensures fields don't contain section markers like '###
+        Response:'
+        """
+        if (
+            '### Response' in value
+            or '### Instruction' in value
+            or '### Input' in value
+        ):
+            raise ValueError("Field cannot contain section markers")
+        return value.strip()
+
+    @classmethod
+    def from_string(cls, text: str) -> "AlpacaItem":
+        r"""Creates an AlpacaItem from a formatted string.
+
+        Args:
+            text: String in either of these formats:
+                With input:
+                    ### Instruction:
+                    {instruction}
+                    ### Input:
+                    {input}
+                    ### Response:
+                    {response}
+
+                Without input:
+                    ### Instruction:
+                    {instruction}
+                    ### Response:
+                    {response}
+
+        Returns:
+            AlpacaItem: Parsed instance
+
+        Raises:
+            ValueError: text doesn't match expected format or sections missing
+        """
+        # Strip and standardize newlines
+        text = text.strip().replace('\r\n', '\n')
+
+        # Try to extract sections using regex
+        instruction_match = re.search(
+            r'###\s*Instruction:\s*\n(.+?)(?=\n###|\Z)', text, re.DOTALL
+        )
+        input_match = re.search(
+            r'###\s*Input:\s*\n(.+?)(?=\n###|\Z)', text, re.DOTALL
+        )
+        response_match = re.search(
+            r'###\s*Response:\s*\n(.+?)(?=\n###|\Z)', text, re.DOTALL
+        )
+
+        if not instruction_match or not response_match:
+            raise ValueError(
+                "Text must contain '### Instruction:'"
+                " and '### Response:' sections"
+            )
+
+        return cls(
+            instruction=instruction_match.group(1).strip(),
+            input=input_match.group(1).strip() if input_match else "",
+            output=response_match.group(1).strip(),
+        )
+
+    def to_string(self) -> str:
+        r"""Converts the AlpacaItem to its string representation.
+
+        Returns:
+            str: Formatted string representation with sections markers
+        """
+        return "\n".join(
+            [
+                "### Instruction:",
+                self.instruction,
+                "",
+                "### Input:",
+                self.input,
+                "",
+                "### Response:",
+                self.output,
+            ]
+        )

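A round-trip sketch for the new AlpacaItem (also re-exported from camel.messages, per the __init__.py hunks above); the sample text is arbitrary and only needs the Instruction/Input/Response sections that from_string() looks for.

from camel.messages import AlpacaItem

text = (
    "### Instruction:\n"
    "Summarize the following article\n"
    "### Input:\n"
    "CAMEL 0.2.12 adds new interpreters, toolkits and model backends.\n"
    "### Response:\n"
    "The release adds several new interpreters, toolkits and model backends."
)

item = AlpacaItem.from_string(text)
print(item.instruction)  # 'Summarize the following article'
print(item.to_string())  # re-emits the ### Instruction / Input / Response sections
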
camel/models/__init__.py
CHANGED
@@ -21,7 +21,9 @@ from .groq_model import GroqModel
 from .litellm_model import LiteLLMModel
 from .mistral_model import MistralModel
 from .model_factory import ModelFactory
+from .model_manager import ModelManager, ModelProcessingError
 from .nemotron_model import NemotronModel
+from .nvidia_model import NvidiaModel
 from .ollama_model import OllamaModel
 from .openai_audio_models import OpenAIAudioModels
 from .openai_compatible_model import OpenAICompatibleModel
@@ -29,6 +31,7 @@ from .openai_model import OpenAIModel
 from .qwen_model import QwenModel
 from .reka_model import RekaModel
 from .samba_model import SambaModel
+from .sglang_model import SGLangModel
 from .stub_model import StubModel
 from .togetherai_model import TogetherAIModel
 from .vllm_model import VLLMModel
@@ -46,11 +49,14 @@ __all__ = [
     'ZhipuAIModel',
     'CohereModel',
     'ModelFactory',
+    'ModelManager',
     'LiteLLMModel',
     'OpenAIAudioModels',
     'NemotronModel',
+    'NvidiaModel',
     'OllamaModel',
     'VLLMModel',
+    'SGLangModel',
     'GeminiModel',
     'OpenAICompatibleModel',
     'RekaModel',
@@ -58,5 +64,6 @@ __all__ = [
     'TogetherAIModel',
    'YiModel',
     'QwenModel',
+    'ModelProcessingError',
     'DeepSeekModel',
 ]

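As a quick post-upgrade check, the names added to __all__ above can be imported directly from camel.models; this sketch only exercises the new public exports and makes no assumptions about their constructors.

from camel.models import (
    ModelManager,
    ModelProcessingError,
    NvidiaModel,
    SGLangModel,
)

# Importing succeeds on 0.2.12; these names do not exist in 0.2.10.
print(ModelManager, NvidiaModel, SGLangModel, ModelProcessingError)
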
|