camel-ai 0.1.5.4__py3-none-any.whl → 0.1.5.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/knowledge_graph_agent.py +11 -15
- camel/agents/task_agent.py +0 -1
- camel/configs/__init__.py +12 -0
- camel/configs/gemini_config.py +97 -0
- camel/configs/litellm_config.py +8 -18
- camel/configs/ollama_config.py +85 -0
- camel/configs/zhipuai_config.py +78 -0
- camel/embeddings/openai_embedding.py +2 -2
- camel/functions/search_functions.py +5 -14
- camel/functions/slack_functions.py +5 -7
- camel/functions/twitter_function.py +3 -8
- camel/functions/weather_functions.py +3 -8
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker_interpreter.py +235 -0
- camel/loaders/__init__.py +2 -0
- camel/loaders/base_io.py +5 -9
- camel/loaders/jina_url_reader.py +99 -0
- camel/loaders/unstructured_io.py +4 -6
- camel/models/__init__.py +2 -0
- camel/models/anthropic_model.py +6 -4
- camel/models/gemini_model.py +203 -0
- camel/models/litellm_model.py +49 -21
- camel/models/model_factory.py +4 -2
- camel/models/nemotron_model.py +14 -6
- camel/models/ollama_model.py +11 -17
- camel/models/openai_audio_models.py +10 -2
- camel/models/openai_model.py +4 -3
- camel/models/zhipuai_model.py +12 -6
- camel/retrievers/bm25_retriever.py +3 -8
- camel/retrievers/cohere_rerank_retriever.py +3 -5
- camel/storages/__init__.py +2 -0
- camel/storages/graph_storages/neo4j_graph.py +3 -7
- camel/storages/key_value_storages/__init__.py +2 -0
- camel/storages/key_value_storages/redis.py +169 -0
- camel/storages/vectordb_storages/milvus.py +3 -7
- camel/storages/vectordb_storages/qdrant.py +3 -7
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/code_execution.py +69 -0
- camel/toolkits/github_toolkit.py +5 -9
- camel/types/enums.py +53 -1
- camel/utils/__init__.py +4 -2
- camel/utils/async_func.py +42 -0
- camel/utils/commons.py +31 -49
- camel/utils/token_counting.py +74 -1
- {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.6.dist-info}/METADATA +12 -3
- {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.6.dist-info}/RECORD +48 -39
- {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.6.dist-info}/WHEEL +0 -0
camel/storages/vectordb_storages/milvus.py
CHANGED

@@ -23,6 +23,7 @@ from camel.storages.vectordb_storages import (
     VectorDBStatus,
     VectorRecord,
 )
+from camel.utils import dependencies_required
 
 logger = logging.getLogger(__name__)
 
@@ -52,6 +53,7 @@ class MilvusStorage(BaseVectorStorage):
         ImportError: If `pymilvus` package is not installed.
     """
 
+    @dependencies_required('pymilvus')
     def __init__(
         self,
         vector_dim: int,
@@ -59,13 +61,7 @@ class MilvusStorage(BaseVectorStorage):
         collection_name: Optional[str] = None,
         **kwargs: Any,
     ) -> None:
-        try:
-            from pymilvus import MilvusClient
-        except ImportError as exc:
-            raise ImportError(
-                "Please install `pymilvus` first. You can install it by "
-                "running `pip install pymilvus`."
-            ) from exc
+        from pymilvus import MilvusClient
 
         self._client: MilvusClient
         self._create_client(url_and_api_key, **kwargs)
camel/storages/vectordb_storages/qdrant.py
CHANGED

@@ -23,6 +23,7 @@ from camel.storages.vectordb_storages import (
     VectorRecord,
 )
 from camel.types import VectorDistance
+from camel.utils import dependencies_required
 
 _qdrant_local_client_map: Dict[str, Tuple[Any, int]] = {}
 
@@ -62,6 +63,7 @@ class QdrantStorage(BaseVectorStorage):
             be initialized with an in-memory storage (`":memory:"`).
     """
 
+    @dependencies_required('qdrant_client')
     def __init__(
         self,
         vector_dim: int,
@@ -72,13 +74,7 @@ class QdrantStorage(BaseVectorStorage):
         delete_collection_on_del: bool = False,
         **kwargs: Any,
     ) -> None:
-        try:
-            from qdrant_client import QdrantClient
-        except ImportError as exc:
-            raise ImportError(
-                "Please install `qdrant-client` first. You can install it by "
-                "running `pip install qdrant-client`."
-            ) from exc
+        from qdrant_client import QdrantClient
 
         self._client: QdrantClient
         self._local_path: Optional[str] = None
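Both vector-store backends above swap inline try/except import guards for the `@dependencies_required` decorator. As a rough sketch of how such a decorator can work (an illustration, not camel's actual implementation):

```python
import importlib.util
from functools import wraps
from typing import Any, Callable


def dependencies_required(*module_names: str) -> Callable:
    """Hypothetical sketch: raise a helpful ImportError when any
    required optional module is missing, before running the function."""

    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            missing = [
                name
                for name in module_names
                if importlib.util.find_spec(name) is None
            ]
            if missing:
                raise ImportError(
                    f"Missing required modules: {', '.join(missing)}. "
                    f"Install them, e.g. `pip install {' '.join(missing)}`."
                )
            return func(*args, **kwargs)

        return wrapper

    return decorator
```

This keeps the heavy import lazy (it still happens inside `__init__`) while failing fast with an actionable message when the extra is not installed.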
camel/toolkits/__init__.py
CHANGED

@@ -13,9 +13,11 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
 from .base import BaseToolkit
+from .code_execution import CodeExecutionToolkit
 from .github_toolkit import GithubToolkit
 
 __all__ = [
     'BaseToolkit',
     'GithubToolkit',
+    'CodeExecutionToolkit',
 ]
camel/toolkits/code_execution.py
ADDED

@@ -0,0 +1,69 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import List, Literal
+
+from camel.functions import OpenAIFunction
+from camel.interpreters import InternalPythonInterpreter
+
+from .base import BaseToolkit
+
+
+class CodeExecutionToolkit(BaseToolkit):
+    r"""A tookit for code execution.
+
+    Args:
+        sandbox (str): the environment type used to execute code.
+    """
+
+    def __init__(
+        self,
+        sandbox: Literal[
+            "internal_python", "jupyter", "docker"
+        ] = "internal_python",
+        verbose: bool = False,
+    ) -> None:
+        # TODO: Add support for docker and jupyter.
+        self.verbose = verbose
+        if sandbox == "internal_python":
+            self.interpreter = InternalPythonInterpreter()
+        else:
+            raise RuntimeError(
+                f"The sandbox type `{sandbox}` is not supported."
+            )
+
+    def execute_code(self, code: str) -> str:
+        r"""Execute a given code snippet.
+
+        Args:
+            code (str): The input code to the Code Interpreter tool call.
+
+        Returns:
+            str: The text output from the Code Interpreter tool call.
+        """
+        output = self.interpreter.run(code, "python")
+        # ruff: noqa: E501
+        content = f"Executed the code below:\n```py\n{code}\n```\n> Executed Results:\n{output}"
+        if self.verbose:
+            print(content)
+        return content
+
+    def get_tools(self) -> List[OpenAIFunction]:
+        r"""Returns a list of OpenAIFunction objects representing the
+        functions in the toolkit.
+
+        Returns:
+            List[OpenAIFunction]: A list of OpenAIFunction objects
+            representing the functions in the toolkit.
+        """
+        return [OpenAIFunction(self.execute_code)]
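A short usage sketch of the new toolkit (illustrative driver code, not part of the diff; assumes camel-ai 0.1.5.6 is installed):

```python
from camel.toolkits import CodeExecutionToolkit

# Only the "internal_python" sandbox is implemented in this release;
# "jupyter" and "docker" raise RuntimeError (see the TODO above).
toolkit = CodeExecutionToolkit(verbose=True)
result = toolkit.execute_code("[x * x for x in range(5)]")
# `result` embeds both the snippet and the interpreter output.
tools = toolkit.get_tools()  # a single OpenAIFunction wrapping execute_code
```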
camel/toolkits/github_toolkit.py
CHANGED

@@ -18,8 +18,8 @@ from datetime import datetime, timedelta
 from typing import List, Optional
 
 from camel.functions import OpenAIFunction
-
-from .base import BaseToolkit
+from camel.toolkits.base import BaseToolkit
+from camel.utils import dependencies_required
 
 
 @dataclass
@@ -130,6 +130,7 @@ class GithubToolkit(BaseToolkit):
         `get_github_access_token` method.
     """
 
+    @dependencies_required('github')
     def __init__(
         self, repo_name: str, access_token: Optional[str] = None
     ) -> None:
@@ -144,13 +145,8 @@ class GithubToolkit(BaseToolkit):
         if access_token is None:
             access_token = self.get_github_access_token()
 
-        try:
-            from github import Auth, Github
-        except ImportError:
-            raise ImportError(
-                "Please install `github` first. You can install it by running "
-                "`pip install pygithub`."
-            )
+        from github import Auth, Github
+
         self.github = Github(auth=Auth.Token(access_token))
         self.repo = self.github.get_repo(repo_name)
 
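The same decorator pattern applies here: constructing the toolkit without `pygithub` installed now fails in the decorator rather than at the inline import. A hypothetical construction call:

```python
from camel.toolkits import GithubToolkit

# @dependencies_required('github') checks the module before __init__ runs;
# the token falls back to get_github_access_token() when omitted.
toolkit = GithubToolkit(repo_name="camel-ai/camel", access_token="<token>")
```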
camel/types/enums.py
CHANGED

@@ -30,15 +30,19 @@ class ModelType(Enum):
     GPT_4_TURBO = "gpt-4-turbo"
     GPT_4O = "gpt-4o"
     GLM_4 = "glm-4"
+    GLM_4_OPEN_SOURCE = "glm-4-open-source"
     GLM_4V = 'glm-4v'
     GLM_3_TURBO = "glm-3-turbo"
 
     STUB = "stub"
 
     LLAMA_2 = "llama-2"
+    LLAMA_3 = "llama-3"
     VICUNA = "vicuna"
     VICUNA_16K = "vicuna-16k"
 
+    QWEN_2 = "qwen-2"
+
     # Legacy anthropic models
     # NOTE: anthropic lagecy models only Claude 2.1 has system prompt support
     CLAUDE_2_1 = "claude-2.1"
@@ -54,6 +58,10 @@
     # Nvidia models
     NEMOTRON_4_REWARD = "nvidia/nemotron-4-340b-reward"
 
+    # Gemini models
+    GEMINI_1_5_FLASH = "gemini-1.5-flash"
+    GEMINI_1_5_PRO = "gemini-1.5-pro"
+
     @property
     def value_for_tiktoken(self) -> str:
         return (
@@ -87,6 +95,9 @@
         r"""Returns whether this type of models is open-source."""
         return self in {
             ModelType.LLAMA_2,
+            ModelType.LLAMA_3,
+            ModelType.QWEN_2,
+            ModelType.GLM_4_OPEN_SOURCE,
             ModelType.VICUNA,
             ModelType.VICUNA_16K,
         }
@@ -119,6 +130,10 @@
             ModelType.NEMOTRON_4_REWARD,
         }
 
+    @property
+    def is_gemini(self) -> bool:
+        return self in {ModelType.GEMINI_1_5_FLASH, ModelType.GEMINI_1_5_PRO}
+
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
@@ -135,7 +150,11 @@
             return 128000
         elif self is ModelType.GPT_4O:
             return 128000
-        elif self == ModelType.GLM_4:
+        elif self == ModelType.GEMINI_1_5_FLASH:
+            return 1048576
+        elif self == ModelType.GEMINI_1_5_PRO:
+            return 1048576
+        elif self == ModelType.GLM_4_OPEN_SOURCE:
             return 8192
         elif self == ModelType.GLM_3_TURBO:
             return 8192
@@ -145,6 +164,12 @@
             return 4096
         elif self is ModelType.LLAMA_2:
             return 4096
+        elif self is ModelType.LLAMA_3:
+            return 8192
+        elif self is ModelType.QWEN_2:
+            return 128000
+        elif self is ModelType.GLM_4:
+            return 8192
         elif self is ModelType.VICUNA:
             # reference: https://lmsys.org/blog/2023-03-30-vicuna/
             return 2048
@@ -184,6 +209,20 @@
                 self.value in model_name.lower()
                 or "llama2" in model_name.lower()
             )
+        elif self is ModelType.LLAMA_3:
+            return (
+                self.value in model_name.lower()
+                or "llama3" in model_name.lower()
+            )
+        elif self is ModelType.QWEN_2:
+            return (
+                self.value in model_name.lower()
+                or "qwen2" in model_name.lower()
+            )
+        elif self is ModelType.GLM_4_OPEN_SOURCE:
+            return (
+                'glm-4' in model_name.lower() or "glm4" in model_name.lower()
+            )
         else:
             return self.value in model_name.lower()
 
@@ -304,6 +343,7 @@ class ModelPlatformType(Enum):
     LITELLM = "litellm"
     ZHIPU = "zhipuai"
     DEFAULT = "default"
+    GEMINI = "gemini"
 
     @property
     def is_openai(self) -> bool:
@@ -340,6 +380,11 @@
         r"""Returns whether this platform is opensource."""
         return self is ModelPlatformType.OPENSOURCE
 
+    @property
+    def is_gemini(self) -> bool:
+        r"""Returns whether this platform is Gemini."""
+        return self is ModelPlatformType.GEMINI
+
 
 class AudioModelType(Enum):
     TTS_1 = "tts-1"
@@ -374,3 +419,10 @@ class VoiceType(Enum):
         VoiceType.NOVA,
         VoiceType.SHIMMER,
     }
+
+
+class JinaReturnFormat(Enum):
+    DEFAULT = None
+    MARKDOWN = "markdown"
+    HTML = "html"
+    TEXT = "text"
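A quick sketch of the enum surface added in this release (values taken from the hunks above; assumes these members are re-exported via `camel.types`):

```python
from camel.types import ModelPlatformType, ModelType

assert ModelType.GEMINI_1_5_PRO.is_gemini
assert ModelType.GEMINI_1_5_PRO.token_limit == 1048576  # 1024 * 1024
assert ModelType.LLAMA_3.token_limit == 8192
assert ModelType.QWEN_2.token_limit == 128000
assert ModelPlatformType.GEMINI.is_gemini
```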
camel/utils/__init__.py
CHANGED

@@ -23,7 +23,7 @@ from .commons import (
     get_prompt_template_key_words,
     get_system_information,
     get_task_list,
-    model_api_key_required,
+    is_docker_running,
     print_text_animated,
     text_extract_from_web,
     to_pascal,
@@ -32,6 +32,7 @@ from .constants import Constants
 from .token_counting import (
     AnthropicTokenCounter,
     BaseTokenCounter,
+    GeminiTokenCounter,
     LiteLLMTokenCounter,
     OpenAITokenCounter,
     OpenSourceTokenCounter,
@@ -39,7 +40,6 @@ from .token_counting import (
 )
 
 __all__ = [
-    'model_api_key_required',
     'print_text_animated',
     'get_prompt_template_key_words',
     'get_first_int',
@@ -60,4 +60,6 @@ __all__ = [
     'create_chunks',
     'dependencies_required',
     'api_keys_required',
+    'is_docker_running',
+    'GeminiTokenCounter',
 ]
camel/utils/async_func.py
ADDED

@@ -0,0 +1,42 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+import asyncio
+from copy import deepcopy
+
+from camel.functions.openai_function import OpenAIFunction
+
+
+def sync_funcs_to_async(funcs: list[OpenAIFunction]) -> list[OpenAIFunction]:
+    r"""Convert a list of Python synchronous functions to Python
+    asynchronous functions.
+
+    Args:
+        funcs (list[OpenAIFunction]): List of Python synchronous
+            functions in the :obj:`OpenAIFunction` format.
+
+    Returns:
+        list[OpenAIFunction]: List of Python asynchronous functions
+            in the :obj:`OpenAIFunction` format.
+    """
+    async_funcs = []
+    for func in funcs:
+        sync_func = func.func
+
+        def async_callable(*args, **kwargs):
+            return asyncio.to_thread(sync_func, *args, **kwargs)  # noqa: B023
+
+        async_funcs.append(
+            OpenAIFunction(async_callable, deepcopy(func.openai_tool_schema))
+        )
+    return async_funcs
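Illustrative use of the new helper (hypothetical function `add`; assumes `OpenAIFunction` can wrap a plain documented callable, as `code_execution.py` above does):

```python
import asyncio

from camel.functions.openai_function import OpenAIFunction
from camel.utils.async_func import sync_funcs_to_async


def add(a: int, b: int) -> int:
    r"""Adds two numbers.

    Args:
        a (int): The first number.
        b (int): The second number.
    """
    return a + b


async def main() -> None:
    async_fn = sync_funcs_to_async([OpenAIFunction(add)])[0]
    # The wrapped callable returns an awaitable running `add` in a thread.
    print(await async_fn.func(2, 3))  # 5


asyncio.run(main())
```

Note the `# noqa: B023` in the diff: `sync_func` is captured late by the inner closure, so ruff's loop-variable-binding warning is silenced rather than fixed.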
camel/utils/commons.py
CHANGED

@@ -16,6 +16,7 @@ import os
 import platform
 import re
 import socket
+import subprocess
 import time
 import zipfile
 from functools import wraps
@@ -30,48 +31,6 @@ from camel.types import TaskType
 F = TypeVar('F', bound=Callable[..., Any])
 
 
-def model_api_key_required(func: F) -> F:
-    r"""Decorator that checks if the API key is available either as an
-    environment variable or passed directly for a model.
-
-    Args:
-        func (callable): The function to be wrapped.
-
-    Returns:
-        callable: The decorated function.
-
-    Raises:
-        ValueError: If the API key is not found, either as an environment
-            variable or directly passed.
-
-    Note:
-        Supported model type: `OpenAI` and `Anthropic`.
-    """
-
-    @wraps(func)
-    def wrapper(self, *args, **kwargs):
-        if self.model_type.is_openai:
-            if not self._api_key and 'OPENAI_API_KEY' not in os.environ:
-                raise ValueError('OpenAI API key not found.')
-            return func(self, *args, **kwargs)
-        elif self.model_type.is_zhipuai:
-            if 'ZHIPUAI_API_KEY' not in os.environ:
-                raise ValueError('ZhiPuAI API key not found.')
-            return func(self, *args, **kwargs)
-        elif self.model_type.is_anthropic:
-            if not self._api_key and 'ANTHROPIC_API_KEY' not in os.environ:
-                raise ValueError('Anthropic API key not found.')
-            return func(self, *args, **kwargs)
-        elif self.model_type.is_nvidia:
-            if not self._api_key and 'NVIDIA_API_KEY' not in os.environ:
-                raise ValueError('NVIDIA API key not found.')
-            return func(self, *args, **kwargs)
-        else:
-            raise ValueError('Unsupported model type.')
-
-    return cast(F, wrapper)
-
-
 def print_text_animated(text, delay: float = 0.02, end: str = ""):
     r"""Prints the given text with an animated effect.
 
@@ -260,7 +219,7 @@ def is_module_available(module_name: str) -> bool:
 
 def api_keys_required(*required_keys: str) -> Callable[[F], F]:
     r"""A decorator to check if the required API keys are
-    presented in the environment variables.
+    presented in the environment variables or as an instance attribute.
 
     Args:
         required_keys (str): The required API keys to be checked.
@@ -271,7 +230,7 @@ def api_keys_required(*required_keys: str) -> Callable[[F], F]:
 
     Raises:
         ValueError: If any of the required API keys are missing in the
-            environment variables.
+            environment variables and the instance attribute.
 
     Example:
         ::
@@ -283,13 +242,18 @@ def api_keys_required(*required_keys: str) -> Callable[[F], F]:
 
     def decorator(func: F) -> F:
        @wraps(func)
-        def wrapper(*args: Any, **kwargs: Any) -> Any:
-            missing_keys = [k for k in required_keys if k not in os.environ]
-            if missing_keys:
+        def wrapper(self, *args: Any, **kwargs: Any) -> Any:
+            missing_environment_keys = [
+                k for k in required_keys if k not in os.environ
+            ]
+            if (
+                not getattr(self, '_api_key', None)
+                and missing_environment_keys
+            ):
                 raise ValueError(
-                    f"Missing API keys: {', '.join(missing_keys)}"
+                    f"Missing API keys: {', '.join(missing_environment_keys)}"
                 )
-            return func(*args, **kwargs)
+            return func(self, *args, **kwargs)
 
         return cast(F, wrapper)
 
@@ -400,3 +364,21 @@ def create_chunks(text: str, n: int) -> List[str]:
         chunks.append(text[i:j])
         i = j
     return chunks
+
+
+def is_docker_running() -> bool:
+    r"""Check if the Docker daemon is running.
+
+    Returns:
+        bool: True if the Docker daemon is running, False otherwise.
+    """
+    try:
+        result = subprocess.run(
+            ["docker", "info"],
+            check=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
+        return result.returncode == 0
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        return False
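The reworked `api_keys_required` wrapper now takes `self`, so it applies to instance methods and accepts a key passed at construction time as an alternative to the environment. A hypothetical backend class to illustrate the contract:

```python
from typing import Optional

from camel.utils import api_keys_required


class MyBackend:
    def __init__(self, api_key: Optional[str] = None) -> None:
        self._api_key = api_key  # checked before falling back to os.environ

    @api_keys_required('MY_SERVICE_API_KEY')
    def run(self) -> str:
        return "ok"


backend = MyBackend(api_key="sk-...")
print(backend.run())  # ok: the instance attribute satisfies the check
```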
camel/utils/token_counting.py
CHANGED

@@ -51,7 +51,7 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
     system_message = messages[0]["content"]
 
     ret: str
-    if model == ModelType.LLAMA_2:
+    if model == ModelType.LLAMA_2 or model == ModelType.LLAMA_3:
         # reference: https://github.com/facebookresearch/llama/blob/cfc3fc8c1968d390eb830e65c63865e980873a06/llama/generation.py#L212
         seps = [" ", " </s><s>"]
         role_map = {"user": "[INST]", "assistant": "[/INST]"}
@@ -93,6 +93,45 @@ def messages_to_prompt(messages: List[OpenAIMessage], model: ModelType) -> str:
             else:
                 ret += role + ":"
         return ret
+    elif model == ModelType.GLM_4_OPEN_SOURCE:
+        system_prompt = f"[gMASK]<sop><|system|>\n{system_message}"
+        ret = system_prompt
+        for msg in messages[1:]:
+            role = msg["role"]
+            content = msg["content"]
+            if not isinstance(content, str):
+                raise ValueError(
+                    "Currently multimodal context is not "
+                    "supported by the token counter."
+                )
+            if content:
+                ret += "<|" + role + "|>" + "\n" + content
+            else:
+                ret += "<|" + role + "|>" + "\n"
+        return ret
+    elif model == ModelType.QWEN_2:
+        system_prompt = f"<|im_start|>system\n{system_message}<|im_end|>"
+        ret = system_prompt + "\n"
+        for msg in messages[1:]:
+            role = msg["role"]
+            content = msg["content"]
+            if not isinstance(content, str):
+                raise ValueError(
+                    "Currently multimodal context is not "
+                    "supported by the token counter."
+                )
+            if content:
+                ret += (
+                    '<|im_start|>'
+                    + role
+                    + '\n'
+                    + content
+                    + '<|im_end|>'
+                    + '\n'
+                )
+            else:
+                ret += '<|im_start|>' + role + '\n'
+        return ret
     else:
         raise ValueError(f"Invalid model type: {model}")
 
@@ -303,6 +342,40 @@ class AnthropicTokenCounter(BaseTokenCounter):
         return num_tokens
 
 
+class GeminiTokenCounter(BaseTokenCounter):
+    def __init__(self, model_type: ModelType):
+        r"""Constructor for the token counter for Gemini models."""
+        import google.generativeai as genai
+
+        self.model_type = model_type
+        self._client = genai.GenerativeModel(self.model_type.value)
+
+    def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
+        r"""Count number of tokens in the provided message list using
+        loaded tokenizer specific for this type of model.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            int: Number of tokens in the messages.
+        """
+        converted_messages = []
+        for message in messages:
+            role = message.get('role')
+            if role == 'assistant':
+                role_to_gemini = 'model'
+            else:
+                role_to_gemini = 'user'
+            converted_message = {
+                "role": role_to_gemini,
+                "parts": message.get("content"),
+            }
+            converted_messages.append(converted_message)
+        return self._client.count_tokens(converted_messages).total_tokens
+
+
 class LiteLLMTokenCounter:
     def __init__(self, model_type: str):
         r"""Constructor for the token counter for LiteLLM models.
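For intuition, the new QWEN_2 branch serializes OpenAI-style messages into ChatML. Tracing the code above on a small hypothetical input:

```python
messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "user", "content": "Hi"},
]
# messages_to_prompt(messages, ModelType.QWEN_2) returns:
#   <|im_start|>system
#   You are helpful.<|im_end|>
#   <|im_start|>user
#   Hi<|im_end|>
```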
{camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.6.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.4
+Version: 0.1.5.6
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -16,6 +16,7 @@ Provides-Extra: all
 Provides-Extra: encoders
 Provides-Extra: graph-storages
 Provides-Extra: huggingface-agent
+Provides-Extra: kv-stroages
 Provides-Extra: model-platforms
 Provides-Extra: retrievers
 Provides-Extra: test
@@ -31,13 +32,16 @@ Requires-Dist: curl_cffi (==0.6.2)
 Requires-Dist: datasets (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: diffusers (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: discord.py (>=2.3.2,<3.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: jsonschema (>=4,<5)
 Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
+Requires-Dist: milvus-lite (>=2.4.0,<=2.4.7)
 Requires-Dist: mock (>=5,<6) ; extra == "test"
 Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
 Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
@@ -59,13 +63,14 @@ Requires-Dist: pytest (>=7,<8) ; extra == "test"
 Requires-Dist: pytest-asyncio (>=0.23.0,<0.24.0) ; extra == "test"
 Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "vector-databases" or extra == "all"
 Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "all"
+Requires-Dist: redis (>=5.0.6,<6.0.0) ; extra == "kv-stroages" or extra == "all"
 Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: sentence-transformers (>=3.0.1,<4.0.0) ; extra == "encoders" or extra == "all"
 Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: slack-sdk (>=3.27.2,<4.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
-Requires-Dist: torch (>=
+Requires-Dist: torch (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: unstructured[all-docs] (>=0.10.30,<0.11.0) ; extra == "tools" or extra == "all"
 Requires-Dist: wikipedia (>=1,<2) ; extra == "tools" or extra == "all"
@@ -186,7 +191,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.4 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.6 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -198,6 +203,10 @@ pip install -e .
 pip install -e .[all] # (Optional)
 ```
 
+### From Docker
+
+Detailed guidance can be find [here](https://github.com/camel-ai/camel/blob/master/.container/README.md)
+
 ## Documentation
 
 [CAMEL package documentation pages](https://camel-ai.github.io/camel/).