camel-ai 0.2.3__py3-none-any.whl → 0.2.3a1__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +69 -93
- camel/agents/knowledge_graph_agent.py +6 -4
- camel/bots/__init__.py +2 -16
- camel/bots/discord_bot.py +206 -0
- camel/configs/__init__.py +2 -1
- camel/configs/anthropic_config.py +5 -2
- camel/configs/base_config.py +6 -6
- camel/configs/groq_config.py +3 -2
- camel/configs/ollama_config.py +2 -1
- camel/configs/openai_config.py +23 -2
- camel/configs/samba_config.py +2 -2
- camel/configs/togetherai_config.py +1 -1
- camel/configs/vllm_config.py +1 -1
- camel/configs/zhipuai_config.py +3 -2
- camel/embeddings/openai_embedding.py +2 -2
- camel/loaders/__init__.py +0 -2
- camel/loaders/firecrawl_reader.py +3 -3
- camel/loaders/unstructured_io.py +33 -35
- camel/messages/__init__.py +0 -1
- camel/models/__init__.py +4 -2
- camel/models/anthropic_model.py +26 -32
- camel/models/azure_openai_model.py +36 -39
- camel/models/base_model.py +20 -31
- camel/models/gemini_model.py +29 -37
- camel/models/groq_model.py +23 -29
- camel/models/litellm_model.py +61 -44
- camel/models/mistral_model.py +29 -32
- camel/models/model_factory.py +76 -66
- camel/models/nemotron_model.py +23 -33
- camel/models/ollama_model.py +47 -42
- camel/models/open_source_model.py +170 -0
- camel/models/{openai_compatible_model.py → openai_compatibility_model.py} +49 -31
- camel/models/openai_model.py +29 -48
- camel/models/reka_model.py +28 -30
- camel/models/samba_model.py +177 -82
- camel/models/stub_model.py +2 -2
- camel/models/togetherai_model.py +43 -37
- camel/models/vllm_model.py +50 -43
- camel/models/zhipuai_model.py +27 -33
- camel/retrievers/auto_retriever.py +10 -28
- camel/retrievers/vector_retriever.py +47 -58
- camel/societies/babyagi_playing.py +3 -6
- camel/societies/role_playing.py +3 -5
- camel/storages/graph_storages/graph_element.py +5 -3
- camel/storages/key_value_storages/json.py +1 -6
- camel/toolkits/__init__.py +7 -20
- camel/toolkits/base.py +3 -2
- camel/toolkits/code_execution.py +7 -6
- camel/toolkits/dalle_toolkit.py +6 -6
- camel/toolkits/github_toolkit.py +10 -9
- camel/toolkits/google_maps_toolkit.py +7 -7
- camel/toolkits/linkedin_toolkit.py +7 -7
- camel/toolkits/math_toolkit.py +8 -8
- camel/toolkits/open_api_toolkit.py +5 -5
- camel/toolkits/{function_tool.py → openai_function.py} +11 -34
- camel/toolkits/reddit_toolkit.py +7 -7
- camel/toolkits/retrieval_toolkit.py +5 -5
- camel/toolkits/search_toolkit.py +9 -9
- camel/toolkits/slack_toolkit.py +11 -11
- camel/toolkits/twitter_toolkit.py +452 -378
- camel/toolkits/weather_toolkit.py +6 -6
- camel/types/__init__.py +1 -6
- camel/types/enums.py +85 -40
- camel/types/openai_types.py +0 -3
- camel/utils/__init__.py +2 -0
- camel/utils/async_func.py +7 -7
- camel/utils/commons.py +3 -32
- camel/utils/token_counting.py +212 -30
- camel/workforce/role_playing_worker.py +1 -1
- camel/workforce/single_agent_worker.py +1 -1
- camel/workforce/task_channel.py +3 -4
- camel/workforce/workforce.py +4 -4
- {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a1.dist-info}/METADATA +56 -27
- {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a1.dist-info}/RECORD +76 -85
- {camel_ai-0.2.3.dist-info → camel_ai-0.2.3a1.dist-info}/WHEEL +1 -1
- camel/bots/discord_app.py +0 -138
- camel/bots/slack/__init__.py +0 -30
- camel/bots/slack/models.py +0 -158
- camel/bots/slack/slack_app.py +0 -255
- camel/loaders/chunkr_reader.py +0 -163
- camel/toolkits/arxiv_toolkit.py +0 -155
- camel/toolkits/ask_news_toolkit.py +0 -653
- camel/toolkits/google_scholar_toolkit.py +0 -146
- camel/toolkits/whatsapp_toolkit.py +0 -177
- camel/types/unified_model_type.py +0 -104
- camel_ai-0.2.3.dist-info/LICENSE +0 -201
camel/models/zhipuai_model.py
CHANGED

@@ -17,14 +17,10 @@ from typing import Any, Dict, List, Optional, Union
 
 from openai import OpenAI, Stream
 
-from camel.configs import ZHIPUAI_API_PARAMS
+from camel.configs import ZHIPUAI_API_PARAMS
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
 from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
@@ -33,42 +29,40 @@ from camel.utils import (
 
 
 class ZhipuAIModel(BaseModelBackend):
-    r"""ZhipuAI API in a unified BaseModelBackend interface.
-
-    Args:
-        model_type (Union[ModelType, str]): Model for which a backend is
-            created, one of GLM_* series.
-        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
-            that will be fed into:obj:`openai.ChatCompletion.create()`. If
-            :obj:`None`, :obj:`ZhipuAIConfig().as_dict()` will be used.
-            (default: :obj:`None`)
-        api_key (Optional[str], optional): The API key for authenticating with
-            the ZhipuAI service. (default: :obj:`None`)
-        url (Optional[str], optional): The url to the ZhipuAI service.
-            (default: :obj:`https://open.bigmodel.cn/api/paas/v4/`)
-        token_counter (Optional[BaseTokenCounter], optional): Token counter to
-            use for the model. If not provided, :obj:`OpenAITokenCounter(
-            ModelType.GPT_4O_MINI)` will be used.
-            (default: :obj:`None`)
-    """
+    r"""ZhipuAI API in a unified BaseModelBackend interface."""
 
     def __init__(
         self,
-        model_type:
-        model_config_dict:
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-
-
-
-
-
-
+        r"""Constructor for ZhipuAI backend.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created,
+                such as GLM_* series.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into openai.ChatCompletion.create().
+            api_key (Optional[str]): The API key for authenticating with the
+                ZhipuAI service. (default: :obj:`None`)
+            url (Optional[str]): The url to the ZhipuAI service. (default:
+                :obj:`None`)
+            token_counter (Optional[BaseTokenCounter]): Token counter to use
+                for the model. If not provided, `OpenAITokenCounter(ModelType.
+                GPT_4O_MINI)` will be used.
+        """
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
+        self._url = url or os.environ.get("ZHIPUAI_API_BASE_URL")
+        self._api_key = api_key or os.environ.get("ZHIPUAI_API_KEY")
+        if not self._url or not self._api_key:
+            raise ValueError(
+                "ZHIPUAI_API_BASE_URL and ZHIPUAI_API_KEY should be set."
+            )
         self._client = OpenAI(
             timeout=60,
             max_retries=3,
@@ -96,7 +90,7 @@ class ZhipuAIModel(BaseModelBackend):
         # Reference: https://open.bigmodel.cn/dev/api#openai_sdk
         response = self._client.chat.completions.create(
             messages=messages,
-            model=self.model_type,
+            model=self.model_type.value,
             **self.model_config_dict,
         )
         return response
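A minimal usage sketch (not part of the diff) of the 0.2.3a1 constructor shown above; the GLM model member and the config keys are illustrative assumptions.

import os

from camel.models.zhipuai_model import ZhipuAIModel
from camel.types import ModelType

# 0.2.3a1 requires both variables unless api_key/url are passed explicitly
# (see the ValueError added in __init__ above).
os.environ["ZHIPUAI_API_BASE_URL"] = "https://open.bigmodel.cn/api/paas/v4/"
os.environ["ZHIPUAI_API_KEY"] = "<your-key>"

model = ZhipuAIModel(
    model_type=ModelType.GLM_4,              # assumed GLM_* member
    model_config_dict={"temperature": 0.2},  # forwarded to chat.completions.create()
)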
camel/retrievers/auto_retriever.py
CHANGED

@@ -14,16 +14,7 @@
 import datetime
 import os
 import re
-import
-from typing import (
-    TYPE_CHECKING,
-    Collection,
-    List,
-    Optional,
-    Sequence,
-    Tuple,
-    Union,
-)
+from typing import Collection, List, Optional, Sequence, Tuple, Union
 
 from camel.embeddings import BaseEmbedding, OpenAIEmbedding
 from camel.retrievers.vector_retriever import VectorRetriever
@@ -36,8 +27,10 @@ from camel.storages import (
 from camel.types import StorageType
 from camel.utils import Constants
 
-
+try:
     from unstructured.documents.elements import Element
+except ImportError:
+    Element = None
 
 
 class AutoRetriever:
@@ -105,9 +98,7 @@
             f"Unsupported vector storage type: {self.storage_type}"
         )
 
-    def _collection_name_generator(
-        self, content: Union[str, "Element"]
-    ) -> str:
+    def _collection_name_generator(self, content: Union[str, Element]) -> str:
         r"""Generates a valid collection name from a given file path or URL.
 
         Args:
@@ -117,10 +108,9 @@
         Returns:
             str: A sanitized, valid collection name suitable for use.
         """
-        from unstructured.documents.elements import Element
 
         if isinstance(content, Element):
-            content = content.metadata.file_directory
+            content = content.metadata.file_directory
 
         collection_name = re.sub(r'[^a-zA-Z0-9]', '', content)[:20]
 
@@ -185,7 +175,7 @@
     def run_vector_retriever(
         self,
         query: str,
-        contents: Union[str, List[str],
+        contents: Union[str, List[str], Element, List[Element]],
         top_k: int = Constants.DEFAULT_TOP_K_RESULTS,
         similarity_threshold: float = Constants.DEFAULT_SIMILARITY_THRESHOLD,
         return_detailed_info: bool = False,
@@ -222,20 +212,12 @@
             `contents` is empty.
             RuntimeError: If any errors occur during the retrieve process.
         """
-        from unstructured.documents.elements import Element
-
         if not contents:
             raise ValueError("content cannot be empty.")
 
-
-
-
-        elif isinstance(contents, Element):
-            contents = [contents]
-        elif not isinstance(contents, list):
-            raise ValueError(
-                "contents must be a string, Element, or a list of them."
-            )
+        contents = (
+            [contents] if isinstance(contents, (str, Element)) else contents
+        )
 
         all_retrieved_info = []
         for content in contents:
camel/retrievers/vector_retriever.py
CHANGED

@@ -14,7 +14,7 @@
 import os
 import warnings
 from io import IOBase
-from typing import
+from typing import Any, Dict, List, Optional, Union
 from urllib.parse import urlparse
 
 from camel.embeddings import BaseEmbedding, OpenAIEmbedding
@@ -28,8 +28,10 @@ from camel.storages import (
 )
 from camel.utils import Constants
 
-
+try:
     from unstructured.documents.elements import Element
+except ImportError:
+    Element = None
 
 
 class VectorRetriever(BaseRetriever):
@@ -71,7 +73,7 @@ class VectorRetriever(BaseRetriever):
 
     def process(
         self,
-        content: Union[str,
+        content: Union[str, Element, IOBase],
         chunk_type: str = "chunk_by_title",
         max_characters: int = 500,
         embed_batch: int = 50,
@@ -84,7 +86,7 @@ class VectorRetriever(BaseRetriever):
         specified vector storage.
 
         Args:
-            content (Union[str, Element,
+            content (Union[str, Element, IOBase]): Local file path, remote
                 URL, string content, Element object, or a binary file object.
             chunk_type (str): Type of chunking going to apply. Defaults to
                 "chunk_by_title".
@@ -95,73 +97,67 @@ class VectorRetriever(BaseRetriever):
             otherwise skip chunking. Defaults to True.
             **kwargs (Any): Additional keyword arguments for content parsing.
         """
-        from unstructured.documents.elements import Element
-
         if isinstance(content, Element):
             elements = [content]
         elif isinstance(content, IOBase):
             elements = self.uio.parse_bytes(file=content, **kwargs) or []
-
+        else:
             # Check if the content is URL
             parsed_url = urlparse(content)
             is_url = all([parsed_url.scheme, parsed_url.netloc])
             if is_url or os.path.exists(content):
-                elements = (
-                    self.uio.parse_file_or_url(input_path=content, **kwargs)
-                    or []
-                )
+                elements = self.uio.parse_file_or_url(content, **kwargs) or []
             else:
                 elements = [self.uio.create_element_from_text(text=content)]
-
         if not elements:
             warnings.warn(
                 f"No elements were extracted from the content: {content}"
             )
-
-
-
-
-
-
-
-
-            if should_chunk
-            else elements
+            return
+
+        # Chunk the content if required
+        chunks = (
+            self.uio.chunk_elements(
+                chunk_type=chunk_type,
+                elements=elements,
+                max_characters=max_characters,
             )
+            if should_chunk
+            else elements
+        )
 
-
-
-
-
-
-
+        # Process chunks in batches and store embeddings
+        for i in range(0, len(chunks), embed_batch):
+            batch_chunks = chunks[i : i + embed_batch]
+            batch_vectors = self.embedding_model.embed_list(
+                objs=[str(chunk) for chunk in batch_chunks]
+            )
 
-
-
-
-
-
-
-
-
-
-
-
-                    or ""
-                }
-                chunk_metadata = {"metadata": chunk.metadata.to_dict()}
-                chunk_text = {"text": str(chunk)}
-                combined_dict = {
-                    **content_path_info,
-                    **chunk_metadata,
-                    **chunk_text,
+            records = []
+            # Prepare the payload for each vector record, includes the content
+            # path, chunk metadata, and chunk text
+            for vector, chunk in zip(batch_vectors, batch_chunks):
+                if isinstance(content, str):
+                    content_path_info = {"content path": content}
+                elif isinstance(content, IOBase):
+                    content_path_info = {"content path": "From file bytes"}
+                elif isinstance(content, Element):
+                    content_path_info = {
+                        "content path": content.metadata.file_directory
                 }
+                chunk_metadata = {"metadata": chunk.metadata.to_dict()}
+                chunk_text = {"text": str(chunk)}
+                combined_dict = {
+                    **content_path_info,
+                    **chunk_metadata,
+                    **chunk_text,
+                }
 
-
-
-
+                records.append(
+                    VectorRecord(vector=vector, payload=combined_dict)
+                )
 
-
+        self.storage.add(records=records)
 
     def query(
         self,
@@ -199,13 +195,6 @@ class VectorRetriever(BaseRetriever):
         db_query = VectorDBQuery(query_vector=query_vector, top_k=top_k)
         query_results = self.storage.query(query=db_query)
 
-        # If no results found, raise an error
-        if not query_results:
-            raise ValueError(
-                "Query result is empty, please check if "
-                "the vector storage is empty."
-            )
-
         if query_results[0].record.payload is None:
             raise ValueError(
                 "Payload of vector storage is None, please check the "
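For orientation, a rough usage sketch of the batched process/query flow shown above; the constructor arguments (embedding_model, storage) and the QdrantStorage wiring are assumptions that do not appear in this diff.

from camel.embeddings import OpenAIEmbedding
from camel.retrievers.vector_retriever import VectorRetriever
from camel.storages import QdrantStorage

# Assumed wiring: an embedding model plus a vector store sized to its output.
embedding = OpenAIEmbedding()
storage = QdrantStorage(vector_dim=embedding.get_output_dim(), path="local_data")
retriever = VectorRetriever(embedding_model=embedding, storage=storage)

# process() parses, chunks ("chunk_by_title", 500 chars), embeds in batches of 50,
# and stores VectorRecord payloads, per the 0.2.3a1 code above.
retriever.process("https://www.camel-ai.org/")
print(retriever.query("What is CAMEL-AI?", top_k=3))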
camel/societies/babyagi_playing.py
CHANGED

@@ -106,7 +106,7 @@ class BabyAGI:
         )
 
         self.assistant_agent: ChatAgent
-        self.assistant_sys_msg:
+        self.assistant_sys_msg: BaseMessage
         self.task_creation_agent: TaskCreationAgent
         self.task_prioritization_agent: TaskPrioritizationAgent
         self.init_agents(
@@ -202,8 +202,7 @@ class BabyAGI:
 
         self.task_creation_agent = TaskCreationAgent(
             objective=self.specified_task_prompt,
-            role_name=
-            or "assistant",
+            role_name=self.assistant_sys_msg.role_name,
             output_language=output_language,
             message_window_size=message_window_size,
             **(task_creation_agent_kwargs or {}),
@@ -239,9 +238,7 @@ class BabyAGI:
 
         task_name = self.subtasks.popleft()
         assistant_msg_msg = BaseMessage.make_user_message(
-            role_name=
-            or "assistant",
-            content=f"{task_name}",
+            role_name=self.assistant_sys_msg.role_name, content=f"{task_name}"
         )
 
         assistant_response = self.assistant_agent.step(assistant_msg_msg)
camel/societies/role_playing.py
CHANGED

@@ -149,8 +149,8 @@ class RolePlaying:
 
         self.assistant_agent: ChatAgent
         self.user_agent: ChatAgent
-        self.assistant_sys_msg:
-        self.user_sys_msg:
+        self.assistant_sys_msg: BaseMessage
+        self.user_sys_msg: BaseMessage
         self._init_agents(
             init_assistant_sys_msg,
             init_user_sys_msg,
@@ -454,11 +454,9 @@ class RolePlaying:
         )
         if init_msg_content is None:
             init_msg_content = default_init_msg_content
-
         # Initialize a message sent by the assistant
         init_msg = BaseMessage.make_assistant_message(
-            role_name=
-            or "assistant",
+            role_name=self.assistant_sys_msg.role_name,
             content=init_msg_content,
         )
 
camel/storages/graph_storages/graph_element.py
CHANGED

@@ -13,12 +13,14 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from __future__ import annotations
 
-from typing import
+from typing import List, Union
 
 from pydantic import BaseModel, ConfigDict, Field
 
-
+try:
     from unstructured.documents.elements import Element
+except ImportError:
+    Element = None
 
 
 class Node(BaseModel):
@@ -71,6 +73,6 @@ class GraphElement(BaseModel):
     source: Element
 
     def __post_init__(self):
-        if
+        if Element is None:
             raise ImportError("""The 'unstructured' package is required to use
             the 'source' attribute.""")
camel/storages/key_value_storages/json.py
CHANGED

@@ -18,12 +18,7 @@ from pathlib import Path
 from typing import Any, ClassVar, Dict, List, Optional
 
 from camel.storages.key_value_storages import BaseKeyValueStorage
-from camel.types import (
-    ModelType,
-    OpenAIBackendRole,
-    RoleType,
-    TaskType,
-)
+from camel.types import ModelType, OpenAIBackendRole, RoleType, TaskType
 
 
 class _CamelJSONEncoder(json.JSONEncoder):
camel/toolkits/__init__.py
CHANGED

@@ -12,37 +12,29 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 # ruff: noqa: I001
-from .function_tool import (
-    FunctionTool,
+from .openai_function import (
     OpenAIFunction,
     get_openai_function_schema,
     get_openai_tool_schema,
 )
 from .open_api_specs.security_config import openapi_security_config
 
-
+from .google_maps_toolkit import GoogleMapsToolkit
 from .math_toolkit import MathToolkit, MATH_FUNCS
+from .open_api_toolkit import OpenAPIToolkit
+from .retrieval_toolkit import RetrievalToolkit
 from .search_toolkit import SearchToolkit, SEARCH_FUNCS
+from .twitter_toolkit import TwitterToolkit
 from .weather_toolkit import WeatherToolkit, WEATHER_FUNCS
+from .slack_toolkit import SlackToolkit
 from .dalle_toolkit import DalleToolkit, DALLE_FUNCS
-from .ask_news_toolkit import AskNewsToolkit, AsyncAskNewsToolkit
-
 from .linkedin_toolkit import LinkedInToolkit
 from .reddit_toolkit import RedditToolkit
-
-from .google_maps_toolkit import GoogleMapsToolkit
+
 from .code_execution import CodeExecutionToolkit
 from .github_toolkit import GithubToolkit
-from .google_scholar_toolkit import GoogleScholarToolkit
-from .arxiv_toolkit import ArxivToolkit
-from .slack_toolkit import SlackToolkit
-from .twitter_toolkit import TwitterToolkit, TWITTER_FUNCS
-from .open_api_toolkit import OpenAPIToolkit
-from .retrieval_toolkit import RetrievalToolkit
 
 __all__ = [
-    'BaseToolkit',
-    'FunctionTool',
     'OpenAIFunction',
     'get_openai_function_schema',
     'get_openai_tool_schema',
@@ -60,13 +52,8 @@ __all__ = [
     'LinkedInToolkit',
     'RedditToolkit',
     'CodeExecutionToolkit',
-    'AskNewsToolkit',
-    'AsyncAskNewsToolkit',
-    'GoogleScholarToolkit',
-    'ArxivToolkit',
     'MATH_FUNCS',
     'SEARCH_FUNCS',
     'WEATHER_FUNCS',
     'DALLE_FUNCS',
-    'TWITTER_FUNCS',
 ]
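To illustrate what the import-surface change above means for callers, a small usage sketch of the 0.2.3a1 toolkit API (in 0.2.3 the wrapper class is FunctionTool from the renamed function_tool module); this sketch is not part of the diff.

from camel.toolkits import MATH_FUNCS, MathToolkit, OpenAIFunction

# Each toolkit exposes its functions as OpenAIFunction wrappers via get_tools().
tools = MathToolkit().get_tools()
assert all(isinstance(t, OpenAIFunction) for t in tools)

# Prebuilt lists such as MATH_FUNCS carry the same wrappers.
assert len(MATH_FUNCS) == len(tools)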
camel/toolkits/base.py
CHANGED

@@ -14,10 +14,11 @@
 
 from typing import List
 
-from camel.toolkits import FunctionTool
 from camel.utils import AgentOpsMeta
 
+from .openai_function import OpenAIFunction
+
 
 class BaseToolkit(metaclass=AgentOpsMeta):
-    def get_tools(self) -> List[
+    def get_tools(self) -> List[OpenAIFunction]:
         raise NotImplementedError("Subclasses must implement this method.")
camel/toolkits/code_execution.py
CHANGED

@@ -14,8 +14,9 @@
 from typing import List, Literal
 
 from camel.interpreters import InternalPythonInterpreter
-from camel.toolkits import
-
+from camel.toolkits import OpenAIFunction
+
+from .base import BaseToolkit
 
 
 class CodeExecutionToolkit(BaseToolkit):
@@ -57,12 +58,12 @@ class CodeExecutionToolkit(BaseToolkit):
             print(content)
         return content
 
-    def get_tools(self) -> List[
-        r"""Returns a list of
+    def get_tools(self) -> List[OpenAIFunction]:
+        r"""Returns a list of OpenAIFunction objects representing the
         functions in the toolkit.
 
         Returns:
-            List[
+            List[OpenAIFunction]: A list of OpenAIFunction objects
                 representing the functions in the toolkit.
         """
-        return [
+        return [OpenAIFunction(self.execute_code)]
camel/toolkits/dalle_toolkit.py
CHANGED

@@ -20,7 +20,7 @@ from typing import List, Optional
 from openai import OpenAI
 from PIL import Image
 
-from camel.toolkits import
+from camel.toolkits import OpenAIFunction
 from camel.toolkits.base import BaseToolkit
 
 
@@ -132,15 +132,15 @@ class DalleToolkit(BaseToolkit):
 
         return image_path
 
-    def get_tools(self) -> List[
-        r"""Returns a list of
+    def get_tools(self) -> List[OpenAIFunction]:
+        r"""Returns a list of OpenAIFunction objects representing the
         functions in the toolkit.
 
         Returns:
-            List[
+            List[OpenAIFunction]: A list of OpenAIFunction objects
                 representing the functions in the toolkit.
         """
-        return [
+        return [OpenAIFunction(self.get_dalle_img)]
 
 
-DALLE_FUNCS: List[
+DALLE_FUNCS: List[OpenAIFunction] = DalleToolkit().get_tools()
camel/toolkits/github_toolkit.py
CHANGED

@@ -18,10 +18,11 @@ from typing import List, Optional
 
 from pydantic import BaseModel
 
-from camel.toolkits import FunctionTool
-from camel.toolkits.base import BaseToolkit
 from camel.utils import dependencies_required
 
+from .base import BaseToolkit
+from .openai_function import OpenAIFunction
+
 
 class GithubIssue(BaseModel):
     r"""Represents a GitHub issue.
@@ -130,19 +131,19 @@ class GithubToolkit(BaseToolkit):
         self.github = Github(auth=Auth.Token(access_token))
         self.repo = self.github.get_repo(repo_name)
 
-    def get_tools(self) -> List[
-        r"""Returns a list of
+    def get_tools(self) -> List[OpenAIFunction]:
+        r"""Returns a list of OpenAIFunction objects representing the
         functions in the toolkit.
 
         Returns:
-            List[
+            List[OpenAIFunction]: A list of OpenAIFunction objects
                 representing the functions in the toolkit.
         """
         return [
-
-
-
-
+            OpenAIFunction(self.retrieve_issue_list),
+            OpenAIFunction(self.retrieve_issue),
+            OpenAIFunction(self.create_pull_request),
+            OpenAIFunction(self.retrieve_pull_requests),
         ]
 
     def get_github_access_token(self) -> str:
camel/toolkits/google_maps_toolkit.py
CHANGED

@@ -16,7 +16,7 @@ from functools import wraps
 from typing import Any, Callable, List, Optional, Union
 
 from camel.toolkits.base import BaseToolkit
-from camel.toolkits.
+from camel.toolkits.openai_function import OpenAIFunction
 from camel.utils import dependencies_required
 
 
@@ -287,16 +287,16 @@ class GoogleMapsToolkit(BaseToolkit):
 
         return description
 
-    def get_tools(self) -> List[
-        r"""Returns a list of
+    def get_tools(self) -> List[OpenAIFunction]:
+        r"""Returns a list of OpenAIFunction objects representing the
         functions in the toolkit.
 
         Returns:
-            List[
+            List[OpenAIFunction]: A list of OpenAIFunction objects
                 representing the functions in the toolkit.
         """
         return [
-
-
-
+            OpenAIFunction(self.get_address_description),
+            OpenAIFunction(self.get_elevation),
+            OpenAIFunction(self.get_timezone),
         ]