camel-ai 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +110 -25
- camel/configs/__init__.py +6 -0
- camel/configs/base_config.py +21 -0
- camel/configs/gemini_config.py +17 -9
- camel/configs/qwen_config.py +91 -0
- camel/configs/yi_config.py +58 -0
- camel/generators.py +93 -0
- camel/interpreters/docker_interpreter.py +5 -0
- camel/interpreters/ipython_interpreter.py +2 -1
- camel/loaders/__init__.py +2 -0
- camel/loaders/apify_reader.py +223 -0
- camel/memories/agent_memories.py +24 -1
- camel/messages/base.py +38 -0
- camel/models/__init__.py +4 -0
- camel/models/model_factory.py +6 -0
- camel/models/openai_model.py +1 -1
- camel/models/qwen_model.py +139 -0
- camel/models/yi_model.py +138 -0
- camel/prompts/image_craft.py +8 -0
- camel/prompts/video_description_prompt.py +8 -0
- camel/retrievers/vector_retriever.py +5 -1
- camel/societies/role_playing.py +29 -18
- camel/societies/workforce/base.py +7 -1
- camel/societies/workforce/task_channel.py +10 -0
- camel/societies/workforce/utils.py +6 -0
- camel/societies/workforce/worker.py +2 -0
- camel/storages/vectordb_storages/qdrant.py +147 -24
- camel/tasks/task.py +15 -0
- camel/terminators/base.py +4 -0
- camel/terminators/response_terminator.py +1 -0
- camel/terminators/token_limit_terminator.py +1 -0
- camel/toolkits/__init__.py +4 -1
- camel/toolkits/base.py +9 -0
- camel/toolkits/data_commons_toolkit.py +360 -0
- camel/toolkits/function_tool.py +174 -7
- camel/toolkits/github_toolkit.py +175 -176
- camel/toolkits/google_scholar_toolkit.py +36 -7
- camel/toolkits/notion_toolkit.py +279 -0
- camel/toolkits/search_toolkit.py +164 -36
- camel/types/enums.py +88 -0
- camel/types/unified_model_type.py +10 -0
- camel/utils/commons.py +2 -1
- camel/utils/constants.py +2 -0
- {camel_ai-0.2.6.dist-info → camel_ai-0.2.8.dist-info}/METADATA +129 -79
- {camel_ai-0.2.6.dist-info → camel_ai-0.2.8.dist-info}/RECORD +48 -41
- {camel_ai-0.2.6.dist-info → camel_ai-0.2.8.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.6.dist-info → camel_ai-0.2.8.dist-info}/WHEEL +0 -0
camel/generators.py
CHANGED

@@ -154,6 +154,21 @@ class SystemMessageGenerator:
 
 
 class RoleNameGenerator:
+    r"""Role name generator for role-playing workers.
+
+    Args:
+        assistant_role_names_path (str, optional): The path to the file
+            containing the assistant role names.
+            (default: :obj:`"data/ai_society/assistant_roles.txt"`)
+        user_role_names_path (str, optional): The path to the file
+            containing the user role names.
+            (default: :obj:`"data/ai_society/user_roles.txt"`)
+        assistant_role_names (Optional[List[str]], optional): The list of
+            assistant role names. (default: :obj:`None`)
+        user_role_names (Optional[List[str]], optional): The list of user role
+            names. (default: :obj:`None`)
+    """
+
     def __init__(
         self,
         assistant_role_names_path: str = "data/ai_society/assistant_roles.txt",
@@ -181,12 +196,25 @@ class RoleNameGenerator:
         self.user_role_names = user_role_names
 
     def from_role_files(self) -> Generator[Tuple, None, None]:
+        r"""Generate role names from the file.
+
+        Returns:
+            Generator[Tuple, None, None]: A generator that yields tuples of
+                assistant role names and user role names.
+        """
         for assistant_role_name in self.assistant_role_names:
             for user_role_name in self.user_role_names:
                 yield (assistant_role_name, user_role_name)
 
 
 class AISocietyTaskPromptGenerator:
+    r"""Task prompt generator for AI society tasks.
+
+    Args:
+        num_tasks (int, optional): The number of tasks to generate.
+            (default: :obj:`10`)
+    """
+
     def __init__(
         self,
         num_tasks: int = 10,
@@ -205,6 +233,20 @@ class AISocietyTaskPromptGenerator:
         assistant_role_names_path: str = "data/ai_society/assistant_roles.txt",
         user_role_names_path: str = "data/ai_society/user_roles.txt",
     ) -> Generator[Tuple[str, Tuple[str, str]], None, None]:
+        r"""Generate tasks from role files.
+
+        Args:
+            assistant_role_names_path (str, optional): The path to the file
+                containing the assistant role names.
+                (default: :obj:`"data/ai_society/assistant_roles.txt"`)
+            user_role_names_path (str, optional): The path to the file
+                containing the user role names.
+                (default: :obj:`"data/ai_society/user_roles.txt"`)
+
+        Returns:
+            Generator[Tuple[str, Tuple[str, str]], None, None]: A generator
+                that yields tuples of task prompts and role names.
+        """
         roles_generator = RoleNameGenerator(
             assistant_role_names_path, user_role_names_path
         ).from_role_files()
@@ -220,6 +262,16 @@ class AISocietyTaskPromptGenerator:
     def from_role_generator(
         self, role_generator: Generator[Tuple, None, None]
     ) -> Generator[Tuple[str, Tuple[str, str]], None, None]:
+        r"""Generate tasks from a role generator.
+
+        Args:
+            role_generator (Generator[Tuple, None, None]): A generator that
+                yields tuples of role names.
+
+        Returns:
+            Generator[Tuple[str, Tuple[str, str]], None, None]: A generator
+                that yields tuples of task prompts and role names.
+        """
         for role_1, role_2 in role_generator:
             generate_tasks_prompt = self.generate_tasks_prompt.format(
                 assistant_role=role_1,
@@ -231,6 +283,12 @@ class AISocietyTaskPromptGenerator:
 
 
 class SingleTxtGenerator:
+    r"""Single text generator for role-playing workers.
+
+    Args:
+        text_file_path (str): The path to the file containing the text data.
+    """
+
     def __init__(
         self,
         text_file_path: str,
@@ -242,11 +300,23 @@ class SingleTxtGenerator:
         ]
 
     def from_role_files(self) -> Generator[str, None, None]:
+        r"""Generate text from the file.
+
+        Returns:
+            Generator[str, None, None]: A generator that yields the text data.
+        """
         for data in self.data_list:
             yield data
 
 
 class CodeTaskPromptGenerator:
+    r"""Code task prompt generator for code tasks.
+
+    Args:
+        num_tasks (int, optional): The number of tasks to generate.
+            (default: :obj:`50`)
+    """
+
     def __init__(
         self,
         num_tasks: int = 50,
@@ -262,6 +332,19 @@ class CodeTaskPromptGenerator:
         languages_path: str = "data/code/languages.txt",
         domains_path: str = "data/code/domains.txt",
     ) -> Generator[Tuple[TextPrompt, str, str], None, None]:
+        r"""Generate tasks from role files.
+
+        Args:
+            languages_path (str, optional): The path to the file containing
+                the language names. (default: :obj:`"data/code/languages.txt"`)
+            domains_path (str, optional): The path to the file containing
+                the domain names. (default: :obj:`"data/code/domains.txt"`)
+
+        Returns:
+            Generator[Tuple[TextPrompt, str, str], None, None]: A generator
+                that yields tuples of task prompts, language names, and domain
+                names.
+        """
         language_generator = SingleTxtGenerator(
             languages_path
         ).from_role_files()
@@ -279,4 +362,14 @@ class CodeTaskPromptGenerator:
     def from_role_generator(
         self, role_generator: Generator[Tuple, None, None]
     ) -> Generator[str, None, None]:
+        r"""Generate tasks from a role generator.
+
+        Args:
+            role_generator (Generator[Tuple, None, None]): A generator that
+                yields tuples of role names.
+
+        Returns:
+            Generator[str, None, None]: A generator that yields the task
+                prompts.
+        """
         raise NotImplementedError
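A minimal usage sketch for the generator documented above, assuming the default role files exist under data/ai_society/ in the working directory; the loop body is illustrative and not part of the diff.

from camel.generators import RoleNameGenerator

# Iterate over (assistant_role, user_role) pairs read from the default
# data/ai_society/*.txt files documented in the new docstring.
for assistant_role, user_role in RoleNameGenerator().from_role_files():
    print(assistant_role, user_role)
    break  # inspect only the first pair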
camel/interpreters/docker_interpreter.py
CHANGED

@@ -80,6 +80,11 @@ class DockerInterpreter(BaseInterpreter):
         self._container: Optional[Container] = None
 
     def __del__(self) -> None:
+        r"""Destructor for the DockerInterpreter class.
+
+        This method ensures that the Docker container is removed when the
+        interpreter is deleted.
+        """
         if self._container is not None:
             self._container.remove(force=True)
 
camel/interpreters/ipython_interpreter.py
CHANGED

@@ -105,7 +105,8 @@ class JupyterKernelInterpreter(BaseInterpreter):
                     outputs.append(msg_content["data"]["text/plain"])
                 if "image/png" in msg_content["data"]:
                     outputs.append(
-                        f"\n\n"
                     )
             except queue.Empty:
                 outputs.append("Time out")
camel/loaders/__init__.py
CHANGED

@@ -12,6 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
+from .apify_reader import Apify
 from .base_io import File
 from .chunkr_reader import ChunkrReader
 from .firecrawl_reader import Firecrawl
@@ -23,5 +24,6 @@ __all__ = [
     'UnstructuredIO',
     'JinaURLReader',
     'Firecrawl',
+    'Apify',
    'ChunkrReader',
 ]
camel/loaders/apify_reader.py
ADDED

@@ -0,0 +1,223 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+import os
+from typing import TYPE_CHECKING, List, Optional
+
+if TYPE_CHECKING:
+    from apify_client.clients import DatasetClient
+
+from camel.utils import api_keys_required
+
+
+class Apify:
+    r"""Apify is a platform that allows you to automate any web workflow.
+
+    Args:
+        api_key (Optional[str]): API key for authenticating with the Apify API.
+    """
+
+    @api_keys_required("APIFY_API_KEY")
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+    ) -> None:
+        from apify_client import ApifyClient
+
+        self._api_key = api_key or os.environ.get("APIFY_API_KEY")
+        self.client = ApifyClient(token=self._api_key)
+
+    def run_actor(
+        self,
+        actor_id: str,
+        run_input: Optional[dict] = None,
+        content_type: Optional[str] = None,
+        build: Optional[str] = None,
+        max_items: Optional[int] = None,
+        memory_mbytes: Optional[int] = None,
+        timeout_secs: Optional[int] = None,
+        webhooks: Optional[list] = None,
+        wait_secs: Optional[int] = None,
+    ) -> Optional[dict]:
+        r"""Run an actor on the Apify platform.
+
+        Args:
+            actor_id (str): The ID of the actor to run.
+            run_input (Optional[dict]): The input data for the actor. Defaults
+                to `None`.
+            content_type (str, optional): The content type of the input.
+            build (str, optional): Specifies the Actor build to run. It can be
+                either a build tag or build number. By default, the run uses
+                the build specified in the default run configuration for the
+                Actor (typically latest).
+            max_items (int, optional): Maximum number of results that will be
+                returned by this run. If the Actor is charged per result, you
+                will not be charged for more results than the given limit.
+            memory_mbytes (int, optional): Memory limit for the run, in
+                megabytes. By default, the run uses a memory limit specified in
+                the default run configuration for the Actor.
+            timeout_secs (int, optional): Optional timeout for the run, in
+                seconds. By default, the run uses timeout specified in the
+                default run configuration for the Actor.
+            webhooks (list, optional): Optional webhooks
+                (https://docs.apify.com/webhooks) associated with the Actor
+                run, which can be used to receive a notification, e.g. when the
+                Actor finished or failed. If you already have a webhook set up
+                for the Actor, you do not have to add it again here.
+            wait_secs (int, optional): The maximum number of seconds the server
+                waits for finish. If not provided, waits indefinitely.
+
+        Returns:
+            Optional[dict]: The output data from the actor if successful.
+            # please use the 'defaultDatasetId' to get the dataset
+
+        Raises:
+            RuntimeError: If the actor fails to run.
+        """
+        try:
+            return self.client.actor(actor_id).call(
+                run_input=run_input,
+                content_type=content_type,
+                build=build,
+                max_items=max_items,
+                memory_mbytes=memory_mbytes,
+                timeout_secs=timeout_secs,
+                webhooks=webhooks,
+                wait_secs=wait_secs,
+            )
+        except Exception as e:
+            raise RuntimeError(f"Failed to run actor {actor_id}: {e}") from e
+
+    def get_dataset_client(
+        self,
+        dataset_id: str,
+    ) -> "DatasetClient":
+        r"""Get a dataset client from the Apify platform.
+
+        Args:
+            dataset_id (str): The ID of the dataset to get the client for.
+
+        Returns:
+            DatasetClient: The dataset client.
+
+        Raises:
+            RuntimeError: If the dataset client fails to be retrieved.
+        """
+        try:
+            return self.client.dataset(dataset_id)
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to get dataset {dataset_id}: {e}"
+            ) from e
+
+    def get_dataset(
+        self,
+        dataset_id: str,
+    ) -> Optional[dict]:
+        r"""Get a dataset from the Apify platform.
+
+        Args:
+            dataset_id (str): The ID of the dataset to get.
+
+        Returns:
+            dict: The dataset.
+
+        Raises:
+            RuntimeError: If the dataset fails to be retrieved.
+        """
+        try:
+            return self.get_dataset_client(dataset_id).get()
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to get dataset {dataset_id}: {e}"
+            ) from e
+
+    def update_dataset(
+        self,
+        dataset_id: str,
+        name: str,
+    ) -> dict:
+        r"""Update a dataset on the Apify platform.
+
+        Args:
+            dataset_id (str): The ID of the dataset to update.
+            name (str): The new name for the dataset.
+
+        Returns:
+            dict: The updated dataset.
+
+        Raises:
+            RuntimeError: If the dataset fails to be updated.
+        """
+        try:
+            return self.get_dataset_client(dataset_id).update(name=name)
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to update dataset {dataset_id}: {e}"
+            ) from e
+
+    def get_dataset_items(
+        self,
+        dataset_id: str,
+    ) -> List:
+        r"""Get items from a dataset on the Apify platform.
+
+        Args:
+            dataset_id (str): The ID of the dataset to get items from.
+
+        Returns:
+            list: The items in the dataset.
+
+        Raises:
+            RuntimeError: If the items fail to be retrieved.
+        """
+        try:
+            items = self.get_dataset_client(dataset_id).list_items().items
+            return items
+        except Exception as e:
+            raise RuntimeError(
+                f"Failed to get dataset items {dataset_id}: {e}"
+            ) from e
+
+    def get_datasets(
+        self,
+        unnamed: Optional[bool] = None,
+        limit: Optional[int] = None,
+        offset: Optional[int] = None,
+        desc: Optional[bool] = None,
+    ) -> List[dict]:
+        r"""Get all named datasets from the Apify platform.
+
+        Args:
+            unnamed (bool, optional): Whether to include unnamed key-value
+                stores in the list
+            limit (int, optional): How many key-value stores to retrieve
+            offset (int, optional): What key-value store to include as first
+                when retrieving the list
+            desc (bool, optional): Whether to sort the key-value stores in
+                descending order based on their modification date
+
+        Returns:
+            List[dict]: The datasets.
+
+        Raises:
+            RuntimeError: If the datasets fail to be retrieved.
+        """
+        try:
+            return (
+                self.client.datasets()
+                .list(unnamed=unnamed, limit=limit, offset=offset, desc=desc)
+                .items
+            )
+        except Exception as e:
+            raise RuntimeError(f"Failed to get datasets: {e}") from e
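A minimal usage sketch for the new Apify loader, assuming APIFY_API_KEY is set in the environment; the actor ID and input below are illustrative, and the 'defaultDatasetId' lookup follows the note in the run_actor docstring above.

from camel.loaders import Apify

apify = Apify()  # picks up APIFY_API_KEY from the environment

# Illustrative actor ID and input; any Apify actor and its input schema fit here.
run = apify.run_actor(
    actor_id="apify/website-content-crawler",
    run_input={"startUrls": [{"url": "https://www.camel-ai.org/"}]},
)

# Fetch the scraped records from the run's default dataset.
items = apify.get_dataset_items(run["defaultDatasetId"])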
camel/memories/agent_memories.py
CHANGED

@@ -106,7 +106,18 @@ class VectorDBMemory(AgentMemory):
 
 class LongtermAgentMemory(AgentMemory):
     r"""An implementation of the :obj:`AgentMemory` abstract base class for
-
+    augmenting ChatHistoryMemory with VectorDBMemory.
+
+    Args:
+        context_creator (BaseContextCreator): A model context creator.
+        chat_history_block (Optional[ChatHistoryBlock], optional): A chat
+            history block. If `None`, a :obj:`ChatHistoryBlock` will be used.
+            (default: :obj:`None`)
+        vector_db_block (Optional[VectorDBBlock], optional): A vector database
+            block. If `None`, a :obj:`VectorDBBlock` will be used.
+            (default: :obj:`None`)
+        retrieve_limit (int, optional): The maximum number of messages
+            to be added into the context. (default: :obj:`3`)
     """
 
     def __init__(
@@ -123,9 +134,21 @@ class LongtermAgentMemory(AgentMemory):
         self._current_topic: str = ""
 
     def get_context_creator(self) -> BaseContextCreator:
+        r"""Returns the context creator used by the memory.
+
+        Returns:
+            BaseContextCreator: The context creator used by the memory.
+        """
         return self._context_creator
 
     def retrieve(self) -> List[ContextRecord]:
+        r"""Retrieves context records from both the chat history and the vector
+        database.
+
+        Returns:
+            List[ContextRecord]: A list of context records retrieved from both
+                the chat history and the vector database.
+        """
         chat_history = self.chat_history_block.retrieve()
         vector_db_retrieve = self.vector_db_block.retrieve(
             self._current_topic, self.retrieve_limit
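A rough sketch of constructing the documented LongtermAgentMemory; ScoreBasedContextCreator, OpenAITokenCounter, and the token limit are assumptions carried over from earlier CAMEL releases rather than taken from this diff.

from camel.memories import LongtermAgentMemory, ScoreBasedContextCreator
from camel.types import ModelType
from camel.utils import OpenAITokenCounter

# Default ChatHistoryBlock and VectorDBBlock are created internally;
# at most retrieve_limit=3 retrieved records are added to the context.
memory = LongtermAgentMemory(
    context_creator=ScoreBasedContextCreator(
        token_counter=OpenAITokenCounter(ModelType.GPT_4O_MINI),  # assumed counter
        token_limit=1024,  # illustrative limit
    ),
    retrieve_limit=3,
)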
camel/messages/base.py
CHANGED

@@ -81,6 +81,25 @@ class BaseMessage:
             OpenAIVisionDetailType, str
         ] = OpenAIVisionDetailType.LOW,
     ) -> "BaseMessage":
+        r"""Create a new user message.
+
+        Args:
+            role_name (str): The name of the user role.
+            content (str): The content of the message.
+            meta_dict (Optional[Dict[str, str]]): Additional metadata
+                dictionary for the message.
+            video_bytes (Optional[bytes]): Optional bytes of a video
+                associated with the message.
+            image_list (Optional[List[Image.Image]]): Optional list of PIL
+                Image objects associated with the message.
+            image_detail (Union[OpenAIVisionDetailType, str]): Detail level of
+                the images associated with the message.
+            video_detail (Union[OpenAIVisionDetailType, str]): Detail level of
+                the videos associated with the message.
+
+        Returns:
+            BaseMessage: The new user message.
+        """
         return cls(
             role_name,
             RoleType.USER,
@@ -107,6 +126,25 @@ class BaseMessage:
             OpenAIVisionDetailType, str
         ] = OpenAIVisionDetailType.LOW,
     ) -> "BaseMessage":
+        r"""Create a new assistant message.
+
+        Args:
+            role_name (str): The name of the assistant role.
+            content (str): The content of the message.
+            meta_dict (Optional[Dict[str, str]]): Additional metadata
+                dictionary for the message.
+            video_bytes (Optional[bytes]): Optional bytes of a video
+                associated with the message.
+            image_list (Optional[List[Image.Image]]): Optional list of PIL
+                Image objects associated with the message.
+            image_detail (Union[OpenAIVisionDetailType, str]): Detail level of
+                the images associated with the message.
+            video_detail (Union[OpenAIVisionDetailType, str]): Detail level of
+                the videos associated with the message.
+
+        Returns:
+            BaseMessage: The new assistant message.
+        """
         return cls(
             role_name,
             RoleType.ASSISTANT,
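The two docstrings above document BaseMessage's user and assistant factory classmethods; the method names below (make_user_message, make_assistant_message) come from CAMEL's public API rather than from the lines shown here, and the contents are illustrative. A minimal text-only sketch, with image and video arguments left at their defaults:

from camel.messages import BaseMessage

user_msg = BaseMessage.make_user_message(
    role_name="User",
    content="Summarize the changes in camel-ai 0.2.8.",  # illustrative content
)

assistant_msg = BaseMessage.make_assistant_message(
    role_name="Assistant",
    content="Here is a summary of the release...",  # illustrative content
)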
camel/models/__init__.py
CHANGED

@@ -24,11 +24,13 @@ from .ollama_model import OllamaModel
 from .openai_audio_models import OpenAIAudioModels
 from .openai_compatible_model import OpenAICompatibleModel
 from .openai_model import OpenAIModel
+from .qwen_model import QwenModel
 from .reka_model import RekaModel
 from .samba_model import SambaModel
 from .stub_model import StubModel
 from .togetherai_model import TogetherAIModel
 from .vllm_model import VLLMModel
+from .yi_model import YiModel
 from .zhipuai_model import ZhipuAIModel
 
 __all__ = [
@@ -51,4 +53,6 @@ __all__ = [
     'RekaModel',
     'SambaModel',
     'TogetherAIModel',
+    'YiModel',
+    'QwenModel',
 ]
camel/models/model_factory.py
CHANGED

@@ -23,11 +23,13 @@ from camel.models.mistral_model import MistralModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.models.openai_model import OpenAIModel
+from camel.models.qwen_model import QwenModel
 from camel.models.reka_model import RekaModel
 from camel.models.samba_model import SambaModel
 from camel.models.stub_model import StubModel
 from camel.models.togetherai_model import TogetherAIModel
 from camel.models.vllm_model import VLLMModel
+from camel.models.yi_model import YiModel
 from camel.models.zhipuai_model import ZhipuAIModel
 from camel.types import ModelPlatformType, ModelType, UnifiedModelType
 from camel.utils import BaseTokenCounter
@@ -106,6 +108,10 @@ class ModelFactory:
             model_class = MistralModel
         elif model_platform.is_reka and model_type.is_reka:
             model_class = RekaModel
+        elif model_platform.is_yi and model_type.is_yi:
+            model_class = YiModel
+        elif model_platform.is_qwen and model_type.is_qwen:
+            model_class = QwenModel
         elif model_type == ModelType.STUB:
             model_class = StubModel
 
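A rough sketch of selecting one of the new backends through the factory; the enum members below are assumptions, since the actual names live in the updated camel/types/enums.py, which this page does not expand.

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# ModelPlatformType.QWEN and ModelType.QWEN_TURBO are assumed enum members;
# the dispatch added above routes them to QwenModel (YiModel works the same way).
qwen_backend = ModelFactory.create(
    model_platform=ModelPlatformType.QWEN,
    model_type=ModelType.QWEN_TURBO,
)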
camel/models/openai_model.py
CHANGED

@@ -123,7 +123,7 @@ class OpenAIModel(BaseModelBackend):
 
         self.model_config_dict["temperature"] = 1.0
         self.model_config_dict["top_p"] = 1.0
-        self.model_config_dict["n"] = 1
+        self.model_config_dict["n"] = 1
         self.model_config_dict["presence_penalty"] = 0.0
         self.model_config_dict["frequency_penalty"] = 0.0