camel-ai 0.1.1__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -11
- camel/agents/__init__.py +7 -5
- camel/agents/chat_agent.py +134 -86
- camel/agents/critic_agent.py +28 -17
- camel/agents/deductive_reasoner_agent.py +235 -0
- camel/agents/embodied_agent.py +92 -40
- camel/agents/knowledge_graph_agent.py +221 -0
- camel/agents/role_assignment_agent.py +27 -17
- camel/agents/task_agent.py +60 -34
- camel/agents/tool_agents/base.py +0 -1
- camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
- camel/configs/__init__.py +29 -0
- camel/configs/anthropic_config.py +73 -0
- camel/configs/base_config.py +22 -0
- camel/{configs.py → configs/openai_config.py} +37 -64
- camel/embeddings/__init__.py +2 -0
- camel/embeddings/base.py +3 -2
- camel/embeddings/openai_embedding.py +10 -5
- camel/embeddings/sentence_transformers_embeddings.py +65 -0
- camel/functions/__init__.py +18 -3
- camel/functions/google_maps_function.py +335 -0
- camel/functions/math_functions.py +7 -7
- camel/functions/open_api_function.py +380 -0
- camel/functions/open_api_specs/coursera/__init__.py +13 -0
- camel/functions/open_api_specs/coursera/openapi.yaml +82 -0
- camel/functions/open_api_specs/klarna/__init__.py +13 -0
- camel/functions/open_api_specs/klarna/openapi.yaml +87 -0
- camel/functions/open_api_specs/speak/__init__.py +13 -0
- camel/functions/open_api_specs/speak/openapi.yaml +151 -0
- camel/functions/openai_function.py +346 -42
- camel/functions/retrieval_functions.py +61 -0
- camel/functions/search_functions.py +100 -35
- camel/functions/slack_functions.py +275 -0
- camel/functions/twitter_function.py +484 -0
- camel/functions/weather_functions.py +36 -23
- camel/generators.py +65 -46
- camel/human.py +17 -11
- camel/interpreters/__init__.py +25 -0
- camel/interpreters/base.py +49 -0
- camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
- camel/interpreters/interpreter_error.py +19 -0
- camel/interpreters/subprocess_interpreter.py +190 -0
- camel/loaders/__init__.py +22 -0
- camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
- camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
- camel/memories/__init__.py +17 -7
- camel/memories/agent_memories.py +156 -0
- camel/memories/base.py +97 -32
- camel/memories/blocks/__init__.py +21 -0
- camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
- camel/memories/blocks/vectordb_block.py +101 -0
- camel/memories/context_creators/__init__.py +3 -2
- camel/memories/context_creators/score_based.py +32 -20
- camel/memories/records.py +6 -5
- camel/messages/__init__.py +2 -2
- camel/messages/base.py +99 -16
- camel/messages/func_message.py +7 -4
- camel/models/__init__.py +6 -2
- camel/models/anthropic_model.py +146 -0
- camel/models/base_model.py +10 -3
- camel/models/model_factory.py +17 -11
- camel/models/open_source_model.py +25 -13
- camel/models/openai_audio_models.py +251 -0
- camel/models/openai_model.py +20 -13
- camel/models/stub_model.py +10 -5
- camel/prompts/__init__.py +7 -5
- camel/prompts/ai_society.py +21 -14
- camel/prompts/base.py +54 -47
- camel/prompts/code.py +22 -14
- camel/prompts/evaluation.py +8 -5
- camel/prompts/misalignment.py +26 -19
- camel/prompts/object_recognition.py +35 -0
- camel/prompts/prompt_templates.py +14 -8
- camel/prompts/role_description_prompt_template.py +16 -10
- camel/prompts/solution_extraction.py +9 -5
- camel/prompts/task_prompt_template.py +24 -21
- camel/prompts/translation.py +9 -5
- camel/responses/agent_responses.py +5 -2
- camel/retrievers/__init__.py +26 -0
- camel/retrievers/auto_retriever.py +330 -0
- camel/retrievers/base.py +69 -0
- camel/retrievers/bm25_retriever.py +140 -0
- camel/retrievers/cohere_rerank_retriever.py +108 -0
- camel/retrievers/vector_retriever.py +183 -0
- camel/societies/__init__.py +1 -1
- camel/societies/babyagi_playing.py +56 -32
- camel/societies/role_playing.py +188 -133
- camel/storages/__init__.py +18 -0
- camel/storages/graph_storages/__init__.py +23 -0
- camel/storages/graph_storages/base.py +82 -0
- camel/storages/graph_storages/graph_element.py +74 -0
- camel/storages/graph_storages/neo4j_graph.py +582 -0
- camel/storages/key_value_storages/base.py +1 -2
- camel/storages/key_value_storages/in_memory.py +1 -2
- camel/storages/key_value_storages/json.py +8 -13
- camel/storages/vectordb_storages/__init__.py +33 -0
- camel/storages/vectordb_storages/base.py +202 -0
- camel/storages/vectordb_storages/milvus.py +396 -0
- camel/storages/vectordb_storages/qdrant.py +373 -0
- camel/terminators/__init__.py +1 -1
- camel/terminators/base.py +2 -3
- camel/terminators/response_terminator.py +21 -12
- camel/terminators/token_limit_terminator.py +5 -3
- camel/toolkits/__init__.py +21 -0
- camel/toolkits/base.py +22 -0
- camel/toolkits/github_toolkit.py +245 -0
- camel/types/__init__.py +18 -6
- camel/types/enums.py +129 -15
- camel/types/openai_types.py +10 -5
- camel/utils/__init__.py +20 -13
- camel/utils/commons.py +170 -85
- camel/utils/token_counting.py +135 -15
- {camel_ai-0.1.1.dist-info → camel_ai-0.1.4.dist-info}/METADATA +123 -75
- camel_ai-0.1.4.dist-info/RECORD +119 -0
- {camel_ai-0.1.1.dist-info → camel_ai-0.1.4.dist-info}/WHEEL +1 -1
- camel/memories/context_creators/base.py +0 -72
- camel_ai-0.1.1.dist-info/RECORD +0 -75
camel/models/openai_audio_models.py
ADDED
@@ -0,0 +1,251 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+import os
+from typing import Any, List, Optional, Union
+
+from openai import OpenAI, _legacy_response
+
+from camel.types import AudioModelType, VoiceType
+
+
+class OpenAIAudioModels:
+    r"""Provides access to OpenAI's Text-to-Speech (TTS) and Speech-to-Text
+    (STT) models."""
+
+    def __init__(
+        self,
+    ) -> None:
+        r"""Initialize an instance of OpenAI."""
+        url = os.environ.get('OPENAI_API_BASE_URL')
+        self._client = OpenAI(timeout=120, max_retries=3, base_url=url)
+
+    def text_to_speech(
+        self,
+        input: str,
+        model_type: AudioModelType = AudioModelType.TTS_1,
+        voice: VoiceType = VoiceType.ALLOY,
+        storage_path: Optional[str] = None,
+        **kwargs: Any,
+    ) -> Union[
+        List[_legacy_response.HttpxBinaryResponseContent],
+        _legacy_response.HttpxBinaryResponseContent,
+    ]:
+        r"""Convert text to speech using OpenAI's TTS model. This method
+        converts the given input text to speech using the specified model and
+        voice.
+
+        Args:
+            input (str): The text to be converted to speech.
+            model_type (AudioModelType, optional): The TTS model to use.
+                Defaults to `AudioModelType.TTS_1`.
+            voice (VoiceType, optional): The voice to be used for generating
+                speech. Defaults to `VoiceType.ALLOY`.
+            storage_path (str, optional): The local path to store the
+                generated speech file if provided, defaults to `None`.
+            **kwargs (Any): Extra kwargs passed to the TTS API.
+
+        Returns:
+            Union[List[_legacy_response.HttpxBinaryResponseContent],
+                _legacy_response.HttpxBinaryResponseContent]: A list of response
+                content objects from OpenAI if the input exceeds 4096
+                characters, or a single response content object otherwise.
+
+        Raises:
+            Exception: If there's an error during the TTS API call.
+        """
+        try:
+            # The model only supports at most 4096 characters per request.
+            max_chunk_size = 4095
+            audio_chunks = []
+            chunk_index = 0
+            if len(input) > max_chunk_size:
+                while input:
+                    if len(input) <= max_chunk_size:
+                        chunk = input
+                        input = ''
+                    else:
+                        # Find the nearest period before the chunk size limit
+                        while input[max_chunk_size - 1] != '.':
+                            max_chunk_size -= 1
+
+                        chunk = input[:max_chunk_size]
+                        input = input[max_chunk_size:].lstrip()
+
+                    response = self._client.audio.speech.create(
+                        model=model_type.value,
+                        voice=voice.value,
+                        input=chunk,
+                        **kwargs,
+                    )
+                    if storage_path:
+                        try:
+                            # Create a new storage path for each chunk
+                            file_name, file_extension = os.path.splitext(
+                                storage_path
+                            )
+                            new_storage_path = (
+                                f"{file_name}_{chunk_index}{file_extension}"
+                            )
+                            response.write_to_file(new_storage_path)
+                            chunk_index += 1
+                        except Exception as e:
+                            raise Exception(
+                                "Error during writing the file"
+                            ) from e
+
+                    audio_chunks.append(response)
+                return audio_chunks
+
+            else:
+                response = self._client.audio.speech.create(
+                    model=model_type.value,
+                    voice=voice.value,
+                    input=input,
+                    **kwargs,
+                )
+
+                if storage_path:
+                    try:
+                        response.write_to_file(storage_path)
+                    except Exception as e:
+                        raise Exception("Error during writing the file") from e
+
+                return response
+
+        except Exception as e:
+            raise Exception("Error during TTS API call") from e
+
+    def _split_audio(
+        self, audio_file_path: str, chunk_size_mb: int = 24
+    ) -> list:
+        r"""Split the audio file into smaller chunks, since the Whisper API
+        only supports files that are less than 25 MB.
+
+        Args:
+            audio_file_path (str): Path to the input audio file.
+            chunk_size_mb (int, optional): Size of each chunk in megabytes.
+                Defaults to `24`.
+
+        Returns:
+            list: List of paths to the split audio files.
+        """
+        from pydub import AudioSegment
+
+        audio = AudioSegment.from_file(audio_file_path)
+        audio_format = os.path.splitext(audio_file_path)[1][1:].lower()
+
+        # Calculate chunk size in bytes
+        chunk_size_bytes = chunk_size_mb * 1024 * 1024
+
+        # Number of chunks needed
+        num_chunks = os.path.getsize(audio_file_path) // chunk_size_bytes + 1
+
+        # Create a directory to store the chunks
+        output_dir = os.path.splitext(audio_file_path)[0] + "_chunks"
+        os.makedirs(output_dir, exist_ok=True)
+
+        # Get audio chunk length in milliseconds
+        chunk_size_milliseconds = len(audio) // num_chunks
+
+        # Split the audio into chunks
+        split_files = []
+        for i in range(num_chunks):
+            start = i * chunk_size_milliseconds
+            end = (i + 1) * chunk_size_milliseconds
+            if i + 1 == num_chunks:
+                chunk = audio[start:]
+            else:
+                chunk = audio[start:end]
+            # Create new chunk path
+            chunk_path = os.path.join(output_dir, f"chunk_{i}.{audio_format}")
+            chunk.export(chunk_path, format=audio_format)
+            split_files.append(chunk_path)
+        return split_files
+
+    def speech_to_text(
+        self,
+        audio_file_path: str,
+        translate_into_english: bool = False,
+        **kwargs: Any,
+    ) -> str:
+        r"""Convert speech audio to text.
+
+        Args:
+            audio_file_path (str): The audio file path, supporting one of
+                these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or
+                webm.
+            translate_into_english (bool, optional): Whether to translate the
+                speech into English. Defaults to `False`.
+            **kwargs (Any): Extra keyword arguments passed to the
+                Speech-to-Text (STT) API.
+
+        Returns:
+            str: The output text.
+
+        Raises:
+            ValueError: If the audio file format is not supported.
+            Exception: If there's an error during the STT API call.
+        """
+        supported_formats = [
+            "flac",
+            "mp3",
+            "mp4",
+            "mpeg",
+            "mpga",
+            "m4a",
+            "ogg",
+            "wav",
+            "webm",
+        ]
+        file_format = audio_file_path.split(".")[-1].lower()
+
+        if file_format not in supported_formats:
+            raise ValueError(f"Unsupported audio file format: {file_format}")
+        try:
+            if os.path.getsize(audio_file_path) > 24 * 1024 * 1024:
+                # Split audio into chunks
+                audio_chunks = self._split_audio(audio_file_path)
+                texts = []
+                for chunk_path in audio_chunks:
+                    audio_data = open(chunk_path, "rb")
+                    if translate_into_english:
+                        translation = self._client.audio.translations.create(
+                            model="whisper-1", file=audio_data, **kwargs
+                        )
+                        texts.append(translation.text)
+                    else:
+                        transcription = (
+                            self._client.audio.transcriptions.create(
+                                model="whisper-1", file=audio_data, **kwargs
+                            )
+                        )
+                        texts.append(transcription.text)
+                    os.remove(chunk_path)  # Delete temporary chunk file
+                return " ".join(texts)
+            else:
+                # Process the entire audio file
+                audio_data = open(audio_file_path, "rb")
+
+                if translate_into_english:
+                    translation = self._client.audio.translations.create(
+                        model="whisper-1", file=audio_data, **kwargs
+                    )
+                    return translation.text
+                else:
+                    transcription = self._client.audio.transcriptions.create(
+                        model="whisper-1", file=audio_data, **kwargs
+                    )
+                    return transcription.text
+        except Exception as e:
+            raise Exception("Error during STT API call") from e
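The new OpenAIAudioModels class covers both directions of audio conversion. A minimal usage sketch, assuming OPENAI_API_KEY is set in the environment and that the class is re-exported from camel.models (as the updated camel/models/__init__.py suggests); the file path is a placeholder:

from camel.models import OpenAIAudioModels
from camel.types import AudioModelType, VoiceType

audio_models = OpenAIAudioModels()  # assumes OPENAI_API_KEY is set

# Text-to-speech: returns a single response, or a list of responses when the
# input exceeds 4096 characters; storage_path optionally writes audio to disk.
audio_models.text_to_speech(
    input="Hello from CAMEL!",
    model_type=AudioModelType.TTS_1,
    voice=VoiceType.ALLOY,
    storage_path="hello.mp3",  # placeholder path
)

# Speech-to-text: transcribes the file, or translates it into English when
# translate_into_english=True; files over 24 MB are split into chunks first.
text = audio_models.speech_to_text("hello.mp3")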
camel/models/openai_model.py
CHANGED
@@ -16,22 +16,22 @@ from typing import Any, Dict, List, Optional, Union

 from openai import OpenAI, Stream

-from camel.configs import
+from camel.configs import OPENAI_API_PARAMS
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
 from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
-from camel.utils import (
-    BaseTokenCounter,
-    OpenAITokenCounter,
-    openai_api_key_required,
-)
+from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_key_required


 class OpenAIModel(BaseModelBackend):
     r"""OpenAI API in a unified BaseModelBackend interface."""

-    def __init__(
-
+    def __init__(
+        self,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+    ) -> None:
         r"""Constructor for OpenAI backend.

         Args:
@@ -39,10 +39,15 @@ class OpenAIModel(BaseModelBackend):
                 one of GPT_* series.
             model_config_dict (Dict[str, Any]): A dictionary that will
                 be fed into openai.ChatCompletion.create().
+            api_key (Optional[str]): The API key for authenticating with the
+                OpenAI service. (default: :obj:`None`)
         """
         super().__init__(model_type, model_config_dict)
         url = os.environ.get('OPENAI_API_BASE_URL', None)
-        self.
+        self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
+        self._client = OpenAI(
+            timeout=60, max_retries=3, base_url=url, api_key=self._api_key
+        )
         self._token_counter: Optional[BaseTokenCounter] = None

     @property
@@ -57,7 +62,7 @@ class OpenAIModel(BaseModelBackend):
         self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter

-    @openai_api_key_required
+    @api_key_required
     def run(
         self,
         messages: List[OpenAIMessage],
@@ -89,9 +94,11 @@ class OpenAIModel(BaseModelBackend):
             unexpected arguments to OpenAI API.
         """
         for param in self.model_config_dict:
-            if param not in
-                raise ValueError(
-
+            if param not in OPENAI_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into OpenAI model backend."
+                )

     @property
     def stream(self) -> bool:
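In practice, this change lets callers pass the API key explicitly instead of relying only on the OPENAI_API_KEY environment variable. A minimal sketch (the key string and config values are placeholders):

from camel.models import OpenAIModel
from camel.types import ModelType

# api_key is new in 0.1.4; when omitted, the backend falls back to the
# OPENAI_API_KEY environment variable.
model = OpenAIModel(
    model_type=ModelType.GPT_3_5_TURBO,
    model_config_dict={"temperature": 0.2},  # keys must be in OPENAI_API_PARAMS
    api_key="sk-...",  # placeholder
)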
camel/models/stub_model.py
CHANGED
@@ -30,7 +30,6 @@ from camel.utils import BaseTokenCounter


 class StubTokenCounter(BaseTokenCounter):
-
     def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
         r"""Token counting for STUB models, directly returning a constant.

@@ -47,13 +46,19 @@ class StubTokenCounter(BaseTokenCounter):

 class StubModel(BaseModelBackend):
     r"""A dummy model used for unit tests."""
+
     model_type = ModelType.STUB

-    def __init__(
-
+    def __init__(
+        self,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+    ) -> None:
         r"""All arguments are unused for the dummy model."""
         super().__init__(model_type, model_config_dict)
         self._token_counter: Optional[BaseTokenCounter] = None
+        self._api_key = api_key

     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -90,6 +95,7 @@ class StubModel(BaseModelBackend):
                         content=ARBITRARY_STRING,
                         role="assistant",
                     ),
+                    logprobs=None,
                 )
             ],
             usage=CompletionUsage(
@@ -101,6 +107,5 @@ class StubModel(BaseModelBackend):
         return response

     def check_model_config(self):
-        r"""Directly pass the check on arguments to STUB model.
-        """
+        r"""Directly pass the check on arguments to STUB model."""
         pass
camel/prompts/__init__.py
CHANGED
@@ -11,16 +11,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from .base import TextPrompt, CodePrompt, TextPromptDict
 from .ai_society import AISocietyPromptTemplateDict
+from .base import CodePrompt, TextPrompt, TextPromptDict
 from .code import CodePromptTemplateDict
-from .misalignment import MisalignmentPromptTemplateDict
-from .translation import TranslationPromptTemplateDict
-from .solution_extraction import SolutionExtractionPromptTemplateDict
 from .evaluation import EvaluationPromptTemplateDict
+from .misalignment import MisalignmentPromptTemplateDict
+from .object_recognition import ObjectRecognitionPromptTemplateDict
+from .prompt_templates import PromptTemplateGenerator
 from .role_description_prompt_template import RoleDescriptionPromptTemplateDict
+from .solution_extraction import SolutionExtractionPromptTemplateDict
 from .task_prompt_template import TaskPromptTemplateDict
-from .prompt_templates import PromptTemplateGenerator
+from .translation import TranslationPromptTemplateDict

 __all__ = [
     'TextPrompt',
@@ -35,4 +36,5 @@ __all__ = [
     'TaskPromptTemplateDict',
     'PromptTemplateGenerator',
     'SolutionExtractionPromptTemplateDict',
+    'ObjectRecognitionPromptTemplateDict',
 ]
camel/prompts/ai_society.py
CHANGED
@@ -13,7 +13,7 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from typing import Any

-from camel.prompts import TextPrompt, TextPromptDict
+from camel.prompts.base import TextPrompt, TextPromptDict
 from camel.types import RoleType


@@ -38,19 +38,23 @@ class AISocietyPromptTemplateDict(TextPromptDict):
         outlines the rules of the conversation and provides instructions
         for giving instructions to the AI assistant.
     """
+
     GENERATE_ASSISTANTS = TextPrompt(
         """You are a helpful assistant that can play many different roles.
 Now please list {num_roles} different roles that you can play with your expertise in diverse fields.
-Sort them by alphabetical order. No explanation required.""")
+Sort them by alphabetical order. No explanation required."""
+    )

     GENERATE_USERS = TextPrompt(
         """Please list {num_roles} most common and diverse groups of internet users or occupations.
 Use singular form. No explanation.
-Sort them by alphabetical order. No explanation required.""")
+Sort them by alphabetical order. No explanation required."""
+    )

     GENERATE_TASKS = TextPrompt(
         """List {num_tasks} diverse tasks that {assistant_role} can assist {user_role} cooperatively to achieve together.
-Be concise. Be creative.""")
+Be concise. Be creative."""
+    )

     TASK_SPECIFY_PROMPT = TextPrompt(
         """Here is a task that {assistant_role} will help {user_role} to complete: {task}.
@@ -106,16 +110,19 @@ Never say <CAMEL_TASK_DONE> unless my responses have solved your task.""")
 """You are a {critic_role} who teams up with a {user_role} and a {assistant_role} to solve a task: {task}.
 Your job is to select an option from their proposals and provides your explanations.
 Your selection criteria are {criteria}.
-You always have to choose an option from the proposals.""")
+You always have to choose an option from the proposals."""
+    )

     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)
-        self.update({
-            "generate_assistants": self.GENERATE_ASSISTANTS,
-            "generate_users": self.GENERATE_USERS,
-            "generate_tasks": self.GENERATE_TASKS,
-            "task_specify_prompt": self.TASK_SPECIFY_PROMPT,
-            RoleType.ASSISTANT: self.ASSISTANT_PROMPT,
-            RoleType.USER: self.USER_PROMPT,
-            RoleType.CRITIC: self.CRITIC_PROMPT,
-        })
+        self.update(
+            {
+                "generate_assistants": self.GENERATE_ASSISTANTS,
+                "generate_users": self.GENERATE_USERS,
+                "generate_tasks": self.GENERATE_TASKS,
+                "task_specify_prompt": self.TASK_SPECIFY_PROMPT,
+                RoleType.ASSISTANT: self.ASSISTANT_PROMPT,
+                RoleType.USER: self.USER_PROMPT,
+                RoleType.CRITIC: self.CRITIC_PROMPT,
+            }
+        )
camel/prompts/base.py
CHANGED
@@ -12,20 +12,11 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import inspect
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    List,
-    Optional,
-    Set,
-    Tuple,
-    TypeVar,
-    Union,
-)
+from typing import Any, Callable, Dict, Optional, Set, TypeVar, Union

+from camel.interpreters import BaseInterpreter, SubprocessInterpreter
 from camel.types import RoleType
-from camel.utils import
+from camel.utils import get_system_information

 T = TypeVar('T')

@@ -63,8 +54,11 @@ def return_prompt_wrapper(
             return cls(result)
         elif isinstance(result, tuple):
             new_result = tuple(
-                cls(item)
-
+                cls(item)
+                if isinstance(item, str) and not isinstance(item, cls)
+                else item
+                for item in result
+            )
             return new_result
         return result
@@ -107,9 +101,9 @@ class TextPrompt(str):

     @property
     def key_words(self) -> Set[str]:
-        r"""Returns a set of strings representing the keywords in the prompt.
-        """
+        r"""Returns a set of strings representing the keywords in the prompt."""
         from camel.utils import get_prompt_template_key_words
+
         return get_prompt_template_key_words(self)

     def format(self, *args: Any, **kwargs: Any) -> 'TextPrompt':
@@ -172,55 +166,68 @@ class CodePrompt(TextPrompt):
         self._code_type = code_type

     def execute(
-        self,
-
-
-
+        self,
+        interpreter: Optional[BaseInterpreter] = None,
+        **kwargs: Any,
+    ) -> str:
+        r"""Executes the code string using the provided interpreter.
+
+        This method runs a code string through either a specified interpreter
+        or a default one. It supports additional keyword arguments for
+        flexibility.

         Args:
-            interpreter (
-
-
-
+            interpreter (Optional[BaseInterpreter]): The interpreter instance
+                to use for execution. If `None`, a default interpreter is used.
+                (default: :obj:`None`)
+            **kwargs: Additional keyword arguments passed to the interpreter to
+                run the code.

         Returns:
-
-
-
-
-
-
-
-
-
-
-
-
+            str: The result of the code execution. If the execution fails, this
+                should include sufficient information to diagnose and correct
+                the issue.
+
+        Raises:
+            InterpreterError: If the code execution encounters errors that
+                could be resolved by modifying or regenerating the code.
+        """
+        if interpreter is None:
+            execution_res = SubprocessInterpreter().run(
+                self, self._code_type, **kwargs
+            )
+        else:
+            execution_res = interpreter.run(self, self._code_type, **kwargs)
+        return execution_res


 # flake8: noqa :E501
 class TextPromptDict(Dict[Any, TextPrompt]):
-    r"""A dictionary class that maps from key to :obj:`TextPrompt` object.
-    """
+    r"""A dictionary class that maps from key to :obj:`TextPrompt` object."""
+
     EMBODIMENT_PROMPT = TextPrompt(
-        "
+        "System information :"
+        + "\n".join(
+            f"{key}: {value}" for key, value in get_system_information().items()
+        )
+        + "\n"
+        + """You are the physical embodiment of the {role} who is working on solving a task: {task}.
 You can do things in the physical world including browsing the Internet, reading documents, drawing images, creating videos, executing code and so on.
 Your job is to perform the physical actions necessary to interact with the physical world.
 You will receive thoughts from the {role} and you will need to perform the actions described in the thoughts.
-You can write a series of simple commands in
-You can perform a set of actions by calling the available
+You can write a series of simple commands in to act.
+You can perform a set of actions by calling the available functions.
 You should perform actions based on the descriptions of the functions.

-Here is your action space:
+Here is your action space but it is not limited:
 {action_space}

-You should only perform actions in the action space.
 You can perform multiple actions.
 You can perform actions in any order.
-First, explain the actions you will perform and your reasons, then write
-If you decide to perform actions, you must write
-You may print intermediate results if necessary.""")
+First, explain the actions you will perform and your reasons, then write code to implement your actions.
+If you decide to perform actions, you must write code to implement the actions.
+You may print intermediate results if necessary."""
+    )

     def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)