camel-ai 0.1.1__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic. Click here for more details.
- camel/__init__.py +1 -11
- camel/agents/__init__.py +7 -5
- camel/agents/chat_agent.py +134 -86
- camel/agents/critic_agent.py +28 -17
- camel/agents/deductive_reasoner_agent.py +235 -0
- camel/agents/embodied_agent.py +92 -40
- camel/agents/knowledge_graph_agent.py +221 -0
- camel/agents/role_assignment_agent.py +27 -17
- camel/agents/task_agent.py +60 -34
- camel/agents/tool_agents/base.py +0 -1
- camel/agents/tool_agents/hugging_face_tool_agent.py +7 -4
- camel/configs/__init__.py +29 -0
- camel/configs/anthropic_config.py +73 -0
- camel/configs/base_config.py +22 -0
- camel/{configs.py → configs/openai_config.py} +37 -64
- camel/embeddings/__init__.py +2 -0
- camel/embeddings/base.py +3 -2
- camel/embeddings/openai_embedding.py +10 -5
- camel/embeddings/sentence_transformers_embeddings.py +65 -0
- camel/functions/__init__.py +18 -3
- camel/functions/google_maps_function.py +335 -0
- camel/functions/math_functions.py +7 -7
- camel/functions/open_api_function.py +380 -0
- camel/functions/open_api_specs/coursera/__init__.py +13 -0
- camel/functions/open_api_specs/coursera/openapi.yaml +82 -0
- camel/functions/open_api_specs/klarna/__init__.py +13 -0
- camel/functions/open_api_specs/klarna/openapi.yaml +87 -0
- camel/functions/open_api_specs/speak/__init__.py +13 -0
- camel/functions/open_api_specs/speak/openapi.yaml +151 -0
- camel/functions/openai_function.py +346 -42
- camel/functions/retrieval_functions.py +61 -0
- camel/functions/search_functions.py +100 -35
- camel/functions/slack_functions.py +275 -0
- camel/functions/twitter_function.py +484 -0
- camel/functions/weather_functions.py +36 -23
- camel/generators.py +65 -46
- camel/human.py +17 -11
- camel/interpreters/__init__.py +25 -0
- camel/interpreters/base.py +49 -0
- camel/{utils/python_interpreter.py → interpreters/internal_python_interpreter.py} +129 -48
- camel/interpreters/interpreter_error.py +19 -0
- camel/interpreters/subprocess_interpreter.py +190 -0
- camel/loaders/__init__.py +22 -0
- camel/{functions/base_io_functions.py → loaders/base_io.py} +38 -35
- camel/{functions/unstructured_io_fuctions.py → loaders/unstructured_io.py} +199 -110
- camel/memories/__init__.py +17 -7
- camel/memories/agent_memories.py +156 -0
- camel/memories/base.py +97 -32
- camel/memories/blocks/__init__.py +21 -0
- camel/memories/{chat_history_memory.py → blocks/chat_history_block.py} +34 -34
- camel/memories/blocks/vectordb_block.py +101 -0
- camel/memories/context_creators/__init__.py +3 -2
- camel/memories/context_creators/score_based.py +32 -20
- camel/memories/records.py +6 -5
- camel/messages/__init__.py +2 -2
- camel/messages/base.py +99 -16
- camel/messages/func_message.py +7 -4
- camel/models/__init__.py +6 -2
- camel/models/anthropic_model.py +146 -0
- camel/models/base_model.py +10 -3
- camel/models/model_factory.py +17 -11
- camel/models/open_source_model.py +25 -13
- camel/models/openai_audio_models.py +251 -0
- camel/models/openai_model.py +20 -13
- camel/models/stub_model.py +10 -5
- camel/prompts/__init__.py +7 -5
- camel/prompts/ai_society.py +21 -14
- camel/prompts/base.py +54 -47
- camel/prompts/code.py +22 -14
- camel/prompts/evaluation.py +8 -5
- camel/prompts/misalignment.py +26 -19
- camel/prompts/object_recognition.py +35 -0
- camel/prompts/prompt_templates.py +14 -8
- camel/prompts/role_description_prompt_template.py +16 -10
- camel/prompts/solution_extraction.py +9 -5
- camel/prompts/task_prompt_template.py +24 -21
- camel/prompts/translation.py +9 -5
- camel/responses/agent_responses.py +5 -2
- camel/retrievers/__init__.py +26 -0
- camel/retrievers/auto_retriever.py +330 -0
- camel/retrievers/base.py +69 -0
- camel/retrievers/bm25_retriever.py +140 -0
- camel/retrievers/cohere_rerank_retriever.py +108 -0
- camel/retrievers/vector_retriever.py +183 -0
- camel/societies/__init__.py +1 -1
- camel/societies/babyagi_playing.py +56 -32
- camel/societies/role_playing.py +188 -133
- camel/storages/__init__.py +18 -0
- camel/storages/graph_storages/__init__.py +23 -0
- camel/storages/graph_storages/base.py +82 -0
- camel/storages/graph_storages/graph_element.py +74 -0
- camel/storages/graph_storages/neo4j_graph.py +582 -0
- camel/storages/key_value_storages/base.py +1 -2
- camel/storages/key_value_storages/in_memory.py +1 -2
- camel/storages/key_value_storages/json.py +8 -13
- camel/storages/vectordb_storages/__init__.py +33 -0
- camel/storages/vectordb_storages/base.py +202 -0
- camel/storages/vectordb_storages/milvus.py +396 -0
- camel/storages/vectordb_storages/qdrant.py +373 -0
- camel/terminators/__init__.py +1 -1
- camel/terminators/base.py +2 -3
- camel/terminators/response_terminator.py +21 -12
- camel/terminators/token_limit_terminator.py +5 -3
- camel/toolkits/__init__.py +21 -0
- camel/toolkits/base.py +22 -0
- camel/toolkits/github_toolkit.py +245 -0
- camel/types/__init__.py +18 -6
- camel/types/enums.py +129 -15
- camel/types/openai_types.py +10 -5
- camel/utils/__init__.py +20 -13
- camel/utils/commons.py +170 -85
- camel/utils/token_counting.py +135 -15
- {camel_ai-0.1.1.dist-info → camel_ai-0.1.4.dist-info}/METADATA +123 -75
- camel_ai-0.1.4.dist-info/RECORD +119 -0
- {camel_ai-0.1.1.dist-info → camel_ai-0.1.4.dist-info}/WHEEL +1 -1
- camel/memories/context_creators/base.py +0 -72
- camel_ai-0.1.1.dist-info/RECORD +0 -75
|
@@ -0,0 +1,245 @@
|
|
|
1
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
+
|
|
15
|
+
import os
|
|
16
|
+
from dataclasses import dataclass
|
|
17
|
+
from typing import List, Optional
|
|
18
|
+
|
|
19
|
+
from camel.functions import OpenAIFunction
|
|
20
|
+
|
|
21
|
+
from .base import BaseToolkit
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class GithubIssue:
    r"""Represents a GitHub issue.

    Declared as true dataclass fields (rather than a hand-written
    ``__init__`` with no class-level annotations) so that the generated
    ``__init__``, ``__repr__`` and ``__eq__`` actually see the data; with
    zero fields the generated ``__eq__`` considered ALL instances equal.
    The constructor signature (name, order, kinds) is unchanged.

    Attributes:
        title (str): The title of the issue.
        body (str): The body/content of the issue.
        number (int): The issue number.
        file_path (str): The path of the file associated with the issue.
        file_content (str): The content of the file associated with the issue.
    """

    title: str
    body: str
    number: int
    file_path: str
    file_content: str

    def summary(self) -> str:
        r"""Returns a summary of the issue.

        Returns:
            str: A string containing the title, body, number, file path,
                and file content of the issue.
        """
        return (
            f"Title: {self.title}\n"
            f"Body: {self.body}\n"
            f"Number: {self.number}\n"
            f"File Path: {self.file_path}\n"
            f"File Content: {self.file_content}"
        )
|
|
73
|
+
|
|
74
|
+
class GithubToolkit(BaseToolkit):
    r"""A class representing a toolkit for interacting with GitHub
    repositories.

    This class provides methods for retrieving open issues, retrieving
    specific issues, and creating pull requests in a GitHub repository.

    Args:
        repo_name (str): The name of the GitHub repository.
        access_token (str, optional): The access token to authenticate
            with GitHub. If not provided, it will be obtained using the
            `get_github_access_token` method.
    """

    def __init__(
        self, repo_name: str, access_token: Optional[str] = None
    ) -> None:
        r"""Initializes a new instance of the GitHubToolkit class.

        Args:
            repo_name (str): The name of the GitHub repository.
            access_token (str, optional): The access token to authenticate
                with GitHub. If not provided, it will be obtained using the
                `get_github_access_token` method.
        """
        if access_token is None:
            access_token = self.get_github_access_token()

        try:
            from github import Auth, Github
        except ImportError:
            # The import name is `github`, but the PyPI distribution is
            # `PyGithub`. The previous message told users to install
            # `wikipedia` — a copy-paste error from another toolkit.
            raise ImportError(
                "Please install `PyGithub` first. You can install it by "
                "running `pip install PyGithub`."
            )
        self.github = Github(auth=Auth.Token(access_token))
        self.repo = self.github.get_repo(repo_name)

    def get_tools(self) -> List[OpenAIFunction]:
        r"""Returns a list of OpenAIFunction objects representing the
        functions in the toolkit.

        Returns:
            List[OpenAIFunction]: A list of OpenAIFunction objects
                representing the functions in the toolkit.
        """
        return [
            OpenAIFunction(self.retrieve_issue_list),
            OpenAIFunction(self.retrieve_issue),
            OpenAIFunction(self.create_pull_request),
        ]

    def get_github_access_token(self) -> str:
        r"""Retrieve the GitHub access token from environment variables.

        Returns:
            str: A string containing the GitHub access token.

        Raises:
            ValueError: If `GITHUB_ACCESS_TOKEN` is not found in the
                environment variables.
        """
        # Get `GITHUB_ACCESS_TOKEN` here: https://github.com/settings/tokens
        GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN")

        if not GITHUB_ACCESS_TOKEN:
            raise ValueError(
                "`GITHUB_ACCESS_TOKEN` not found in environment variables. "
                "Get it here: `https://github.com/settings/tokens`."
            )
        return GITHUB_ACCESS_TOKEN

    def retrieve_issue_list(self) -> List[GithubIssue]:
        r"""Retrieve a list of open issues from the repository.

        Pull requests are excluded from the result.

        Returns:
            A list of GithubIssue objects representing the open issues.
        """
        issues = self.repo.get_issues(state='open')
        return [
            GithubIssue(
                title=issue.title,
                body=issue.body,
                number=issue.number,
                # For now we require the file path to be the first label
                # on the issue; an unlabeled issue will raise IndexError.
                file_path=issue.labels[0].name,
                file_content=self.retrieve_file_content(issue.labels[0].name),
            )
            for issue in issues
            if not issue.pull_request
        ]

    def retrieve_issue(self, issue_number: int) -> Optional[str]:
        r"""Retrieves an issue from a GitHub repository.

        This function retrieves an issue from a specified repository using
        the issue number.

        Args:
            issue_number (int): The number of the issue to retrieve.

        Returns:
            str: A formatted report of the retrieved issue, or None if no
                open issue has the given number.
        """
        issues = self.retrieve_issue_list()
        for issue in issues:
            if issue.number == issue_number:
                return issue.summary()
        return None

    def create_pull_request(
        self,
        file_path: str,
        new_content: str,
        pr_title: str,
        body: str,
        branch_name: str,
    ) -> str:
        r"""Creates a pull request.

        This function creates a pull request in specified repository, which
        updates a file in the specific path with new content. The pull
        request description contains information about the issue title and
        number.

        Args:
            file_path (str): The path of the file to be updated in the
                repository.
            new_content (str): The specified new content of the specified
                file.
            pr_title (str): The title of the issue that is solved by this
                pull request.
            body (str): The commit message for the pull request.
            branch_name (str): The name of the branch to create and submit
                the pull request from.

        Returns:
            str: A formatted report of whether the pull request was created
                successfully or not.

        Raises:
            ValueError: If `file_path` resolves to more than one file
                (multi-file PRs are not supported yet).
        """
        # Branch the new work off the default branch's current head.
        sb = self.repo.get_branch(self.repo.default_branch)
        self.repo.create_git_ref(
            ref=f"refs/heads/{branch_name}", sha=sb.commit.sha
        )

        file = self.repo.get_contents(file_path)
        # Imported lazily so the toolkit module can load without PyGithub.
        from github.ContentFile import ContentFile

        if isinstance(file, ContentFile):
            self.repo.update_file(
                file.path, body, new_content, file.sha, branch=branch_name
            )
            pr = self.repo.create_pull(
                title=pr_title,
                body=body,
                head=branch_name,
                base=self.repo.default_branch,
            )

            if pr is not None:
                return f"Title: {pr.title}\n" f"Body: {pr.body}\n"
            else:
                return "Failed to create pull request."
        else:
            raise ValueError("PRs with multiple files aren't supported yet.")

    def retrieve_file_content(self, file_path: str) -> str:
        r"""Retrieves the content of a file from the GitHub repository.

        Args:
            file_path (str): The path of the file to retrieve.

        Returns:
            str: The decoded content of the file.

        Raises:
            ValueError: If `file_path` resolves to more than one file
                (multi-file PRs are not supported yet).
        """
        file_content = self.repo.get_contents(file_path)

        from github.ContentFile import ContentFile

        if isinstance(file_content, ContentFile):
            return file_content.decoded_content.decode()
        else:
            raise ValueError("PRs with multiple files aren't supported yet.")
|
camel/types/__init__.py
CHANGED
|
@@ -12,24 +12,30 @@
|
|
|
12
12
|
# limitations under the License.
|
|
13
13
|
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
14
|
from .enums import (
|
|
15
|
-
|
|
15
|
+
AudioModelType,
|
|
16
|
+
EmbeddingModelType,
|
|
16
17
|
ModelType,
|
|
18
|
+
OpenAIBackendRole,
|
|
19
|
+
OpenAIImageDetailType,
|
|
20
|
+
OpenAIImageType,
|
|
21
|
+
OpenAPIName,
|
|
22
|
+
RoleType,
|
|
23
|
+
StorageType,
|
|
17
24
|
TaskType,
|
|
18
25
|
TerminationMode,
|
|
19
|
-
OpenAIBackendRole,
|
|
20
|
-
EmbeddingModelType,
|
|
21
26
|
VectorDistance,
|
|
27
|
+
VoiceType,
|
|
22
28
|
)
|
|
23
29
|
from .openai_types import (
|
|
24
|
-
Choice,
|
|
25
30
|
ChatCompletion,
|
|
31
|
+
ChatCompletionAssistantMessageParam,
|
|
26
32
|
ChatCompletionChunk,
|
|
33
|
+
ChatCompletionFunctionMessageParam,
|
|
27
34
|
ChatCompletionMessage,
|
|
28
35
|
ChatCompletionMessageParam,
|
|
29
36
|
ChatCompletionSystemMessageParam,
|
|
30
37
|
ChatCompletionUserMessageParam,
|
|
31
|
-
|
|
32
|
-
ChatCompletionFunctionMessageParam,
|
|
38
|
+
Choice,
|
|
33
39
|
CompletionUsage,
|
|
34
40
|
)
|
|
35
41
|
|
|
@@ -41,6 +47,7 @@ __all__ = [
|
|
|
41
47
|
'OpenAIBackendRole',
|
|
42
48
|
'EmbeddingModelType',
|
|
43
49
|
'VectorDistance',
|
|
50
|
+
'StorageType',
|
|
44
51
|
'Choice',
|
|
45
52
|
'ChatCompletion',
|
|
46
53
|
'ChatCompletionChunk',
|
|
@@ -51,4 +58,9 @@ __all__ = [
|
|
|
51
58
|
'ChatCompletionAssistantMessageParam',
|
|
52
59
|
'ChatCompletionFunctionMessageParam',
|
|
53
60
|
'CompletionUsage',
|
|
61
|
+
'OpenAIImageType',
|
|
62
|
+
'OpenAIImageDetailType',
|
|
63
|
+
'OpenAPIName',
|
|
64
|
+
'AudioModelType',
|
|
65
|
+
'VoiceType',
|
|
54
66
|
]
|
camel/types/enums.py
CHANGED
|
@@ -12,7 +12,7 @@
|
|
|
12
12
|
# limitations under the License.
|
|
13
13
|
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
14
|
import re
|
|
15
|
-
from enum import Enum
|
|
15
|
+
from enum import Enum, EnumMeta
|
|
16
16
|
|
|
17
17
|
|
|
18
18
|
class RoleType(Enum):
|
|
@@ -24,12 +24,11 @@ class RoleType(Enum):
|
|
|
24
24
|
|
|
25
25
|
|
|
26
26
|
class ModelType(Enum):
|
|
27
|
-
GPT_3_5_TURBO = "gpt-3.5-turbo
|
|
28
|
-
GPT_3_5_TURBO_16K = "gpt-3.5-turbo-1106"
|
|
27
|
+
GPT_3_5_TURBO = "gpt-3.5-turbo"
|
|
29
28
|
GPT_4 = "gpt-4"
|
|
30
29
|
GPT_4_32K = "gpt-4-32k"
|
|
31
|
-
GPT_4_TURBO = "gpt-4-
|
|
32
|
-
|
|
30
|
+
GPT_4_TURBO = "gpt-4-turbo"
|
|
31
|
+
GPT_4O = "gpt-4o"
|
|
33
32
|
|
|
34
33
|
STUB = "stub"
|
|
35
34
|
|
|
@@ -37,6 +36,17 @@ class ModelType(Enum):
|
|
|
37
36
|
VICUNA = "vicuna"
|
|
38
37
|
VICUNA_16K = "vicuna-16k"
|
|
39
38
|
|
|
39
|
+
# Legacy anthropic models
|
|
40
|
+
# NOTE: among the legacy Anthropic models, only Claude 2.1 supports system prompts
|
|
41
|
+
CLAUDE_2_1 = "claude-2.1"
|
|
42
|
+
CLAUDE_2_0 = "claude-2.0"
|
|
43
|
+
CLAUDE_INSTANT_1_2 = "claude-instant-1.2"
|
|
44
|
+
|
|
45
|
+
# 3 models
|
|
46
|
+
CLAUDE_3_OPUS = "claude-3-opus-20240229"
|
|
47
|
+
CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
|
|
48
|
+
CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
|
|
49
|
+
|
|
40
50
|
@property
|
|
41
51
|
def value_for_tiktoken(self) -> str:
|
|
42
52
|
return self.value if self is not ModelType.STUB else "gpt-3.5-turbo"
|
|
@@ -46,11 +56,10 @@ class ModelType(Enum):
|
|
|
46
56
|
r"""Returns whether this type of models is an OpenAI-released model."""
|
|
47
57
|
return self in {
|
|
48
58
|
ModelType.GPT_3_5_TURBO,
|
|
49
|
-
ModelType.GPT_3_5_TURBO_16K,
|
|
50
59
|
ModelType.GPT_4,
|
|
51
60
|
ModelType.GPT_4_32K,
|
|
52
61
|
ModelType.GPT_4_TURBO,
|
|
53
|
-
ModelType.
|
|
62
|
+
ModelType.GPT_4O,
|
|
54
63
|
}
|
|
55
64
|
|
|
56
65
|
@property
|
|
@@ -62,6 +71,22 @@ class ModelType(Enum):
|
|
|
62
71
|
ModelType.VICUNA_16K,
|
|
63
72
|
}
|
|
64
73
|
|
|
74
|
+
@property
|
|
75
|
+
def is_anthropic(self) -> bool:
|
|
76
|
+
r"""Returns whether this type of models is Anthropic-released model.
|
|
77
|
+
|
|
78
|
+
Returns:
|
|
79
|
+
bool: Whether this type of models is anthropic.
|
|
80
|
+
"""
|
|
81
|
+
return self in {
|
|
82
|
+
ModelType.CLAUDE_INSTANT_1_2,
|
|
83
|
+
ModelType.CLAUDE_2_0,
|
|
84
|
+
ModelType.CLAUDE_2_1,
|
|
85
|
+
ModelType.CLAUDE_3_OPUS,
|
|
86
|
+
ModelType.CLAUDE_3_SONNET,
|
|
87
|
+
ModelType.CLAUDE_3_HAIKU,
|
|
88
|
+
}
|
|
89
|
+
|
|
65
90
|
@property
|
|
66
91
|
def token_limit(self) -> int:
|
|
67
92
|
r"""Returns the maximum token limit for a given model.
|
|
@@ -70,15 +95,13 @@ class ModelType(Enum):
|
|
|
70
95
|
"""
|
|
71
96
|
if self is ModelType.GPT_3_5_TURBO:
|
|
72
97
|
return 16385
|
|
73
|
-
elif self is ModelType.GPT_3_5_TURBO_16K:
|
|
74
|
-
return 16385
|
|
75
98
|
elif self is ModelType.GPT_4:
|
|
76
99
|
return 8192
|
|
77
100
|
elif self is ModelType.GPT_4_32K:
|
|
78
101
|
return 32768
|
|
79
102
|
elif self is ModelType.GPT_4_TURBO:
|
|
80
103
|
return 128000
|
|
81
|
-
elif self is ModelType.
|
|
104
|
+
elif self is ModelType.GPT_4O:
|
|
82
105
|
return 128000
|
|
83
106
|
elif self is ModelType.STUB:
|
|
84
107
|
return 4096
|
|
@@ -89,6 +112,15 @@ class ModelType(Enum):
|
|
|
89
112
|
return 2048
|
|
90
113
|
elif self is ModelType.VICUNA_16K:
|
|
91
114
|
return 16384
|
|
115
|
+
if self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
|
|
116
|
+
return 100_000
|
|
117
|
+
elif self in {
|
|
118
|
+
ModelType.CLAUDE_2_1,
|
|
119
|
+
ModelType.CLAUDE_3_OPUS,
|
|
120
|
+
ModelType.CLAUDE_3_SONNET,
|
|
121
|
+
ModelType.CLAUDE_3_HAIKU,
|
|
122
|
+
}:
|
|
123
|
+
return 200_000
|
|
92
124
|
else:
|
|
93
125
|
raise ValueError("Unknown model type")
|
|
94
126
|
|
|
@@ -107,8 +139,10 @@ class ModelType(Enum):
|
|
|
107
139
|
pattern = r'^vicuna-\d+b-v\d+\.\d+-16k$'
|
|
108
140
|
return bool(re.match(pattern, model_name))
|
|
109
141
|
elif self is ModelType.LLAMA_2:
|
|
110
|
-
return (
|
|
111
|
-
|
|
142
|
+
return (
|
|
143
|
+
self.value in model_name.lower()
|
|
144
|
+
or "llama2" in model_name.lower()
|
|
145
|
+
)
|
|
112
146
|
else:
|
|
113
147
|
return self.value in model_name.lower()
|
|
114
148
|
|
|
@@ -155,13 +189,21 @@ class TaskType(Enum):
|
|
|
155
189
|
EVALUATION = "evaluation"
|
|
156
190
|
SOLUTION_EXTRACTION = "solution_extraction"
|
|
157
191
|
ROLE_DESCRIPTION = "role_description"
|
|
192
|
+
OBJECT_RECOGNITION = "object_recognition"
|
|
158
193
|
DEFAULT = "default"
|
|
159
194
|
|
|
160
195
|
|
|
161
196
|
class VectorDistance(Enum):
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
197
|
+
r"""Distance metrics used in a vector database."""
|
|
198
|
+
|
|
199
|
+
DOT = "dot"
|
|
200
|
+
r"""Dot product. https://en.wikipedia.org/wiki/Dot_product"""
|
|
201
|
+
|
|
202
|
+
COSINE = "cosine"
|
|
203
|
+
r"""Cosine similarity. https://en.wikipedia.org/wiki/Cosine_similarity"""
|
|
204
|
+
|
|
205
|
+
EUCLIDEAN = "euclidean"
|
|
206
|
+
r"""Euclidean distance. https://en.wikipedia.org/wiki/Euclidean_distance"""
|
|
165
207
|
|
|
166
208
|
|
|
167
209
|
class OpenAIBackendRole(Enum):
|
|
@@ -174,3 +216,75 @@ class OpenAIBackendRole(Enum):
|
|
|
174
216
|
class TerminationMode(Enum):
|
|
175
217
|
ANY = "any"
|
|
176
218
|
ALL = "all"
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
class OpenAIImageTypeMeta(EnumMeta):
    r"""Metaclass that makes ``in`` test raw values against an enum.

    With this metaclass, ``"png" in OpenAIImageType`` is True whenever the
    enum has a member whose value is ``"png"`` (the default EnumMeta
    behavior only matches members, not raw values).
    """

    def __contains__(cls, image_type: object) -> bool:
        # Delegate to the enum's value lookup: cls(x) accepts both raw
        # member values and the members themselves, raising ValueError
        # for anything else.
        try:
            cls(image_type)
            return True
        except ValueError:
            return False
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
class OpenAIImageType(Enum, metaclass=OpenAIImageTypeMeta):
    r"""Image types supported by OpenAI vision model.

    The metaclass lets callers write ``"png" in OpenAIImageType`` to test
    a raw file-extension string against the supported formats.
    """

    # Supported formats, per https://platform.openai.com/docs/guides/vision
    PNG = "png"
    JPEG = "jpeg"
    JPG = "jpg"
    WEBP = "webp"
    GIF = "gif"
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
class OpenAIImageDetailType(Enum):
    r"""Detail-level options for image inputs (auto / low / high)."""

    AUTO = "auto"
    LOW = "low"
    HIGH = "high"
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
class StorageType(Enum):
    r"""Identifiers for the supported vector storage backends."""

    MILVUS = "milvus"
    QDRANT = "qdrant"
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
class OpenAPIName(Enum):
    r"""Names of the bundled OpenAPI specifications."""

    COURSERA = "coursera"
    KLARNA = "klarna"
    SPEAK = "speak"
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
class AudioModelType(Enum):
    r"""Text-to-speech audio model identifiers."""

    TTS_1 = "tts-1"
    TTS_1_HD = "tts-1-hd"

    @property
    def is_openai(self) -> bool:
        r"""Returns whether this type of audio models is an OpenAI-released
        model."""
        openai_models = (
            AudioModelType.TTS_1,
            AudioModelType.TTS_1_HD,
        )
        return self in openai_models
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
class VoiceType(Enum):
    r"""Voice identifiers for text-to-speech output."""

    ALLOY = "alloy"
    ECHO = "echo"
    FABLE = "fable"
    ONYX = "onyx"
    NOVA = "nova"
    SHIMMER = "shimmer"

    @property
    def is_openai(self) -> bool:
        r"""Returns whether this type of voice is an OpenAI-released voice."""
        openai_voices = (
            VoiceType.ALLOY,
            VoiceType.ECHO,
            VoiceType.FABLE,
            VoiceType.ONYX,
            VoiceType.NOVA,
            VoiceType.SHIMMER,
        )
        return self in openai_voices
|
camel/types/openai_types.py
CHANGED
|
@@ -14,17 +14,22 @@
|
|
|
14
14
|
# isort: skip_file
|
|
15
15
|
from openai.types.chat.chat_completion import ChatCompletion, Choice
|
|
16
16
|
from openai.types.chat.chat_completion_assistant_message_param import (
|
|
17
|
-
ChatCompletionAssistantMessageParam,
|
|
17
|
+
ChatCompletionAssistantMessageParam,
|
|
18
|
+
)
|
|
18
19
|
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
|
|
19
20
|
from openai.types.chat.chat_completion_function_message_param import (
|
|
20
|
-
ChatCompletionFunctionMessageParam,
|
|
21
|
+
ChatCompletionFunctionMessageParam,
|
|
22
|
+
)
|
|
21
23
|
from openai.types.chat.chat_completion_message import ChatCompletionMessage
|
|
22
24
|
from openai.types.chat.chat_completion_message_param import (
|
|
23
|
-
ChatCompletionMessageParam,
|
|
25
|
+
ChatCompletionMessageParam,
|
|
26
|
+
)
|
|
24
27
|
from openai.types.chat.chat_completion_system_message_param import (
|
|
25
|
-
ChatCompletionSystemMessageParam,
|
|
28
|
+
ChatCompletionSystemMessageParam,
|
|
29
|
+
)
|
|
26
30
|
from openai.types.chat.chat_completion_user_message_param import (
|
|
27
|
-
ChatCompletionUserMessageParam,
|
|
31
|
+
ChatCompletionUserMessageParam,
|
|
32
|
+
)
|
|
28
33
|
from openai.types.completion_usage import CompletionUsage
|
|
29
34
|
|
|
30
35
|
Choice = Choice
|
camel/utils/__init__.py
CHANGED
|
@@ -11,37 +11,44 @@
|
|
|
11
11
|
# See the License for the specific language governing permissions and
|
|
12
12
|
# limitations under the License.
|
|
13
13
|
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
-
from .python_interpreter import PythonInterpreter
|
|
15
14
|
from .commons import (
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
15
|
+
PYDANTIC_V2,
|
|
16
|
+
api_key_required,
|
|
17
|
+
api_keys_required,
|
|
18
|
+
check_server_running,
|
|
19
|
+
dependencies_required,
|
|
20
20
|
download_tasks,
|
|
21
|
-
|
|
21
|
+
get_first_int,
|
|
22
|
+
get_prompt_template_key_words,
|
|
23
|
+
get_system_information,
|
|
22
24
|
get_task_list,
|
|
23
|
-
|
|
25
|
+
print_text_animated,
|
|
26
|
+
to_pascal,
|
|
24
27
|
)
|
|
25
28
|
from .token_counting import (
|
|
26
|
-
|
|
29
|
+
AnthropicTokenCounter,
|
|
27
30
|
BaseTokenCounter,
|
|
28
31
|
OpenAITokenCounter,
|
|
29
32
|
OpenSourceTokenCounter,
|
|
33
|
+
get_model_encoding,
|
|
30
34
|
)
|
|
31
35
|
|
|
32
36
|
__all__ = [
|
|
33
|
-
'
|
|
34
|
-
'openai_api_key_required',
|
|
37
|
+
'api_key_required',
|
|
35
38
|
'print_text_animated',
|
|
36
39
|
'get_prompt_template_key_words',
|
|
37
40
|
'get_first_int',
|
|
38
41
|
'download_tasks',
|
|
39
|
-
'PythonInterpreter',
|
|
40
|
-
'parse_doc',
|
|
41
42
|
'get_task_list',
|
|
42
|
-
'get_model_encoding',
|
|
43
43
|
'check_server_running',
|
|
44
|
+
'AnthropicTokenCounter',
|
|
45
|
+
'get_system_information',
|
|
46
|
+
'to_pascal',
|
|
47
|
+
'PYDANTIC_V2',
|
|
48
|
+
'get_model_encoding',
|
|
44
49
|
'BaseTokenCounter',
|
|
45
50
|
'OpenAITokenCounter',
|
|
46
51
|
'OpenSourceTokenCounter',
|
|
52
|
+
'dependencies_required',
|
|
53
|
+
'api_keys_required',
|
|
47
54
|
]
|