camel-ai 0.2.3a1__py3-none-any.whl → 0.2.3a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +93 -69
- camel/agents/knowledge_graph_agent.py +4 -6
- camel/bots/__init__.py +16 -2
- camel/bots/discord_app.py +138 -0
- camel/bots/slack/__init__.py +30 -0
- camel/bots/slack/models.py +158 -0
- camel/bots/slack/slack_app.py +255 -0
- camel/configs/__init__.py +1 -2
- camel/configs/anthropic_config.py +2 -5
- camel/configs/base_config.py +6 -6
- camel/configs/groq_config.py +2 -3
- camel/configs/ollama_config.py +1 -2
- camel/configs/openai_config.py +2 -23
- camel/configs/samba_config.py +2 -2
- camel/configs/togetherai_config.py +1 -1
- camel/configs/vllm_config.py +1 -1
- camel/configs/zhipuai_config.py +2 -3
- camel/embeddings/openai_embedding.py +2 -2
- camel/loaders/__init__.py +2 -0
- camel/loaders/chunkr_reader.py +163 -0
- camel/loaders/firecrawl_reader.py +3 -3
- camel/loaders/unstructured_io.py +35 -33
- camel/messages/__init__.py +1 -0
- camel/models/__init__.py +2 -4
- camel/models/anthropic_model.py +32 -26
- camel/models/azure_openai_model.py +39 -36
- camel/models/base_model.py +31 -20
- camel/models/gemini_model.py +37 -29
- camel/models/groq_model.py +29 -23
- camel/models/litellm_model.py +44 -61
- camel/models/mistral_model.py +32 -29
- camel/models/model_factory.py +66 -76
- camel/models/nemotron_model.py +33 -23
- camel/models/ollama_model.py +42 -47
- camel/models/{openai_compatibility_model.py → openai_compatible_model.py} +31 -49
- camel/models/openai_model.py +48 -29
- camel/models/reka_model.py +30 -28
- camel/models/samba_model.py +82 -177
- camel/models/stub_model.py +2 -2
- camel/models/togetherai_model.py +37 -43
- camel/models/vllm_model.py +43 -50
- camel/models/zhipuai_model.py +33 -27
- camel/retrievers/auto_retriever.py +28 -10
- camel/retrievers/vector_retriever.py +58 -47
- camel/societies/babyagi_playing.py +6 -3
- camel/societies/role_playing.py +5 -3
- camel/storages/graph_storages/graph_element.py +3 -5
- camel/storages/key_value_storages/json.py +6 -1
- camel/toolkits/__init__.py +20 -7
- camel/toolkits/arxiv_toolkit.py +155 -0
- camel/toolkits/ask_news_toolkit.py +653 -0
- camel/toolkits/base.py +2 -3
- camel/toolkits/code_execution.py +6 -7
- camel/toolkits/dalle_toolkit.py +6 -6
- camel/toolkits/{openai_function.py → function_tool.py} +34 -11
- camel/toolkits/github_toolkit.py +9 -10
- camel/toolkits/google_maps_toolkit.py +7 -7
- camel/toolkits/google_scholar_toolkit.py +146 -0
- camel/toolkits/linkedin_toolkit.py +7 -7
- camel/toolkits/math_toolkit.py +8 -8
- camel/toolkits/open_api_toolkit.py +5 -5
- camel/toolkits/reddit_toolkit.py +7 -7
- camel/toolkits/retrieval_toolkit.py +5 -5
- camel/toolkits/search_toolkit.py +9 -9
- camel/toolkits/slack_toolkit.py +11 -11
- camel/toolkits/twitter_toolkit.py +378 -452
- camel/toolkits/weather_toolkit.py +6 -6
- camel/toolkits/whatsapp_toolkit.py +177 -0
- camel/types/__init__.py +6 -1
- camel/types/enums.py +40 -85
- camel/types/openai_types.py +3 -0
- camel/types/unified_model_type.py +104 -0
- camel/utils/__init__.py +0 -2
- camel/utils/async_func.py +7 -7
- camel/utils/commons.py +32 -3
- camel/utils/token_counting.py +30 -212
- camel/workforce/role_playing_worker.py +1 -1
- camel/workforce/single_agent_worker.py +1 -1
- camel/workforce/task_channel.py +4 -3
- camel/workforce/workforce.py +4 -4
- camel_ai-0.2.3a2.dist-info/LICENSE +201 -0
- {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.3a2.dist-info}/METADATA +27 -56
- {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.3a2.dist-info}/RECORD +85 -76
- {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.3a2.dist-info}/WHEEL +1 -1
- camel/bots/discord_bot.py +0 -206
- camel/models/open_source_model.py +0 -170
camel/loaders/chunkr_reader.py
ADDED
@@ -0,0 +1,163 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+import json
+import logging
+import os
+import time
+from typing import IO, Any, Optional, Union
+
+import requests
+
+from camel.utils import api_keys_required
+
+logger = logging.getLogger(__name__)
+
+
+class ChunkrReader:
+    r"""Chunkr Reader for processing documents and returning content
+    in various formats.
+
+    Args:
+        api_key (Optional[str], optional): The API key for Chunkr API. If not
+            provided, it will be retrieved from the environment variable
+            `CHUNKR_API_KEY`. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Chunkr service.
+            (default: :obj:`https://api.chunkr.ai/api/v1/task`)
+        timeout (int, optional): The maximum time in seconds to wait for the
+            API responses. (default: :obj:`30`)
+        **kwargs (Any): Additional keyword arguments for request headers.
+    """
+
+    @api_keys_required("CHUNKR_API_KEY")
+    def __init__(
+        self,
+        api_key: Optional[str] = None,
+        url: Optional[str] = "https://api.chunkr.ai/api/v1/task",
+        timeout: int = 30,
+        **kwargs: Any,
+    ) -> None:
+        self._api_key = api_key or os.getenv('CHUNKR_API_KEY')
+        self._url = os.getenv('CHUNKR_API_URL') or url
+        self._headers = {
+            "Authorization": f"{self._api_key}",
+            **kwargs,
+        }
+        self.timeout = timeout
+
+    def submit_task(
+        self,
+        file_path: str,
+        model: str = "Fast",
+        ocr_strategy: str = "Auto",
+        target_chunk_length: str = "512",
+    ) -> str:
+        r"""Submits a file to the Chunkr API and returns the task ID.
+
+        Args:
+            file_path (str): The path to the file to be uploaded.
+            model (str, optional): The model to be used for the task.
+                (default: :obj:`Fast`)
+            ocr_strategy (str, optional): The OCR strategy. Defaults to 'Auto'.
+            target_chunk_length (str, optional): The target chunk length.
+                (default: :obj:`512`)
+
+        Returns:
+            str: The task ID.
+        """
+        with open(file_path, 'rb') as file:
+            files: dict[
+                str, Union[tuple[None, IO[bytes]], tuple[None, str]]
+            ] = {
+                'file': (
+                    None,
+                    file,
+                ),  # Properly pass the file as a binary stream
+                'model': (None, model),
+                'ocr_strategy': (None, ocr_strategy),
+                'target_chunk_length': (None, target_chunk_length),
+            }
+            try:
+                response = requests.post(
+                    self._url,  # type: ignore[arg-type]
+                    headers=self._headers,
+                    files=files,
+                    timeout=self.timeout,
+                )
+                response.raise_for_status()
+                task_id = response.json().get('task_id')
+                if not task_id:
+                    raise ValueError("Task ID not returned in the response.")
+                logger.info(f"Task submitted successfully. Task ID: {task_id}")
+                return task_id
+            except Exception as e:
+                logger.error(f"Failed to submit task: {e}")
+                raise ValueError(f"Failed to submit task: {e}") from e
+
+    def get_task_output(self, task_id: str, max_retries: int = 5) -> str:
+        r"""Polls the Chunkr API to check the task status and returns the task
+        result.
+
+        Args:
+            task_id (str): The task ID to check the status for.
+            max_retries (int, optional): Maximum number of retry attempts.
+                (default: :obj:`5`)
+
+        Returns:
+            str: The formatted task result in JSON format.
+
+        Raises:
+            ValueError: If the task status cannot be retrieved.
+            RuntimeError: If the maximum number of retries is reached without
+                a successful task completion.
+        """
+        url_get = f"{self._url}/{task_id}"
+        attempts = 0
+
+        while attempts < max_retries:
+            try:
+                response = requests.get(
+                    url_get, headers=self._headers, timeout=self.timeout
+                )
+                response.raise_for_status()
+                task_status = response.json().get('status')
+
+                if task_status == "Succeeded":
+                    logger.info(f"Task {task_id} completed successfully.")
+                    return self._pretty_print_response(response.json())
+                else:
+                    logger.info(
+                        f"Task {task_id} is still {task_status}. Retrying "
+                        "in 5 seconds..."
+                    )
+            except Exception as e:
+                logger.error(f"Failed to retrieve task status: {e}")
+                raise ValueError(f"Failed to retrieve task status: {e}") from e
+
+            attempts += 1
+            time.sleep(5)
+
+        logger.error(f"Max retries reached for task {task_id}.")
+        raise RuntimeError(f"Max retries reached for task {task_id}.")
+
+    def _pretty_print_response(self, response_json: dict) -> str:
+        r"""Pretty prints the JSON response.
+
+        Args:
+            response_json (dict): The response JSON to pretty print.
+
+        Returns:
+            str: Formatted JSON as a string.
+        """
+        return json.dumps(response_json, indent=4)
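For orientation, a minimal usage sketch of the new reader (not from the package: the file path is a placeholder, `CHUNKR_API_KEY` must be set in the environment, and `ChunkrReader` is assumed to be re-exported from `camel.loaders`, which the `+2 -0` change to `camel/loaders/__init__.py` suggests):

from camel.loaders import ChunkrReader

reader = ChunkrReader(timeout=60)
# "report.pdf" is a placeholder path; submit_task uploads it and returns a task ID.
task_id = reader.submit_task(file_path="report.pdf")
# get_task_output polls every 5 seconds (up to max_retries attempts) and returns
# the pretty-printed JSON result once the task status is "Succeeded".
print(reader.get_task_output(task_id, max_retries=10))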
camel/loaders/firecrawl_reader.py
CHANGED
@@ -155,12 +155,12 @@ class Firecrawl:
         except Exception as e:
             raise RuntimeError(f"Failed to scrape the URL: {e}")
 
-    def structured_scrape(self, url: str,
+    def structured_scrape(self, url: str, response_format: BaseModel) -> Dict:
         r"""Use LLM to extract structured data from given URL.
 
         Args:
             url (str): The URL to read.
-
+            response_format (BaseModel): A pydantic model
                 that includes value types and field descriptions used to
                 generate a structured response by LLM. This schema helps
                 in defining the expected output format.

@@ -176,7 +176,7 @@ class Firecrawl:
                 url,
                 {
                     'formats': ['extract'],
-                    'extract': {'schema':
+                    'extract': {'schema': response_format.model_json_schema()},
                 },
             )
             return data.get("extract", {})
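A sketch of the updated `structured_scrape` call; the `ArticleInfo` schema and URL below are illustrative, not from the package, and a Firecrawl API key is assumed to be configured in the environment:

from pydantic import BaseModel

from camel.loaders import Firecrawl

class ArticleInfo(BaseModel):
    title: str
    summary: str

firecrawl = Firecrawl()
# The schema is serialized via response_format.model_json_schema(), as in the diff.
data = firecrawl.structured_scrape(
    url="https://example.com/post", response_format=ArticleInfo
)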
camel/loaders/unstructured_io.py
CHANGED
@@ -13,8 +13,9 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import uuid
 import warnings
-from io import IOBase
 from typing import (
+    IO,
+    TYPE_CHECKING,
     Any,
     Dict,
     List,

@@ -24,7 +25,8 @@ from typing import (
     Union,
 )
 
-
+if TYPE_CHECKING:
+    from unstructured.documents.elements import Element
 
 
 class UnstructuredIO:

@@ -40,33 +42,34 @@ class UnstructuredIO:
     @staticmethod
     def create_element_from_text(
         text: str,
-        element_id: Optional[
+        element_id: Optional[str] = None,
         embeddings: Optional[List[float]] = None,
         filename: Optional[str] = None,
         file_directory: Optional[str] = None,
         last_modified: Optional[str] = None,
         filetype: Optional[str] = None,
-        parent_id: Optional[
-    ) -> Element:
+        parent_id: Optional[str] = None,
+    ) -> "Element":
         r"""Creates a Text element from a given text input, with optional
         metadata and embeddings.
 
         Args:
             text (str): The text content for the element.
-            element_id (Optional[
-
-            embeddings (
-                numbers representing the text embeddings.
+            element_id (Optional[str], optional): Unique identifier for the
+                element. (default: :obj:`None`)
+            embeddings (List[float], optional): A list of float
+                numbers representing the text embeddings.
+                (default: :obj:`None`)
             filename (Optional[str], optional): The name of the file the
-                element is associated with.
+                element is associated with. (default: :obj:`None`)
             file_directory (Optional[str], optional): The directory path where
-                the file is located.
+                the file is located. (default: :obj:`None`)
             last_modified (Optional[str], optional): The last modified date of
-                the file.
-            filetype (Optional[str], optional): The type of the file.
-
-            parent_id (Optional[
-
+                the file. (default: :obj:`None`)
+            filetype (Optional[str], optional): The type of the file.
+                (default: :obj:`None`)
+            parent_id (Optional[str], optional): The identifier of the parent
+                element. (default: :obj:`None`)
 
         Returns:
             Element: An instance of Text with the provided content and

@@ -84,7 +87,7 @@ class UnstructuredIO:
 
         return Text(
             text=text,
-            element_id=element_id or uuid.uuid4(),
+            element_id=element_id or str(uuid.uuid4()),
             metadata=metadata,
             embeddings=embeddings,
         )

@@ -93,7 +96,7 @@ class UnstructuredIO:
     def parse_file_or_url(
         input_path: str,
         **kwargs: Any,
-    ) -> Union[List[Element], None]:
+    ) -> Union[List["Element"], None]:
         r"""Loads a file or a URL and parses its contents into elements.
 
         Args:

@@ -119,25 +122,23 @@ class UnstructuredIO:
         import os
         from urllib.parse import urlparse
 
+        from unstructured.partition.auto import partition
+
         # Check if the input is a URL
         parsed_url = urlparse(input_path)
         is_url = all([parsed_url.scheme, parsed_url.netloc])
 
+        # Handling URL
         if is_url:
-            # Handling URL
-            from unstructured.partition.html import partition_html
-
             try:
-                elements =
+                elements = partition(url=input_path, **kwargs)
                 return elements
             except Exception:
                 warnings.warn(f"Failed to parse the URL: {input_path}")
                 return None
 
+        # Handling file
         else:
-            # Handling file
-            from unstructured.partition.auto import partition
-
             # Check if the file exists
             if not os.path.exists(input_path):
                 raise FileNotFoundError(

@@ -154,11 +155,13 @@ class UnstructuredIO:
         return None
 
     @staticmethod
-    def parse_bytes(
+    def parse_bytes(
+        file: IO[bytes], **kwargs: Any
+    ) -> Union[List["Element"], None]:
         r"""Parses a bytes stream and converts its contents into elements.
 
         Args:
-            file (
+            file (IO[bytes]): The file in bytes format to be parsed.
             **kwargs: Extra kwargs passed to the partition function.
 
         Returns:

@@ -181,8 +184,6 @@ class UnstructuredIO:
             elements = partition(file=file, **kwargs)
             return elements
         except Exception as e:
-            import warnings
-
             warnings.warn(f"Failed to partition the file stream: {e}")
             return None
 

@@ -196,7 +197,7 @@ class UnstructuredIO:
 
         This function applies multiple text cleaning utilities by calling the
         `unstructured` library's cleaning bricks for operations like
-        replacing
+        replacing Unicode quotes, removing extra whitespace, dashes, non-ascii
         characters, and more.
 
         If no cleaning options are provided, a default set of cleaning

@@ -283,7 +284,8 @@ class UnstructuredIO:
             )
         else:
             raise ValueError(
-                f"'{func_name}' is not a valid function in
+                f"'{func_name}' is not a valid function in "
+                "`Unstructured IO`."
             )
 
         return cleaned_text

@@ -440,8 +442,8 @@ class UnstructuredIO:
 
     @staticmethod
     def chunk_elements(
-        elements: List[
-    ) -> List[Element]:
+        elements: List["Element"], chunk_type: str, **kwargs
+    ) -> List["Element"]:
         r"""Chunks elements by titles.
 
         Args:
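A short sketch of the reworked parsing path (the URL is a placeholder; requires the `unstructured` package; the `chunk_by_title` value is an assumption based on the method's "Chunks elements by titles" docstring, not confirmed by this diff):

from camel.loaders import UnstructuredIO

# parse_file_or_url is a staticmethod; both URLs and local paths now go
# through unstructured's generic partition().
elements = UnstructuredIO.parse_file_or_url("https://www.example.com")
if elements is not None:
    chunks = UnstructuredIO.chunk_elements(elements, chunk_type="chunk_by_title")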
camel/messages/__init__.py
CHANGED
camel/models/__init__.py
CHANGED
@@ -21,9 +21,8 @@ from .mistral_model import MistralModel
 from .model_factory import ModelFactory
 from .nemotron_model import NemotronModel
 from .ollama_model import OllamaModel
-from .open_source_model import OpenSourceModel
 from .openai_audio_models import OpenAIAudioModels
-from .
+from .openai_compatible_model import OpenAICompatibleModel
 from .openai_model import OpenAIModel
 from .reka_model import RekaModel
 from .samba_model import SambaModel

@@ -41,7 +40,6 @@ __all__ = [
     'GroqModel',
     'StubModel',
     'ZhipuAIModel',
-    'OpenSourceModel',
     'ModelFactory',
     'LiteLLMModel',
     'OpenAIAudioModels',

@@ -49,7 +47,7 @@ __all__ = [
     'OllamaModel',
     'VLLMModel',
     'GeminiModel',
-    '
+    'OpenAICompatibleModel',
     'RekaModel',
     'SambaModel',
     'TogetherAIModel',
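For downstream code this is a one-line import change (sketch; the module rename from openai_compatibility_model.py is shown in the file list above, and `OpenSourceModel` is removed outright with no direct replacement in this diff):

# Removed in 0.2.3a2:
# from camel.models import OpenSourceModel
# New export name after the module rename:
from camel.models import OpenAICompatibleModel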
camel/models/anthropic_model.py
CHANGED
@@ -12,11 +12,9 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import os
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, Union
 
-from
-
-from camel.configs import ANTHROPIC_API_PARAMS
+from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
 from camel.messages import OpenAIMessage
 from camel.models.base_model import BaseModelBackend
 from camel.types import ChatCompletion, ModelType

@@ -24,40 +22,47 @@ from camel.utils import (
     AnthropicTokenCounter,
     BaseTokenCounter,
     api_keys_required,
+    dependencies_required,
 )
 
 
 class AnthropicModel(BaseModelBackend):
-    r"""Anthropic API in a unified BaseModelBackend interface.
-
+    r"""Anthropic API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of CLAUDE_* series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into Anthropic.messages.create(). If
+            :obj:`None`, :obj:`AnthropicConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Anthropic service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Anthropic service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`AnthropicTokenCounter`
+            will be used. (default: :obj:`None`)
+    """
+
+    @dependencies_required('anthropic')
     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-
+        from anthropic import Anthropic
 
-
-
-
-
-            be fed into Anthropic.messages.create().
-            api_key (Optional[str]): The API key for authenticating with the
-                Anthropic service. (default: :obj:`None`)
-            url (Optional[str]): The url to the Anthropic service. (default:
-                :obj:`None`)
-            token_counter (Optional[BaseTokenCounter]): Token counter to use
-                for the model. If not provided, `AnthropicTokenCounter` will
-                be used.
-        """
+        if model_config_dict is None:
+            model_config_dict = AnthropicConfig().as_dict()
+        api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
+        url = url or os.environ.get("ANTHROPIC_API_BASE_URL")
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
-        self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
-        self._url = url or os.environ.get("ANTHROPIC_API_BASE_URL")
         self.client = Anthropic(api_key=self._api_key, base_url=self._url)
 
     def _convert_response_from_anthropic_to_openai(self, response):

@@ -89,7 +94,7 @@ class AnthropicModel(BaseModelBackend):
         tokenization style.
         """
         if not self._token_counter:
-            self._token_counter = AnthropicTokenCounter(
+            self._token_counter = AnthropicTokenCounter()
         return self._token_counter
 
     def count_tokens_from_prompt(self, prompt: str) -> int:

@@ -117,13 +122,14 @@ class AnthropicModel(BaseModelBackend):
         Returns:
             ChatCompletion: Response in the OpenAI API format.
         """
+        from anthropic import NOT_GIVEN
 
         if messages[0]["role"] == "system":
            sys_msg = str(messages.pop(0)["content"])
         else:
             sys_msg = NOT_GIVEN  # type: ignore[assignment]
         response = self.client.messages.create(
-            model=self.model_type
+            model=self.model_type,
             system=sys_msg,
             messages=messages,  # type: ignore[arg-type]
             **self.model_config_dict,
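The constructor change in practice (a sketch; assumes the `anthropic` package is installed and `ANTHROPIC_API_KEY` is set; `CLAUDE_3_5_SONNET` stands in for any CLAUDE_* member):

from camel.models import AnthropicModel
from camel.types import ModelType

# model_config_dict is now optional and defaults to AnthropicConfig().as_dict();
# the API key and base URL fall back to environment variables.
model = AnthropicModel(model_type=ModelType.CLAUDE_3_5_SONNET)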
camel/models/azure_openai_model.py
CHANGED
@@ -16,60 +16,65 @@ from typing import Any, Dict, List, Optional, Union
 
 from openai import AzureOpenAI, Stream
 
-from camel.configs import OPENAI_API_PARAMS
+from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
 from camel.messages import OpenAIMessage
 from camel.models.base_model import BaseModelBackend
-from camel.types import
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
 from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
 
 
 class AzureOpenAIModel(BaseModelBackend):
     r"""Azure OpenAI API in a unified BaseModelBackend interface.
-
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of GPT_* series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`ChatGPTConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the OpenAI service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the OpenAI service.
+            (default: :obj:`None`)
+        api_version (Optional[str], optional): The api version for the model.
+            (default: :obj:`None`)
+        azure_deployment_name (Optional[str], optional): The deployment name
+            you chose when you deployed an azure model. (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter`
+            will be used. (default: :obj:`None`)
+
+    References:
+        https://learn.microsoft.com/en-us/azure/ai-services/openai/
     """
 
     def __init__(
         self,
-        model_type: ModelType,
-        model_config_dict: Dict[str, Any],
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
         url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
         api_version: Optional[str] = None,
         azure_deployment_name: Optional[str] = None,
     ) -> None:
-
+        if model_config_dict is None:
+            model_config_dict = ChatGPTConfig().as_dict()
+        api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
+        url = url or os.environ.get("AZURE_OPENAI_BASE_URL")
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter
+        )
 
-        Args:
-            model_type (ModelType): Model for which a backend is created,
-                one of GPT_* series.
-            model_config_dict (Dict[str, Any]): A dictionary that will
-                be fed into openai.ChatCompletion.create().
-            api_key (Optional[str]): The API key for authenticating with the
-                OpenAI service. (default: :obj:`None`)
-            url (Optional[str]): The url to the OpenAI service. (default:
-                :obj:`None`)
-            api_version (Optional[str]): The api version for the model.
-            azure_deployment_name (Optional[str]): The deployment name you
-                chose when you deployed an azure model. (default: :obj:`None`)
-        """
-        super().__init__(model_type, model_config_dict, api_key, url)
-        self._url = url or os.environ.get("AZURE_OPENAI_ENDPOINT")
-        self._api_key = api_key or os.environ.get("AZURE_OPENAI_API_KEY")
         self.api_version = api_version or os.environ.get("AZURE_API_VERSION")
         self.azure_deployment_name = azure_deployment_name or os.environ.get(
             "AZURE_DEPLOYMENT_NAME"
         )
-
-        if self._url is None:
-            raise ValueError(
-                "Must provide either the `url` argument "
-                "or `AZURE_OPENAI_ENDPOINT` environment variable."
-            )
-        if self._api_key is None:
-            raise ValueError(
-                "Must provide either the `api_key` argument "
-                "or `AZURE_OPENAI_API_KEY` environment variable."
-            )
         if self.api_version is None:
             raise ValueError(
                 "Must provide either the `api_version` argument "

@@ -80,7 +85,6 @@ class AzureOpenAIModel(BaseModelBackend):
                 "Must provide either the `azure_deployment_name` argument "
                 "or `AZURE_DEPLOYMENT_NAME` environment variable."
             )
-        self.model = str(self.azure_deployment_name)
 
         self._client = AzureOpenAI(
             azure_endpoint=str(self._url),

@@ -90,7 +94,6 @@ class AzureOpenAIModel(BaseModelBackend):
             timeout=60,
             max_retries=3,
         )
-        self._token_counter: Optional[BaseTokenCounter] = None
 
     @property
     def token_counter(self) -> BaseTokenCounter:

@@ -122,7 +125,7 @@ class AzureOpenAIModel(BaseModelBackend):
         """
         response = self._client.chat.completions.create(
             messages=messages,
-            model=self.
+            model=self.azure_deployment_name,  # type:ignore[arg-type]
             **self.model_config_dict,
         )
         return response
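And the Azure constructor after the change (a sketch; note the endpoint now falls back to `AZURE_OPENAI_BASE_URL` rather than `AZURE_OPENAI_ENDPOINT`, and `GPT_4O_MINI` stands in for any GPT_* member):

from camel.models import AzureOpenAIModel
from camel.types import ModelType

# Assumed environment: AZURE_OPENAI_API_KEY, AZURE_OPENAI_BASE_URL,
# AZURE_API_VERSION, AZURE_DEPLOYMENT_NAME. model_config_dict now
# defaults to ChatGPTConfig().as_dict().
model = AzureOpenAIModel(model_type=ModelType.GPT_4O_MINI)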