camel-ai 0.2.12__py3-none-any.whl → 0.2.14__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic. Click here for more details.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +1 -1
- camel/embeddings/openai_compatible_embedding.py +1 -1
- camel/embeddings/openai_embedding.py +1 -1
- camel/messages/base.py +5 -5
- camel/models/__init__.py +2 -0
- camel/models/anthropic_model.py +1 -1
- camel/models/azure_openai_model.py +1 -1
- camel/models/deepseek_model.py +1 -1
- camel/models/fish_audio_model.py +146 -0
- camel/models/gemini_model.py +1 -1
- camel/models/groq_model.py +1 -1
- camel/models/nemotron_model.py +1 -1
- camel/models/nvidia_model.py +1 -1
- camel/models/ollama_model.py +1 -1
- camel/models/openai_compatible_model.py +1 -1
- camel/models/openai_model.py +24 -12
- camel/models/qwen_model.py +1 -1
- camel/models/reward/nemotron_model.py +1 -1
- camel/models/samba_model.py +1 -1
- camel/models/sglang_model.py +2 -2
- camel/models/togetherai_model.py +1 -1
- camel/models/vllm_model.py +1 -1
- camel/models/yi_model.py +1 -1
- camel/models/zhipuai_model.py +1 -1
- camel/runtime/configs.py +12 -12
- camel/runtime/docker_runtime.py +7 -7
- camel/runtime/llm_guard_runtime.py +3 -3
- camel/runtime/remote_http_runtime.py +5 -5
- camel/runtime/utils/function_risk_toolkit.py +1 -1
- camel/runtime/utils/ignore_risk_toolkit.py +2 -2
- camel/schemas/__init__.py +2 -1
- camel/schemas/base.py +2 -4
- camel/schemas/outlines_converter.py +249 -0
- camel/toolkits/arxiv_toolkit.py +6 -6
- camel/toolkits/ask_news_toolkit.py +2 -2
- camel/toolkits/github_toolkit.py +3 -3
- camel/toolkits/google_scholar_toolkit.py +16 -2
- camel/toolkits/meshy_toolkit.py +2 -2
- camel/toolkits/search_toolkit.py +2 -2
- camel/types/enums.py +3 -0
- camel/utils/commons.py +4 -22
- camel/utils/token_counting.py +10 -2
- {camel_ai-0.2.12.dist-info → camel_ai-0.2.14.dist-info}/METADATA +14 -11
- {camel_ai-0.2.12.dist-info → camel_ai-0.2.14.dist-info}/RECORD +47 -45
- {camel_ai-0.2.12.dist-info → camel_ai-0.2.14.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.12.dist-info → camel_ai-0.2.14.dist-info}/WHEEL +0 -0
camel/schemas/base.py
CHANGED
|
@@ -15,8 +15,6 @@
|
|
|
15
15
|
from abc import ABC, abstractmethod
|
|
16
16
|
from typing import Any, Dict
|
|
17
17
|
|
|
18
|
-
from pydantic import BaseModel
|
|
19
|
-
|
|
20
18
|
|
|
21
19
|
class BaseConverter(ABC):
|
|
22
20
|
r"""A base class for schema outputs that includes functionality
|
|
@@ -30,7 +28,7 @@ class BaseConverter(ABC):
|
|
|
30
28
|
@abstractmethod
|
|
31
29
|
def convert(
|
|
32
30
|
self, content: str, *args: Any, **kwargs: Dict[str, Any]
|
|
33
|
-
) ->
|
|
31
|
+
) -> Any:
|
|
34
32
|
r"""Structures the input text into the expected response format.
|
|
35
33
|
|
|
36
34
|
Args:
|
|
@@ -40,6 +38,6 @@ class BaseConverter(ABC):
|
|
|
40
38
|
prompt (Optional[str], optional): The prompt to be used.
|
|
41
39
|
|
|
42
40
|
Returns:
|
|
43
|
-
|
|
41
|
+
Any: The converted response.
|
|
44
42
|
"""
|
|
45
43
|
pass
|
|
@@ -0,0 +1,249 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
|
|
15
|
+
from typing import Any, Callable, List, Literal, Type, Union
|
|
16
|
+
|
|
17
|
+
from pydantic import BaseModel
|
|
18
|
+
|
|
19
|
+
from .base import BaseConverter
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class OutlinesConverter(BaseConverter):
|
|
23
|
+
r"""OutlinesConverter is a class that converts a string or a function
|
|
24
|
+
into a BaseModel schema.
|
|
25
|
+
|
|
26
|
+
Args:
|
|
27
|
+
model_type (str, optional): The model type to be used.
|
|
28
|
+
platform (str, optional): The platform to be used.
|
|
29
|
+
1. transformers
|
|
30
|
+
2. mamba
|
|
31
|
+
3. vllm
|
|
32
|
+
4. llamacpp
|
|
33
|
+
5. mlx
|
|
34
|
+
(default: "transformers")
|
|
35
|
+
**kwargs: The keyword arguments to be used. See the outlines
|
|
36
|
+
documentation for more details. See
|
|
37
|
+
https://dottxt-ai.github.io/outlines/latest/reference/models/models/
|
|
38
|
+
"""
|
|
39
|
+
|
|
40
|
+
def __init__(
|
|
41
|
+
self,
|
|
42
|
+
model_type: str,
|
|
43
|
+
platform: Literal[
|
|
44
|
+
"vllm", "transformers", "mamba", "llamacpp", "mlx"
|
|
45
|
+
] = "transformers",
|
|
46
|
+
**kwargs: Any,
|
|
47
|
+
):
|
|
48
|
+
self.model_type = model_type
|
|
49
|
+
from outlines import models
|
|
50
|
+
|
|
51
|
+
match platform:
|
|
52
|
+
case "vllm":
|
|
53
|
+
self._outlines_model = models.vllm(model_type, **kwargs)
|
|
54
|
+
case "transformers":
|
|
55
|
+
self._outlines_model = models.transformers(
|
|
56
|
+
model_type, **kwargs
|
|
57
|
+
)
|
|
58
|
+
case "mamba":
|
|
59
|
+
self._outlines_model = models.mamba(model_type, **kwargs)
|
|
60
|
+
case "llamacpp":
|
|
61
|
+
self._outlines_model = models.llamacpp(model_type, **kwargs)
|
|
62
|
+
case "mlx":
|
|
63
|
+
self._outlines_model = models.mlxlm(model_type, **kwargs)
|
|
64
|
+
case _:
|
|
65
|
+
raise ValueError(f"Unsupported platform: {platform}")
|
|
66
|
+
|
|
67
|
+
def convert_regex(self, content: str, regex_pattern: str) -> str:
|
|
68
|
+
r"""Convert the content to the specified regex pattern.
|
|
69
|
+
|
|
70
|
+
Args:
|
|
71
|
+
content (str): The content to be converted.
|
|
72
|
+
regex_pattern (str): The regex pattern to be used.
|
|
73
|
+
|
|
74
|
+
Returns:
|
|
75
|
+
str: The converted content.
|
|
76
|
+
"""
|
|
77
|
+
import outlines
|
|
78
|
+
|
|
79
|
+
regex_generator = outlines.generate.regex(
|
|
80
|
+
self._outlines_model, regex_pattern
|
|
81
|
+
)
|
|
82
|
+
return regex_generator(content)
|
|
83
|
+
|
|
84
|
+
def convert_json(
|
|
85
|
+
self,
|
|
86
|
+
content: str,
|
|
87
|
+
output_schema: Union[str, Callable],
|
|
88
|
+
) -> dict:
|
|
89
|
+
r"""Convert the content to the specified JSON schema given by
|
|
90
|
+
output_schema.
|
|
91
|
+
|
|
92
|
+
Args:
|
|
93
|
+
content (str): The content to be converted.
|
|
94
|
+
output_schema (Union[str, Callable]): The expected format of the
|
|
95
|
+
response.
|
|
96
|
+
|
|
97
|
+
Returns:
|
|
98
|
+
dict: The converted content in JSON format.
|
|
99
|
+
"""
|
|
100
|
+
import outlines
|
|
101
|
+
|
|
102
|
+
json_generator = outlines.generate.json(
|
|
103
|
+
self._outlines_model, output_schema
|
|
104
|
+
)
|
|
105
|
+
return json_generator(content)
|
|
106
|
+
|
|
107
|
+
def convert_pydantic(
|
|
108
|
+
self,
|
|
109
|
+
content: str,
|
|
110
|
+
output_schema: Type[BaseModel],
|
|
111
|
+
) -> BaseModel:
|
|
112
|
+
r"""Convert the content to the specified Pydantic schema.
|
|
113
|
+
|
|
114
|
+
Args:
|
|
115
|
+
content (str): The content to be converted.
|
|
116
|
+
output_schema (Type[BaseModel]): The expected format of the
|
|
117
|
+
response.
|
|
118
|
+
|
|
119
|
+
Returns:
|
|
120
|
+
BaseModel: The converted content in pydantic model format.
|
|
121
|
+
"""
|
|
122
|
+
import outlines
|
|
123
|
+
|
|
124
|
+
json_generator = outlines.generate.json(
|
|
125
|
+
self._outlines_model, output_schema
|
|
126
|
+
)
|
|
127
|
+
return json_generator(content)
|
|
128
|
+
|
|
129
|
+
def convert_type(self, content: str, type_name: type) -> str:
|
|
130
|
+
r"""Convert the content to the specified type.
|
|
131
|
+
|
|
132
|
+
The following types are currently available:
|
|
133
|
+
1. int
|
|
134
|
+
2. float
|
|
135
|
+
3. bool
|
|
136
|
+
4. datetime.date
|
|
137
|
+
5. datetime.time
|
|
138
|
+
6. datetime.datetime
|
|
139
|
+
7. custom types (https://dottxt-ai.github.io/outlines/latest/reference/generation/types/)
|
|
140
|
+
|
|
141
|
+
Args:
|
|
142
|
+
content (str): The content to be converted.
|
|
143
|
+
type_name (type): The type to be used.
|
|
144
|
+
|
|
145
|
+
Returns:
|
|
146
|
+
str: The converted content.
|
|
147
|
+
"""
|
|
148
|
+
import outlines
|
|
149
|
+
|
|
150
|
+
type_generator = outlines.generate.format(
|
|
151
|
+
self._outlines_model, type_name
|
|
152
|
+
)
|
|
153
|
+
return type_generator(content)
|
|
154
|
+
|
|
155
|
+
def convert_choice(self, content: str, choices: List[str]) -> str:
|
|
156
|
+
r"""Convert the content to the specified choice.
|
|
157
|
+
|
|
158
|
+
Args:
|
|
159
|
+
content (str): The content to be converted.
|
|
160
|
+
choices (List[str]): The choices to be used.
|
|
161
|
+
|
|
162
|
+
Returns:
|
|
163
|
+
str: The converted content.
|
|
164
|
+
"""
|
|
165
|
+
import outlines
|
|
166
|
+
|
|
167
|
+
choices_generator = outlines.generate.choice(
|
|
168
|
+
self._outlines_model, choices
|
|
169
|
+
)
|
|
170
|
+
return choices_generator(content)
|
|
171
|
+
|
|
172
|
+
def convert_grammar(self, content: str, grammar: str) -> str:
|
|
173
|
+
r"""Convert the content to the specified grammar.
|
|
174
|
+
|
|
175
|
+
Args:
|
|
176
|
+
content (str): The content to be converted.
|
|
177
|
+
grammar (str): The grammar to be used.
|
|
178
|
+
|
|
179
|
+
Returns:
|
|
180
|
+
str: The converted content.
|
|
181
|
+
"""
|
|
182
|
+
import outlines
|
|
183
|
+
|
|
184
|
+
grammar_generator = outlines.generate.cfg(
|
|
185
|
+
self._outlines_model, grammar
|
|
186
|
+
)
|
|
187
|
+
return grammar_generator(content)
|
|
188
|
+
|
|
189
|
+
def convert( # type: ignore[override]
|
|
190
|
+
self,
|
|
191
|
+
content: str,
|
|
192
|
+
type: Literal["regex", "json", "type", "choice", "grammar"],
|
|
193
|
+
**kwargs,
|
|
194
|
+
) -> Any:
|
|
195
|
+
r"""Formats the input content into the expected BaseModel.
|
|
196
|
+
|
|
197
|
+
Args:
|
|
198
|
+
type (Literal["regex", "json", "type", "choice", "grammar"]):
|
|
199
|
+
The type of conversion to perform. Options are:
|
|
200
|
+
- "regex": Match the content against a regex pattern.
|
|
201
|
+
- "pydantic": Convert the content into a pydantic model.
|
|
202
|
+
- "json": Convert the content into a JSON based on a
|
|
203
|
+
schema.
|
|
204
|
+
- "type": Convert the content into a specified type.
|
|
205
|
+
- "choice": Match the content against a list of valid
|
|
206
|
+
choices.
|
|
207
|
+
- "grammar": Convert the content using a specified grammar.
|
|
208
|
+
content (str): The content to be formatted.
|
|
209
|
+
**kwargs: Additional keyword arguments specific to the conversion
|
|
210
|
+
type.
|
|
211
|
+
|
|
212
|
+
- For "regex":
|
|
213
|
+
regex_pattern (str): The regex pattern to use for matching.
|
|
214
|
+
|
|
215
|
+
- For "pydantic":
|
|
216
|
+
output_schema (Type[BaseModel]): The schema to validate and
|
|
217
|
+
format the pydantic model.
|
|
218
|
+
|
|
219
|
+
- For "json":
|
|
220
|
+
output_schema (Union[str, Callable]): The schema to validate
|
|
221
|
+
and format the JSON object.
|
|
222
|
+
|
|
223
|
+
- For "type":
|
|
224
|
+
type_name (str): The target type name for the conversion.
|
|
225
|
+
|
|
226
|
+
- For "choice":
|
|
227
|
+
choices (List[str]): A list of valid choices to match against.
|
|
228
|
+
|
|
229
|
+
- For "grammar":
|
|
230
|
+
grammar (str): The grammar definition to use for content
|
|
231
|
+
conversion.
|
|
232
|
+
"""
|
|
233
|
+
match type:
|
|
234
|
+
case "regex":
|
|
235
|
+
return self.convert_regex(content, kwargs.get("regex_pattern")) # type: ignore[arg-type]
|
|
236
|
+
case "pydantic":
|
|
237
|
+
return self.convert_pydantic(
|
|
238
|
+
content, kwargs.get("output_schema")
|
|
239
|
+
) # type: ignore[arg-type]
|
|
240
|
+
case "json":
|
|
241
|
+
return self.convert_json(content, kwargs.get("output_schema")) # type: ignore[arg-type]
|
|
242
|
+
case "type":
|
|
243
|
+
return self.convert_type(content, kwargs.get("type_name")) # type: ignore[arg-type]
|
|
244
|
+
case "choice":
|
|
245
|
+
return self.convert_choice(content, kwargs.get("choices")) # type: ignore[arg-type]
|
|
246
|
+
case "grammar":
|
|
247
|
+
return self.convert_grammar(content, kwargs.get("grammar")) # type: ignore[arg-type]
|
|
248
|
+
case _:
|
|
249
|
+
raise ValueError("Unsupported output schema type")
|
camel/toolkits/arxiv_toolkit.py
CHANGED
|
@@ -44,9 +44,9 @@ class ArxivToolkit(BaseToolkit):
|
|
|
44
44
|
query (str): The search query string used to search for papers on
|
|
45
45
|
arXiv.
|
|
46
46
|
paper_ids (List[str], optional): A list of specific arXiv paper
|
|
47
|
-
IDs to search for. (default
|
|
47
|
+
IDs to search for. (default: :obj:`None`)
|
|
48
48
|
max_results (int, optional): The maximum number of search results
|
|
49
|
-
to retrieve. (default
|
|
49
|
+
to retrieve. (default: :obj:`5`)
|
|
50
50
|
|
|
51
51
|
Returns:
|
|
52
52
|
Generator: A generator that yields results from the arXiv search
|
|
@@ -75,9 +75,9 @@ class ArxivToolkit(BaseToolkit):
|
|
|
75
75
|
Args:
|
|
76
76
|
query (str): The search query string.
|
|
77
77
|
paper_ids (List[str], optional): A list of specific arXiv paper
|
|
78
|
-
IDs to search for. (default
|
|
78
|
+
IDs to search for. (default: :obj:`None`)
|
|
79
79
|
max_results (int, optional): The maximum number of search results
|
|
80
|
-
to return. (default
|
|
80
|
+
to return. (default: :obj:`5`)
|
|
81
81
|
|
|
82
82
|
Returns:
|
|
83
83
|
List[Dict[str, str]]: A list of dictionaries, each containing
|
|
@@ -119,9 +119,9 @@ class ArxivToolkit(BaseToolkit):
|
|
|
119
119
|
Args:
|
|
120
120
|
query (str): The search query string.
|
|
121
121
|
paper_ids (List[str], optional): A list of specific arXiv paper
|
|
122
|
-
IDs to download. (default
|
|
122
|
+
IDs to download. (default: :obj:`None`)
|
|
123
123
|
max_results (int, optional): The maximum number of search results
|
|
124
|
-
to download. (default
|
|
124
|
+
to download. (default: :obj:`5`)
|
|
125
125
|
output_dir (str, optional): The directory to save the downloaded
|
|
126
126
|
PDFs. Defaults to the current directory.
|
|
127
127
|
|
|
@@ -228,7 +228,7 @@ class AskNewsToolkit(BaseToolkit):
|
|
|
228
228
|
return value. (default: :obj:`"string"`)
|
|
229
229
|
method (Literal["nl", "kw"]): The search method, either "nl" for
|
|
230
230
|
natural language or "kw" for keyword search.
|
|
231
|
-
(default
|
|
231
|
+
(default: :obj:`"kw"`)
|
|
232
232
|
|
|
233
233
|
Returns:
|
|
234
234
|
Union[str, dict, Tuple[str, dict]]: The Reddit search
|
|
@@ -523,7 +523,7 @@ class AsyncAskNewsToolkit(BaseToolkit):
|
|
|
523
523
|
return value. (default: :obj:"string")
|
|
524
524
|
method (Literal["nl", "kw"]): The search method, either "nl" for
|
|
525
525
|
natural language or "kw" for keyword search.
|
|
526
|
-
(default
|
|
526
|
+
(default: :obj:`"kw"`)
|
|
527
527
|
|
|
528
528
|
Returns:
|
|
529
529
|
Union[str, dict, Tuple[str, dict]]: The Reddit search
|
camel/toolkits/github_toolkit.py
CHANGED
|
@@ -139,7 +139,7 @@ class GithubToolkit(BaseToolkit):
|
|
|
139
139
|
|
|
140
140
|
Args:
|
|
141
141
|
state (Literal["open", "closed", "all"]): The state of pull
|
|
142
|
-
requests to retrieve. (default
|
|
142
|
+
requests to retrieve. (default: :obj:`all`)
|
|
143
143
|
Options are:
|
|
144
144
|
- "open": Retrieve only open pull requests.
|
|
145
145
|
- "closed": Retrieve only closed pull requests.
|
|
@@ -179,7 +179,7 @@ class GithubToolkit(BaseToolkit):
|
|
|
179
179
|
|
|
180
180
|
Args:
|
|
181
181
|
state (Literal["open", "closed", "all"]): The state of pull
|
|
182
|
-
requests to retrieve. (default
|
|
182
|
+
requests to retrieve. (default: :obj:`all`)
|
|
183
183
|
Options are:
|
|
184
184
|
- "open": Retrieve only open pull requests.
|
|
185
185
|
- "closed": Retrieve only closed pull requests.
|
|
@@ -254,7 +254,7 @@ class GithubToolkit(BaseToolkit):
|
|
|
254
254
|
Args:
|
|
255
255
|
path (str): The repository path to start the traversal from.
|
|
256
256
|
empty string means starts from the root directory.
|
|
257
|
-
(default
|
|
257
|
+
(default: :obj:`""`)
|
|
258
258
|
|
|
259
259
|
Returns:
|
|
260
260
|
List[str]: A list of file paths within the specified directory
|
|
@@ -33,7 +33,11 @@ class GoogleScholarToolkit(BaseToolkit):
|
|
|
33
33
|
"""
|
|
34
34
|
|
|
35
35
|
def __init__(
|
|
36
|
-
self,
|
|
36
|
+
self,
|
|
37
|
+
author_identifier: str,
|
|
38
|
+
is_author_name: bool = False,
|
|
39
|
+
proxy_http: Optional[str] = None,
|
|
40
|
+
proxy_https: Optional[str] = None,
|
|
37
41
|
) -> None:
|
|
38
42
|
r"""Initializes the GoogleScholarToolkit with the author's identifier.
|
|
39
43
|
|
|
@@ -42,8 +46,18 @@ class GoogleScholarToolkit(BaseToolkit):
|
|
|
42
46
|
of the author to search for.
|
|
43
47
|
is_author_name (bool): Flag to indicate if the identifier is a
|
|
44
48
|
name. (default: :obj:`False`)
|
|
49
|
+
proxy_http (Optional[str]): Proxy http address passed to pg.
|
|
50
|
+
SingleProxy. (default: :obj:`None`)
|
|
51
|
+
proxy_https (Optional[str]): Proxy https address passed to pg.
|
|
52
|
+
SingleProxy. (default: :obj:`None`)
|
|
45
53
|
"""
|
|
46
|
-
from scholarly import scholarly
|
|
54
|
+
from scholarly import ProxyGenerator, scholarly
|
|
55
|
+
|
|
56
|
+
# Set proxy if HTTP or HTTPS is provided
|
|
57
|
+
if proxy_http or proxy_https:
|
|
58
|
+
pg = ProxyGenerator()
|
|
59
|
+
pg.SingleProxy(http=proxy_http, https=proxy_https)
|
|
60
|
+
scholarly.use_proxy(pg)
|
|
47
61
|
|
|
48
62
|
self.scholarly = scholarly
|
|
49
63
|
self.author_identifier = author_identifier
|
camel/toolkits/meshy_toolkit.py
CHANGED
|
@@ -117,9 +117,9 @@ class MeshyToolkit(BaseToolkit):
|
|
|
117
117
|
Args:
|
|
118
118
|
task_id (str): The ID of the task to monitor.
|
|
119
119
|
polling_interval (int): Seconds to wait between status checks.
|
|
120
|
-
(default
|
|
120
|
+
(default: :obj:`10`)
|
|
121
121
|
timeout (int): Maximum seconds to wait before timing out.
|
|
122
|
-
(default
|
|
122
|
+
(default: :obj:`3600`)
|
|
123
123
|
|
|
124
124
|
Returns:
|
|
125
125
|
Dict[str, Any]: Final response from the API when task completes.
|
camel/toolkits/search_toolkit.py
CHANGED
|
@@ -181,7 +181,7 @@ class SearchToolkit(BaseToolkit):
|
|
|
181
181
|
country (str): The search query country where results come from.
|
|
182
182
|
The country string is limited to 2 character country codes of
|
|
183
183
|
supported countries. For a list of supported values, see
|
|
184
|
-
Country Codes. (default
|
|
184
|
+
Country Codes. (default: :obj:`US`)
|
|
185
185
|
search_lang (str): The search language preference. The 2 or more
|
|
186
186
|
character language code for which search results are provided.
|
|
187
187
|
For a list of possible values, see Language Codes.
|
|
@@ -416,7 +416,7 @@ class SearchToolkit(BaseToolkit):
|
|
|
416
416
|
query (str): The query to send to Wolfram Alpha.
|
|
417
417
|
is_detailed (bool): Whether to include additional details
|
|
418
418
|
including step by step information in the result.
|
|
419
|
-
(default
|
|
419
|
+
(default: :obj:`False`)
|
|
420
420
|
|
|
421
421
|
Returns:
|
|
422
422
|
Union[str, Dict[str, Any]]: The result from Wolfram Alpha.
|
camel/types/enums.py
CHANGED
|
@@ -34,6 +34,7 @@ class ModelType(UnifiedModelType, Enum):
|
|
|
34
34
|
GPT_4_TURBO = "gpt-4-turbo"
|
|
35
35
|
GPT_4O = "gpt-4o"
|
|
36
36
|
GPT_4O_MINI = "gpt-4o-mini"
|
|
37
|
+
O1 = "o1"
|
|
37
38
|
O1_PREVIEW = "o1-preview"
|
|
38
39
|
O1_MINI = "o1-mini"
|
|
39
40
|
|
|
@@ -166,6 +167,7 @@ class ModelType(UnifiedModelType, Enum):
|
|
|
166
167
|
ModelType.GPT_4_TURBO,
|
|
167
168
|
ModelType.GPT_4O,
|
|
168
169
|
ModelType.GPT_4O_MINI,
|
|
170
|
+
ModelType.O1,
|
|
169
171
|
ModelType.O1_PREVIEW,
|
|
170
172
|
ModelType.O1_MINI,
|
|
171
173
|
}
|
|
@@ -452,6 +454,7 @@ class ModelType(UnifiedModelType, Enum):
|
|
|
452
454
|
}:
|
|
453
455
|
return 131_072
|
|
454
456
|
elif self in {
|
|
457
|
+
ModelType.O1,
|
|
455
458
|
ModelType.CLAUDE_2_1,
|
|
456
459
|
ModelType.CLAUDE_3_OPUS,
|
|
457
460
|
ModelType.CLAUDE_3_SONNET,
|
camel/utils/commons.py
CHANGED
|
@@ -12,7 +12,6 @@
|
|
|
12
12
|
# limitations under the License.
|
|
13
13
|
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
14
|
import importlib
|
|
15
|
-
import logging
|
|
16
15
|
import os
|
|
17
16
|
import platform
|
|
18
17
|
import re
|
|
@@ -40,19 +39,14 @@ import pydantic
|
|
|
40
39
|
import requests
|
|
41
40
|
from pydantic import BaseModel
|
|
42
41
|
|
|
43
|
-
from camel.logger import get_logger
|
|
44
42
|
from camel.types import TaskType
|
|
45
43
|
|
|
46
44
|
from .constants import Constants
|
|
47
45
|
|
|
48
46
|
F = TypeVar('F', bound=Callable[..., Any])
|
|
49
47
|
|
|
50
|
-
logger = get_logger(__name__)
|
|
51
48
|
|
|
52
|
-
|
|
53
|
-
def print_text_animated(
|
|
54
|
-
text, delay: float = 0.02, end: str = "", log_level: int = logging.INFO
|
|
55
|
-
):
|
|
49
|
+
def print_text_animated(text, delay: float = 0.02, end: str = ""):
|
|
56
50
|
r"""Prints the given text with an animated effect.
|
|
57
51
|
|
|
58
52
|
Args:
|
|
@@ -61,22 +55,10 @@ def print_text_animated(
|
|
|
61
55
|
(default: :obj:`0.02`)
|
|
62
56
|
end (str, optional): The end character to print after each
|
|
63
57
|
character of text. (default: :obj:`""`)
|
|
64
|
-
log_level (int, optional): The log level to use.
|
|
65
|
-
See https://docs.python.org/3/library/logging.html#levels
|
|
66
|
-
(default: :obj:`logging.INFO`)
|
|
67
58
|
"""
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
for char in text:
|
|
73
|
-
print(char, end=end, flush=True)
|
|
74
|
-
time.sleep(delay)
|
|
75
|
-
# Close the log entry
|
|
76
|
-
logger.log(log_level, '')
|
|
77
|
-
else:
|
|
78
|
-
# This may be relevant for logging frameworks
|
|
79
|
-
logger.log(log_level, text)
|
|
59
|
+
for char in text:
|
|
60
|
+
print(char, end=end, flush=True)
|
|
61
|
+
time.sleep(delay)
|
|
80
62
|
|
|
81
63
|
|
|
82
64
|
def get_prompt_template_key_words(template: str) -> Set[str]:
|
camel/utils/token_counting.py
CHANGED
|
@@ -63,6 +63,7 @@ def get_model_encoding(value_for_tiktoken: str):
|
|
|
63
63
|
encoding = tiktoken.encoding_for_model(value_for_tiktoken)
|
|
64
64
|
except KeyError:
|
|
65
65
|
if value_for_tiktoken in [
|
|
66
|
+
ModelType.O1.value,
|
|
66
67
|
ModelType.O1_MINI.value,
|
|
67
68
|
ModelType.O1_PREVIEW.value,
|
|
68
69
|
]:
|
|
@@ -144,12 +145,19 @@ class OpenAITokenCounter(BaseTokenCounter):
|
|
|
144
145
|
num_tokens += self.tokens_per_message
|
|
145
146
|
for key, value in message.items():
|
|
146
147
|
if not isinstance(value, list):
|
|
147
|
-
num_tokens += len(
|
|
148
|
+
num_tokens += len(
|
|
149
|
+
self.encoding.encode(str(value), disallowed_special=())
|
|
150
|
+
)
|
|
148
151
|
else:
|
|
149
152
|
for item in value:
|
|
150
153
|
if item["type"] == "text":
|
|
151
154
|
num_tokens += len(
|
|
152
|
-
self.encoding.encode(
|
|
155
|
+
self.encoding.encode(
|
|
156
|
+
str(
|
|
157
|
+
item["text"],
|
|
158
|
+
),
|
|
159
|
+
disallowed_special=(),
|
|
160
|
+
)
|
|
153
161
|
)
|
|
154
162
|
elif item["type"] == "image_url":
|
|
155
163
|
image_str: str = item["image_url"]["url"]
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: camel-ai
|
|
3
|
-
Version: 0.2.
|
|
3
|
+
Version: 0.2.14
|
|
4
4
|
Summary: Communicative Agents for AI Society Study
|
|
5
5
|
Home-page: https://www.camel-ai.org/
|
|
6
6
|
License: Apache-2.0
|
|
@@ -53,6 +53,7 @@ Requires-Dist: e2b-code-interpreter (>=1.0.3,<2.0.0) ; extra == "tools" or extra
|
|
|
53
53
|
Requires-Dist: eval-type-backport (==0.2.0)
|
|
54
54
|
Requires-Dist: ffmpeg-python (>=0.2.0,<0.3.0) ; extra == "tools" or extra == "all"
|
|
55
55
|
Requires-Dist: firecrawl-py (>=1.0.0,<2.0.0) ; extra == "tools" or extra == "all"
|
|
56
|
+
Requires-Dist: fish-audio-sdk (>=2024.12.5,<2025.0.0) ; extra == "model-platforms" or extra == "all"
|
|
56
57
|
Requires-Dist: google-cloud-storage (>=2.18.0,<3.0.0) ; extra == "object-storages" or extra == "all"
|
|
57
58
|
Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
|
|
58
59
|
Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
|
|
@@ -67,16 +68,16 @@ Requires-Dist: mock (>=5,<6) ; extra == "test"
|
|
|
67
68
|
Requires-Dist: nebula3-python (==3.8.2) ; extra == "rag" or extra == "graph-storages" or extra == "all"
|
|
68
69
|
Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "rag" or extra == "graph-storages" or extra == "all"
|
|
69
70
|
Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
|
|
70
|
-
Requires-Dist: nltk (==3.8.1) ; extra == "tools" or extra == "all"
|
|
71
71
|
Requires-Dist: notion-client (>=2.2.1,<3.0.0) ; extra == "tools" or extra == "all"
|
|
72
72
|
Requires-Dist: numpy (>=1,<2)
|
|
73
|
-
Requires-Dist: openai (>=1.
|
|
73
|
+
Requires-Dist: openai (>=1.58.1,<2.0.0)
|
|
74
74
|
Requires-Dist: openapi-spec-validator (>=0.7.1,<0.8.0) ; extra == "tools" or extra == "all"
|
|
75
75
|
Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
|
|
76
|
+
Requires-Dist: outlines (>=0.1.7,<0.2.0) ; extra == "tools" or extra == "all"
|
|
76
77
|
Requires-Dist: pandoc
|
|
77
78
|
Requires-Dist: pathlib (>=1.0.1,<2.0.0)
|
|
78
79
|
Requires-Dist: pdfplumber (>=0.11.0,<0.12.0) ; extra == "tools" or extra == "all"
|
|
79
|
-
Requires-Dist: pillow (>=
|
|
80
|
+
Requires-Dist: pillow (>=11.0.0,<12.0.0) ; extra == "tools" or extra == "all"
|
|
80
81
|
Requires-Dist: prance (>=23.6.21.0,<24.0.0.0) ; extra == "tools" or extra == "all"
|
|
81
82
|
Requires-Dist: praw (>=7.7.1,<8.0.0) ; extra == "tools" or extra == "all"
|
|
82
83
|
Requires-Dist: protobuf (>=4,<5)
|
|
@@ -103,12 +104,12 @@ Requires-Dist: slack-sdk (>=3.27.2,<4.0.0) ; extra == "tools" or extra == "all"
|
|
|
103
104
|
Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
|
|
104
105
|
Requires-Dist: stripe (>=11.3.0,<12.0.0) ; extra == "tools" or extra == "all"
|
|
105
106
|
Requires-Dist: tavily-python (>=0.5.0,<0.6.0) ; extra == "search-tools" or extra == "all"
|
|
106
|
-
Requires-Dist: textblob (>=0.
|
|
107
|
+
Requires-Dist: textblob (>=0.17.1,<0.18.0) ; extra == "tools" or extra == "all"
|
|
107
108
|
Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
|
|
108
109
|
Requires-Dist: torch (==2.2.1) ; (platform_system == "Darwin" and platform_machine != "arm64") and (extra == "huggingface-agent" or extra == "all")
|
|
109
110
|
Requires-Dist: torch (>=2,<3) ; (platform_system != "Darwin" or platform_machine == "arm64") and (extra == "huggingface-agent" or extra == "all")
|
|
110
111
|
Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
|
|
111
|
-
Requires-Dist: unstructured[all-docs] (
|
|
112
|
+
Requires-Dist: unstructured[all-docs] (==0.16.11) ; extra == "rag" or extra == "tools" or extra == "all" or extra == "all"
|
|
112
113
|
Requires-Dist: wikipedia (>=1,<2) ; extra == "search-tools" or extra == "tools" or extra == "all"
|
|
113
114
|
Requires-Dist: wolframalpha (>=5.0.0,<6.0.0) ; extra == "search-tools" or extra == "tools" or extra == "all"
|
|
114
115
|
Requires-Dist: yt-dlp (>=2024.11.4,<2025.0.0) ; extra == "tools" or extra == "all"
|
|
@@ -262,7 +263,7 @@ conda create --name camel python=3.10
|
|
|
262
263
|
conda activate camel
|
|
263
264
|
|
|
264
265
|
# Clone github repo
|
|
265
|
-
git clone -b v0.2.
|
|
266
|
+
git clone -b v0.2.14 https://github.com/camel-ai/camel.git
|
|
266
267
|
|
|
267
268
|
# Change directory into project directory
|
|
268
269
|
cd camel
|
|
@@ -441,6 +442,8 @@ Practical guides and tutorials for implementing specific functionalities in CAME
|
|
|
441
442
|
| **[3 Ways to Ingest Data from Websites with Firecrawl](https://docs.camel-ai.org/cookbooks/ingest_data_from_websites_with_Firecrawl.html)** | Explore three methods for extracting and processing data from websites using Firecrawl. |
|
|
442
443
|
| **[Data Generation with CAMEL and Finetuning with Unsloth](https://docs.camel-ai.org/cookbooks/sft_data_generation_and_unsloth_finetuning.html)** | Learn how to generate data with CAMEL and fine-tune models effectively with Unsloth. |
|
|
443
444
|
| **[Customer Service Discord Bot with Agentic RAG](https://docs.camel-ai.org/cookbooks/customer_service_Discord_bot_with_agentic_RAG.html)** | Learn how to build a robust customer service bot for Discord using Agentic RAG. |
|
|
445
|
+
| **[Create AI Agents that work with your PDFs using Chunkr & Mistral AI](https://docs.camel-ai.org/cookbooks/agent_with_chunkr_for_pdf_parsing.html)** | Learn how to create AI agents that work with your PDFs using Chunkr and Mistral AI. |
|
|
446
|
+
| **[Data Gen with Real Function Calls and Hermes Format](https://docs.camel-ai.org/cookbooks/data_gen_with_real_function_calls_and_hermes_format.html)** | Explore how to generate data with real function calls and the Hermes format. |
|
|
444
447
|
|
|
445
448
|
## Utilize Various LLMs as Backends
|
|
446
449
|
|
|
@@ -480,10 +483,10 @@ We implemented amazing research ideas from other works for you to build, compare
|
|
|
480
483
|
We warmly invite you to use CAMEL for your impactful research.
|
|
481
484
|
|
|
482
485
|
## News
|
|
483
|
-
📢 Added
|
|
484
|
-
-
|
|
485
|
-
- Integrated
|
|
486
|
-
-
|
|
486
|
+
📢 Added support for Qwen models, Deepseek models to the 🐫 CAMEL framework! (Nov 28, 2024)
|
|
487
|
+
- Integrate SGLang into the 🐫 CAMEL framework. (Dec, 13, 2024)
|
|
488
|
+
- Integrated Reward Model into the 🐫 CAMEL framework. (Dec, 13, 2024)
|
|
489
|
+
- Added GAIA Benchmark! (Dec, 09, 2024)
|
|
487
490
|
- ...
|
|
488
491
|
- Released AI Society and Code dataset (April 2, 2023)
|
|
489
492
|
- Initial release of `CAMEL` python library (March 21, 2023)
|