camel-ai 0.2.11__py3-none-any.whl → 0.2.13__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +14 -2
- camel/benchmarks/__init__.py +18 -0
- camel/benchmarks/base.py +152 -0
- camel/benchmarks/gaia.py +478 -0
- camel/configs/__init__.py +3 -0
- camel/configs/ollama_config.py +4 -2
- camel/configs/sglang_config.py +71 -0
- camel/data_collector/__init__.py +19 -0
- camel/data_collector/alpaca_collector.py +127 -0
- camel/data_collector/base.py +211 -0
- camel/data_collector/sharegpt_collector.py +205 -0
- camel/datahubs/__init__.py +23 -0
- camel/datahubs/base.py +136 -0
- camel/datahubs/huggingface.py +433 -0
- camel/datahubs/models.py +22 -0
- camel/embeddings/openai_compatible_embedding.py +1 -1
- camel/embeddings/openai_embedding.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/e2b_interpreter.py +136 -0
- camel/loaders/__init__.py +3 -1
- camel/loaders/base_io.py +41 -41
- camel/messages/__init__.py +2 -0
- camel/messages/base.py +5 -5
- camel/models/__init__.py +4 -0
- camel/models/anthropic_model.py +15 -5
- camel/models/azure_openai_model.py +1 -1
- camel/models/base_model.py +28 -0
- camel/models/deepseek_model.py +1 -1
- camel/models/fish_audio_model.py +146 -0
- camel/models/gemini_model.py +1 -1
- camel/models/groq_model.py +2 -2
- camel/models/model_factory.py +3 -0
- camel/models/nemotron_model.py +1 -1
- camel/models/nvidia_model.py +1 -1
- camel/models/ollama_model.py +13 -1
- camel/models/openai_compatible_model.py +1 -1
- camel/models/openai_model.py +1 -27
- camel/models/qwen_model.py +1 -1
- camel/models/reward/__init__.py +22 -0
- camel/models/reward/base_reward_model.py +58 -0
- camel/models/reward/evaluator.py +63 -0
- camel/models/reward/nemotron_model.py +112 -0
- camel/models/samba_model.py +1 -1
- camel/models/sglang_model.py +225 -0
- camel/models/togetherai_model.py +1 -1
- camel/models/vllm_model.py +2 -2
- camel/models/yi_model.py +1 -1
- camel/models/zhipuai_model.py +1 -1
- camel/personas/persona_hub.py +2 -2
- camel/runtime/configs.py +12 -12
- camel/runtime/docker_runtime.py +7 -7
- camel/runtime/llm_guard_runtime.py +3 -3
- camel/runtime/remote_http_runtime.py +5 -5
- camel/runtime/utils/function_risk_toolkit.py +1 -1
- camel/runtime/utils/ignore_risk_toolkit.py +2 -2
- camel/schemas/openai_converter.py +2 -2
- camel/societies/workforce/role_playing_worker.py +2 -2
- camel/societies/workforce/single_agent_worker.py +2 -2
- camel/societies/workforce/workforce.py +3 -3
- camel/storages/object_storages/amazon_s3.py +2 -2
- camel/storages/object_storages/azure_blob.py +2 -2
- camel/storages/object_storages/google_cloud.py +2 -2
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/arxiv_toolkit.py +6 -6
- camel/toolkits/ask_news_toolkit.py +2 -2
- camel/toolkits/code_execution.py +5 -1
- camel/toolkits/function_tool.py +41 -0
- camel/toolkits/github_toolkit.py +3 -3
- camel/toolkits/google_scholar_toolkit.py +16 -2
- camel/toolkits/math_toolkit.py +47 -16
- camel/toolkits/meshy_toolkit.py +2 -2
- camel/toolkits/search_toolkit.py +155 -3
- camel/toolkits/stripe_toolkit.py +273 -0
- camel/types/__init__.py +2 -0
- camel/types/enums.py +27 -2
- camel/utils/token_counting.py +31 -12
- {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/METADATA +24 -14
- {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/RECORD +81 -61
- {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/WHEEL +0 -0
camel/toolkits/stripe_toolkit.py
ADDED
@@ -0,0 +1,273 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import json
+import logging
+import os
+from typing import List
+
+from camel.toolkits import FunctionTool
+from camel.toolkits.base import BaseToolkit
+from camel.utils import api_keys_required
+
+
+class StripeToolkit(BaseToolkit):
+    r"""A class representing a toolkit for Stripe operations.
+
+    This toolkit provides methods to interact with the Stripe API,
+    allowing users to operate stripe core resources, including Customer,
+    Balance, BalanceTransaction, Payment, Refund
+
+    Use the Developers Dashboard https://dashboard.stripe.com/test/apikeys to
+    create an API keys as STRIPE_API_KEY.
+
+    Attributes:
+        logger (Logger): a logger to write logs.
+    """
+
+    @api_keys_required("STRIPE_API_KEY")
+    def __init__(self, retries: int = 3):
+        r"""Initializes the StripeToolkit with the specified number of
+        retries.
+
+        Args:
+            retries (int,optional): Number of times to retry the request in
+                case of failure. (default: :obj:`3`)
+        """
+        import stripe
+
+        stripe.max_network_retries = retries
+        stripe.log = 'info'
+        self.logger = logging.getLogger(__name__)
+        self.logger.setLevel(logging.INFO)
+        handler = logging.StreamHandler()
+        formatter = logging.Formatter(
+            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+        )
+        handler.setFormatter(formatter)
+        if not self.logger.handlers:
+            self.logger.addHandler(handler)
+        stripe.api_key = os.environ.get("STRIPE_API_KEY")
+
+    def customer_get(self, customer_id: str) -> str:
+        r"""Retrieve a customer by ID.
+
+        Args:
+            customer_id (str): The ID of the customer to retrieve.
+
+        Returns:
+            str: The customer data as a str.
+        """
+        import stripe
+
+        try:
+            self.logger.info(f"Retrieving customer with ID: {customer_id}")
+            customer = stripe.Customer.retrieve(customer_id)
+            self.logger.info(f"Retrieved customer: {customer.id}")
+            json_string = json.dumps(customer)
+            return json_string
+        except Exception as e:
+            return self.handle_exception("customer_get", e)
+
+    def customer_list(self, limit: int = 100) -> str:
+        r"""List customers.
+
+        Args:
+            limit (int, optional): Number of customers to retrieve. (default:
+                :obj:`100`)
+
+        Returns:
+            str: An output str if successful, or an error message string if
+                failed.
+        """
+        import stripe
+
+        try:
+            self.logger.info(f"Listing customers with limit={limit}")
+            customers = stripe.Customer.list(limit=limit).data
+            self.logger.info(
+                f"Successfully retrieved {len(customers)} customers."
+            )
+            return json.dumps([customer for customer in customers])
+        except Exception as e:
+            return self.handle_exception("customer_list", e)
+
+    def balance_get(self) -> str:
+        r"""Retrieve your account balance.
+
+        Returns:
+            str: A str containing the account balance if successful, or an
+                error message string if failed.
+        """
+        import stripe
+
+        try:
+            self.logger.info("Retrieving account balance.")
+            balance = stripe.Balance.retrieve()
+            self.logger.info(
+                f"Successfully retrieved account balance: {balance}."
+            )
+            return json.dumps(balance)
+        except Exception as e:
+            return self.handle_exception("balance_get", e)
+
+    def balance_transaction_list(self, limit: int = 100) -> str:
+        r"""List your balance transactions.
+
+        Args:
+            limit (int, optional): Number of balance transactions to retrieve.
+                (default::obj:`100`)
+
+        Returns:
+            str: A list of balance transaction data if successful, or an error
+                message string if failed.
+        """
+        import stripe
+
+        try:
+            self.logger.info(
+                f"Listing balance transactions with limit={limit}"
+            )
+            transactions = stripe.BalanceTransaction.list(limit=limit).data
+            self.logger.info(
+                f"Successfully retrieved {len(transactions)} "
+                "balance transactions."
+            )
+            return json.dumps([transaction for transaction in transactions])
+        except Exception as e:
+            return self.handle_exception("balance_transaction_list", e)
+
+    def payment_get(self, payment_id: str) -> str:
+        r"""Retrieve a payment by ID.
+
+        Args:
+            payment_id (str): The ID of the payment to retrieve.
+
+        Returns:
+            str:The payment data as a str if successful, or an error message
+                string if failed.
+        """
+        import stripe
+
+        try:
+            self.logger.info(f"Retrieving payment with ID: {payment_id}")
+            payment = stripe.PaymentIntent.retrieve(payment_id)
+            self.logger.info(f"Retrieved payment: {payment.id}")
+            return json.dumps(payment)
+        except Exception as e:
+            return self.handle_exception("payment_get", e)
+
+    def payment_list(self, limit: int = 100) -> str:
+        r"""List payments.
+
+        Args:
+            limit (int, optional): Number of payments to retrieve.
+                (default::obj:`100`)
+
+        Returns:
+            str: A list of payment data if successful, or an error message
+                string if failed.
+        """
+        import stripe
+
+        try:
+            self.logger.info(f"Listing payments with limit={limit}")
+            payments = stripe.PaymentIntent.list(limit=limit).data
+            self.logger.info(
+                f"Successfully retrieved {len(payments)} payments."
+            )
+            return json.dumps([payment for payment in payments])
+        except Exception as e:
+            return self.handle_exception("payment_list", e)
+
+    def refund_get(self, refund_id: str) -> str:
+        r"""Retrieve a refund by ID.
+
+        Args:
+            refund_id (str): The ID of the refund to retrieve.
+
+        Returns:
+            str: The refund data as a str if successful, or an error message
+                string if failed.
+        """
+        import stripe
+
+        try:
+            self.logger.info(f"Retrieving refund with ID: {refund_id}")
+            refund = stripe.Refund.retrieve(refund_id)
+            self.logger.info(f"Retrieved refund: {refund.id}")
+            return json.dumps(refund)
+        except Exception as e:
+            return self.handle_exception("refund_get", e)
+
+    def refund_list(self, limit: int = 100) -> str:
+        r"""List refunds.
+
+        Args:
+            limit (int, optional): Number of refunds to retrieve.
+                (default::obj:`100`)
+
+        Returns:
+            str: A list of refund data as a str if successful, or an error
+                message string if failed.
+        """
+        import stripe
+
+        try:
+            self.logger.info(f"Listing refunds with limit={limit}")
+            refunds = stripe.Refund.list(limit=limit).data
+            self.logger.info(f"Successfully retrieved {len(refunds)} refunds.")
+            return json.dumps([refund for refund in refunds])
+        except Exception as e:
+            return self.handle_exception("refund_list", e)
+
+    def handle_exception(self, func_name: str, error: Exception) -> str:
+        r"""Handle exceptions by logging and returning an error message.
+
+        Args:
+            func_name (str): The name of the function where the exception
+                occurred.
+            error (Exception): The exception instance.
+
+        Returns:
+            str: An error message string.
+        """
+        from stripe import StripeError
+
+        if isinstance(error, StripeError):
+            message = error.user_message or str(error)
+            self.logger.error(f"Stripe error in {func_name}: {message}")
+            return f"Stripe error in {func_name}: {message}"
+        else:
+            self.logger.error(f"Unexpected error in {func_name}: {error!s}")
+            return f"Unexpected error in {func_name}: {error!s}"
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Returns a list of FunctionTool objects representing the
+        functions in the toolkit.
+
+        Returns:
+            List[FunctionTool]: A list of FunctionTool objects for the
+                toolkit methods.
+        """
+        return [
+            FunctionTool(self.customer_get),
+            FunctionTool(self.customer_list),
+            FunctionTool(self.balance_get),
+            FunctionTool(self.balance_transaction_list),
+            FunctionTool(self.payment_get),
+            FunctionTool(self.payment_list),
+            FunctionTool(self.refund_get),
+            FunctionTool(self.refund_list),
+        ]
camel/types/__init__.py
CHANGED
@@ -14,6 +14,7 @@
 from .enums import (
     AudioModelType,
     EmbeddingModelType,
+    HuggingFaceRepoType,
     ModelPlatformType,
     ModelType,
     OpenAIBackendRole,
@@ -73,4 +74,5 @@ __all__ = [
     'NOT_GIVEN',
     'NotGiven',
     'ParsedChatCompletion',
+    'HuggingFaceRepoType',
 ]
camel/types/enums.py
CHANGED
@@ -41,9 +41,12 @@ class ModelType(UnifiedModelType, Enum):
     GLM_4V = 'glm-4v'
     GLM_3_TURBO = "glm-3-turbo"
 
+    # Groq platform models
     GROQ_LLAMA_3_1_8B = "llama-3.1-8b-instant"
     GROQ_LLAMA_3_1_70B = "llama-3.1-70b-versatile"
     GROQ_LLAMA_3_1_405B = "llama-3.1-405b-reasoning"
+    GROQ_LLAMA_3_3_70B = "llama-3.3-70b-versatile"
+    GROQ_LLAMA_3_3_70B_PREVIEW = "llama-3.3-70b-specdec"
     GROQ_LLAMA_3_8B = "llama3-8b-8192"
     GROQ_LLAMA_3_70B = "llama3-70b-8192"
     GROQ_MIXTRAL_8_7B = "mixtral-8x7b-32768"
@@ -59,10 +62,11 @@ class ModelType(UnifiedModelType, Enum):
     CLAUDE_INSTANT_1_2 = "claude-instant-1.2"
 
     # Claude3 models
-    CLAUDE_3_OPUS = "claude-3-opus-
+    CLAUDE_3_OPUS = "claude-3-opus-latest"
     CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
     CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
-    CLAUDE_3_5_SONNET = "claude-3-5-sonnet-
+    CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest"
+    CLAUDE_3_5_HAIKU = "claude-3-5-haiku-latest"
 
     # Nvidia models
     NVIDIA_NEMOTRON_340B_INSTRUCT = "nvidia/nemotron-4-340b-instruct"
@@ -76,6 +80,7 @@ class ModelType(UnifiedModelType, Enum):
     NVIDIA_LLAMA3_1_405B_INSTRUCT = "meta/llama-3.1-405b-instruct"
     NVIDIA_LLAMA3_2_1B_INSTRUCT = "meta/llama-3.2-1b-instruct"
     NVIDIA_LLAMA3_2_3B_INSTRUCT = "meta/llama-3.2-3b-instruct"
+    NVIDIA_LLAMA3_3_70B_INSTRUCT = "meta/llama-3.3-70b-instruct"
 
     # Gemini models
     GEMINI_1_5_FLASH = "gemini-1.5-flash"
@@ -202,6 +207,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.CLAUDE_3_SONNET,
             ModelType.CLAUDE_3_HAIKU,
             ModelType.CLAUDE_3_5_SONNET,
+            ModelType.CLAUDE_3_5_HAIKU,
         }
 
     @property
@@ -211,6 +217,8 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GROQ_LLAMA_3_1_8B,
             ModelType.GROQ_LLAMA_3_1_70B,
             ModelType.GROQ_LLAMA_3_1_405B,
+            ModelType.GROQ_LLAMA_3_3_70B,
+            ModelType.GROQ_LLAMA_3_3_70B_PREVIEW,
             ModelType.GROQ_LLAMA_3_8B,
             ModelType.GROQ_LLAMA_3_70B,
             ModelType.GROQ_MIXTRAL_8_7B,
@@ -249,6 +257,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NVIDIA_LLAMA3_1_405B_INSTRUCT,
             ModelType.NVIDIA_LLAMA3_2_1B_INSTRUCT,
             ModelType.NVIDIA_LLAMA3_2_3B_INSTRUCT,
+            ModelType.NVIDIA_LLAMA3_3_70B_INSTRUCT,
         }
 
     @property
@@ -362,6 +371,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GPT_4,
             ModelType.GROQ_LLAMA_3_8B,
             ModelType.GROQ_LLAMA_3_70B,
+            ModelType.GROQ_LLAMA_3_3_70B_PREVIEW,
             ModelType.GROQ_GEMMA_7B_IT,
             ModelType.GROQ_GEMMA_2_9B_IT,
             ModelType.GLM_3_TURBO,
@@ -428,6 +438,8 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NVIDIA_LLAMA3_1_405B_INSTRUCT,
             ModelType.NVIDIA_LLAMA3_2_1B_INSTRUCT,
             ModelType.NVIDIA_LLAMA3_2_3B_INSTRUCT,
+            ModelType.NVIDIA_LLAMA3_3_70B_INSTRUCT,
+            ModelType.GROQ_LLAMA_3_3_70B,
         }:
             return 128_000
         elif self in {
@@ -445,6 +457,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.CLAUDE_3_SONNET,
             ModelType.CLAUDE_3_HAIKU,
             ModelType.CLAUDE_3_5_SONNET,
+            ModelType.CLAUDE_3_5_HAIKU,
             ModelType.YI_MEDIUM_200K,
         }:
             return 200_000
@@ -611,6 +624,7 @@ class ModelPlatformType(Enum):
     QWEN = "tongyi-qianwen"
     NVIDIA = "nvidia"
    DEEPSEEK = "deepseek"
+    SGLANG = "sglang"
 
     @property
     def is_openai(self) -> bool:
@@ -642,6 +656,11 @@ class ModelPlatformType(Enum):
         r"""Returns whether this platform is vllm."""
         return self is ModelPlatformType.VLLM
 
+    @property
+    def is_sglang(self) -> bool:
+        r"""Returns whether this platform is sglang."""
+        return self is ModelPlatformType.SGLANG
+
     @property
     def is_together(self) -> bool:
         r"""Returns whether this platform is together."""
@@ -749,3 +768,9 @@ class JinaReturnFormat(Enum):
     MARKDOWN = "markdown"
     HTML = "html"
     TEXT = "text"
+
+
+class HuggingFaceRepoType(str, Enum):
+    DATASET = "dataset"
+    MODEL = "model"
+    SPACE = "space"
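Taken together, the `camel/types` changes add Llama 3.3 identifiers for Groq and NVIDIA, Claude 3.5 Haiku, an SGLANG platform member, and a `HuggingFaceRepoType` enum. A small sketch (not part of the diff) of how the new members can be used; the `token_limit` property name is an assumption, since the hunks above only show the returned values:

```python
# Sketch only, not part of the release diff.
from camel.types import HuggingFaceRepoType, ModelPlatformType, ModelType

# New model identifiers added in 0.2.13.
print(ModelType.GROQ_LLAMA_3_3_70B.value)   # "llama-3.3-70b-versatile"
print(ModelType.CLAUDE_3_5_HAIKU.value)     # "claude-3-5-haiku-latest"

# Assumption: the `return 200_000` hunk above belongs to ModelType.token_limit.
print(ModelType.CLAUDE_3_5_HAIKU.token_limit)

# New platform member and its helper property.
print(ModelPlatformType.SGLANG.is_sglang)   # True

# New repo-type enum exported from camel.types.
print(HuggingFaceRepoType.DATASET.value)    # "dataset"
```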
camel/utils/token_counting.py
CHANGED
@@ -144,12 +144,19 @@ class OpenAITokenCounter(BaseTokenCounter):
             num_tokens += self.tokens_per_message
             for key, value in message.items():
                 if not isinstance(value, list):
-                    num_tokens += len(
+                    num_tokens += len(
+                        self.encoding.encode(str(value), disallowed_special=())
+                    )
                 else:
                     for item in value:
                         if item["type"] == "text":
                             num_tokens += len(
-                                self.encoding.encode(
+                                self.encoding.encode(
+                                    str(
+                                        item["text"],
+                                    ),
+                                    disallowed_special=(),
+                                )
                             )
                         elif item["type"] == "image_url":
                             image_str: str = item["image_url"]["url"]
@@ -222,13 +229,18 @@ class OpenAITokenCounter(BaseTokenCounter):
 
 class AnthropicTokenCounter(BaseTokenCounter):
     @dependencies_required('anthropic')
-    def __init__(self):
-        r"""Constructor for the token counter for Anthropic models.
+    def __init__(self, model: str):
+        r"""Constructor for the token counter for Anthropic models.
+
+        Args:
+            model (str): The name of the Anthropic model being used.
+        """
         from anthropic import Anthropic
 
         self.client = Anthropic()
-        self.
+        self.model = model
 
+    @dependencies_required('anthropic')
     def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
         r"""Count number of tokens in the provided message list using
         loaded tokenizer specific for this type of model.
@@ -240,11 +252,18 @@ class AnthropicTokenCounter(BaseTokenCounter):
         Returns:
             int: Number of tokens in the messages.
         """
-
-
-
-
-
+        from anthropic.types.beta import BetaMessageParam
+
+        return self.client.beta.messages.count_tokens(
+            messages=[
+                BetaMessageParam(
+                    content=str(msg["content"]),
+                    role="user" if msg["role"] == "user" else "assistant",
+                )
+                for msg in messages
+            ],
+            model=self.model,
+        ).input_tokens
 
 
 class GeminiTokenCounter(BaseTokenCounter):
@@ -360,7 +379,7 @@ class MistralTokenCounter(BaseTokenCounter):
                 ModelType.MISTRAL_CODESTRAL,
                 ModelType.MISTRAL_CODESTRAL_MAMBA,
             }
-            else self.model_type
+            else self.model_type
         )
 
         self.tokenizer = MistralTokenizer.from_model(model_name)
@@ -403,7 +422,7 @@ class MistralTokenCounter(BaseTokenCounter):
         )
 
         mistral_request = ChatCompletionRequest(  # type: ignore[type-var]
-            model=self.model_type
+            model=self.model_type,
             messages=[openai_msg],
         )
 
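The practical effect of the `AnthropicTokenCounter` rework is that a model name must now be passed to the constructor and token counting is delegated to Anthropic's beta count-tokens endpoint rather than a local tokenizer. A hedged sketch (not part of the diff); it assumes the `anthropic` package is installed and `ANTHROPIC_API_KEY` is set, since `count_tokens_from_messages` now makes an API call:

```python
# Sketch only, not part of the release diff.
from camel.utils.token_counting import AnthropicTokenCounter

# The model name is required as of 0.2.13 and is forwarded to the
# count_tokens request.
counter = AnthropicTokenCounter(model="claude-3-5-haiku-latest")

messages = [
    {"role": "user", "content": "How many tokens is this request?"},
    {"role": "assistant", "content": "Let me count them for you."},
]

# Returns the input_tokens value reported by Anthropic's API.
print(counter.count_tokens_from_messages(messages))
```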
{camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.2.11
+Version: 0.2.13
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -28,12 +28,12 @@ Provides-Extra: tools
 Provides-Extra: vector-databases
 Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
-Requires-Dist: agentops (>=0.3.
-Requires-Dist: anthropic (>=0.
+Requires-Dist: agentops (>=0.3.21,<0.4.0) ; extra == "tools" or extra == "all"
+Requires-Dist: anthropic (>=0.40.0,<0.41.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: apify_client (>=1.8.1,<2.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: arxiv (>=2.1.3,<3.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: arxiv2text (>=0.1.14,<0.2.0) ; extra == "tools" or extra == "all"
-Requires-Dist: asknews (>=0.7.
+Requires-Dist: asknews (>=0.7.54,<0.8.0) ; extra == "tools" or extra == "all"
 Requires-Dist: azure-storage-blob (>=12.21.0,<13.0.0) ; extra == "object-storages" or extra == "all"
 Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
 Requires-Dist: botocore (>=1.35.3,<2.0.0) ; extra == "object-storages" or extra == "all"
@@ -49,12 +49,15 @@ Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "runtime"
 Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.2.12,<7.0.0) ; extra == "search-tools" or extra == "tools" or extra == "all"
+Requires-Dist: e2b-code-interpreter (>=1.0.3,<2.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: eval-type-backport (==0.2.0)
 Requires-Dist: ffmpeg-python (>=0.2.0,<0.3.0) ; extra == "tools" or extra == "all"
 Requires-Dist: firecrawl-py (>=1.0.0,<2.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: fish-audio-sdk (>=2024.12.5,<2025.0.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: google-cloud-storage (>=2.18.0,<3.0.0) ; extra == "object-storages" or extra == "all"
 Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: httpx (>=0.23.0,<0.27.3)
 Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: ipykernel (>=6.0.0,<7.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: jsonschema (>=4,<5)
@@ -65,7 +68,7 @@ Requires-Dist: mock (>=5,<6) ; extra == "test"
 Requires-Dist: nebula3-python (==3.8.2) ; extra == "rag" or extra == "graph-storages" or extra == "all"
 Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "rag" or extra == "graph-storages" or extra == "all"
 Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
-Requires-Dist: nltk (==3.
+Requires-Dist: nltk (==3.9.1) ; extra == "tools" or extra == "all"
 Requires-Dist: notion-client (>=2.2.1,<3.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: numpy (>=1,<2)
 Requires-Dist: openai (>=1.45.0,<2.0.0)
@@ -74,7 +77,7 @@ Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra ==
 Requires-Dist: pandoc
 Requires-Dist: pathlib (>=1.0.1,<2.0.0)
 Requires-Dist: pdfplumber (>=0.11.0,<0.12.0) ; extra == "tools" or extra == "all"
-Requires-Dist: pillow (>=
+Requires-Dist: pillow (>=11.0.0,<12.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: prance (>=23.6.21.0,<24.0.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: praw (>=7.7.1,<8.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: protobuf (>=4,<5)
@@ -86,6 +89,7 @@ Requires-Dist: pymilvus (>=2.4.0,<3.0.0) ; extra == "rag" or extra == "vector-da
 Requires-Dist: pyowm (>=3.3.0,<4.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: pytest (>=7,<8) ; extra == "test"
 Requires-Dist: pytest-asyncio (>=0.23.0,<0.24.0) ; extra == "test"
+Requires-Dist: pyyaml (>=6.0.2,<7.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "rag" or extra == "vector-databases" or extra == "all"
 Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "rag" or extra == "retrievers" or extra == "all"
 Requires-Dist: redis (>=5.0.6,<6.0.0) ; extra == "kv-stroages" or extra == "all"
@@ -94,16 +98,18 @@ Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra ==
 Requires-Dist: scholarly[tor] (==1.7.11) ; extra == "tools" or extra == "all"
 Requires-Dist: sentence-transformers (>=3.0.1,<4.0.0) ; extra == "rag" or extra == "encoders" or extra == "all"
 Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
+Requires-Dist: sglang (>=0.4.0,<0.5.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: slack-bolt (>=1.20.1,<2.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: slack-sdk (>=3.27.2,<4.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
+Requires-Dist: stripe (>=11.3.0,<12.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: tavily-python (>=0.5.0,<0.6.0) ; extra == "search-tools" or extra == "all"
-Requires-Dist: textblob (>=0.
+Requires-Dist: textblob (>=0.17.1,<0.18.0) ; extra == "tools" or extra == "all"
 Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
-Requires-Dist: torch (==2.1
+Requires-Dist: torch (==2.2.1) ; (platform_system == "Darwin" and platform_machine != "arm64") and (extra == "huggingface-agent" or extra == "all")
 Requires-Dist: torch (>=2,<3) ; (platform_system != "Darwin" or platform_machine == "arm64") and (extra == "huggingface-agent" or extra == "all")
 Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
-Requires-Dist: unstructured[all-docs] (
+Requires-Dist: unstructured[all-docs] (==0.16.11) ; extra == "rag" or extra == "all"
 Requires-Dist: wikipedia (>=1,<2) ; extra == "search-tools" or extra == "tools" or extra == "all"
 Requires-Dist: wolframalpha (>=5.0.0,<6.0.0) ; extra == "search-tools" or extra == "tools" or extra == "all"
 Requires-Dist: yt-dlp (>=2024.11.4,<2025.0.0) ; extra == "tools" or extra == "all"
@@ -257,7 +263,7 @@ conda create --name camel python=3.10
 conda activate camel
 
 # Clone github repo
-git clone -b v0.2.11 https://github.com/camel-ai/camel.git
+git clone -b v0.2.13 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -434,6 +440,10 @@ Practical guides and tutorials for implementing specific functionalities in CAME
 | **[Track CAMEL Agents with AgentOps](https://docs.camel-ai.org/cookbooks/agents_tracking.html)** | Tools for tracking and managing agents in operations. |
 | **[Create A Hackathon Judge Committee with Workforce](https://docs.camel-ai.org/cookbooks/workforce_judge_committee.html)** | Building a team of agents for collaborative judging. |
 | **[3 Ways to Ingest Data from Websites with Firecrawl](https://docs.camel-ai.org/cookbooks/ingest_data_from_websites_with_Firecrawl.html)** | Explore three methods for extracting and processing data from websites using Firecrawl. |
+| **[Data Deneration with CAMEL and Finetuning with Unsloth](https://docs.camel-ai.org/cookbooks/sft_data_generation_and_unsloth_finetuning.html)** | Learn how to generate data with CAMEL and fine-tune models effectively with Unsloth. |
+| **[Customer Service Discord Bot with Agentic RAG](https://docs.camel-ai.org/cookbooks/customer_service_Discord_bot_with_agentic_RAG.html)** | Learn how to build a robust customer service bot for Discord using Agentic RAG. |
+| **[Create AI Agents that work with your PDFs using Chunkr & Mistral AI](https://docs.camel-ai.org/cookbooks/agent_with_chunkr_for_pdf_parsing.html)** | Learn how to create AI agents that work with your PDFs using Chunkr and Mistral AI. |
+| **[Data Gen with Real Function Calls and Hermes Format](https://docs.camel-ai.org/cookbooks/data_gen_with_real_function_calls_and_hermes_format.html)** | Explore how to generate data with real function calls and the Hermes format. |
 
 ## Utilize Various LLMs as Backends
 
@@ -473,10 +483,10 @@ We implemented amazing research ideas from other works for you to build, compare
 We warmly invite you to use CAMEL for your impactful research.
 
 ## News
-📢 Added
-- 
-- Integrated
-- 
+📢 Added support for Qwen models, Deepseek models to the 🐫 CAMEL framework!. (Nov 28, 2024)
+- Integrate SGLang into the 🐫 CAMEL framework. (Dec, 13, 2024)
+- Integrated Reward Model into the 🐫 CAMEL framework. (Dec, 13, 2024)
+- Added GAIA Benchmark! (Dec, 09, 2024)
 - ...
 - Released AI Society and Code dataset (April 2, 2023)
 - Initial release of `CAMEL` python library (March 21, 2023)