camel-ai 0.2.10__py3-none-any.whl → 0.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (76) hide show
  1. camel/__init__.py +6 -1
  2. camel/agents/chat_agent.py +87 -6
  3. camel/agents/deductive_reasoner_agent.py +4 -1
  4. camel/benchmarks/__init__.py +18 -0
  5. camel/benchmarks/base.py +152 -0
  6. camel/benchmarks/gaia.py +478 -0
  7. camel/configs/__init__.py +6 -0
  8. camel/configs/mistral_config.py +0 -3
  9. camel/configs/nvidia_config.py +70 -0
  10. camel/configs/ollama_config.py +4 -2
  11. camel/configs/sglang_config.py +71 -0
  12. camel/configs/vllm_config.py +10 -1
  13. camel/data_collector/__init__.py +19 -0
  14. camel/data_collector/alpaca_collector.py +127 -0
  15. camel/data_collector/base.py +211 -0
  16. camel/data_collector/sharegpt_collector.py +205 -0
  17. camel/datahubs/__init__.py +23 -0
  18. camel/datahubs/base.py +136 -0
  19. camel/datahubs/huggingface.py +433 -0
  20. camel/datahubs/models.py +22 -0
  21. camel/embeddings/vlm_embedding.py +4 -1
  22. camel/interpreters/__init__.py +2 -0
  23. camel/interpreters/docker_interpreter.py +7 -2
  24. camel/interpreters/e2b_interpreter.py +136 -0
  25. camel/interpreters/subprocess_interpreter.py +7 -2
  26. camel/loaders/__init__.py +3 -1
  27. camel/loaders/base_io.py +41 -41
  28. camel/loaders/firecrawl_reader.py +0 -3
  29. camel/logger.py +112 -0
  30. camel/messages/__init__.py +3 -1
  31. camel/messages/base.py +10 -7
  32. camel/messages/conversion/__init__.py +3 -1
  33. camel/messages/conversion/alpaca.py +122 -0
  34. camel/models/__init__.py +7 -0
  35. camel/models/anthropic_model.py +14 -4
  36. camel/models/base_model.py +28 -0
  37. camel/models/groq_model.py +1 -1
  38. camel/models/model_factory.py +6 -0
  39. camel/models/model_manager.py +212 -0
  40. camel/models/nvidia_model.py +141 -0
  41. camel/models/ollama_model.py +12 -0
  42. camel/models/openai_model.py +0 -25
  43. camel/models/reward/__init__.py +22 -0
  44. camel/models/reward/base_reward_model.py +58 -0
  45. camel/models/reward/evaluator.py +63 -0
  46. camel/models/reward/nemotron_model.py +112 -0
  47. camel/models/sglang_model.py +225 -0
  48. camel/models/vllm_model.py +1 -1
  49. camel/personas/persona_hub.py +2 -2
  50. camel/retrievers/vector_retriever.py +22 -5
  51. camel/schemas/openai_converter.py +2 -2
  52. camel/societies/babyagi_playing.py +4 -1
  53. camel/societies/workforce/role_playing_worker.py +2 -2
  54. camel/societies/workforce/single_agent_worker.py +2 -2
  55. camel/societies/workforce/workforce.py +3 -3
  56. camel/storages/object_storages/amazon_s3.py +2 -2
  57. camel/storages/object_storages/azure_blob.py +2 -2
  58. camel/storages/object_storages/google_cloud.py +2 -2
  59. camel/toolkits/__init__.py +5 -0
  60. camel/toolkits/code_execution.py +42 -4
  61. camel/toolkits/function_tool.py +41 -0
  62. camel/toolkits/human_toolkit.py +1 -0
  63. camel/toolkits/math_toolkit.py +47 -16
  64. camel/toolkits/meshy_toolkit.py +185 -0
  65. camel/toolkits/search_toolkit.py +154 -2
  66. camel/toolkits/stripe_toolkit.py +273 -0
  67. camel/toolkits/twitter_toolkit.py +3 -0
  68. camel/types/__init__.py +2 -0
  69. camel/types/enums.py +68 -10
  70. camel/utils/commons.py +22 -5
  71. camel/utils/token_counting.py +26 -11
  72. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/METADATA +13 -6
  73. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/RECORD +76 -51
  74. /camel/messages/conversion/{models.py → conversation_models.py} +0 -0
  75. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/LICENSE +0 -0
  76. {camel_ai-0.2.10.dist-info → camel_ai-0.2.12.dist-info}/WHEEL +0 -0
@@ -0,0 +1,273 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+
15
+ import json
16
+ import logging
17
+ import os
18
+ from typing import List
19
+
20
+ from camel.toolkits import FunctionTool
21
+ from camel.toolkits.base import BaseToolkit
22
+ from camel.utils import api_keys_required
23
+
24
+
25
+ class StripeToolkit(BaseToolkit):
26
+ r"""A class representing a toolkit for Stripe operations.
27
+
28
+ This toolkit provides methods to interact with the Stripe API,
29
+ allowing users to operate stripe core resources, including Customer,
30
+ Balance, BalanceTransaction, Payment, Refund
31
+
32
+ Use the Developers Dashboard https://dashboard.stripe.com/test/apikeys to
33
+ create an API key as STRIPE_API_KEY.
34
+
35
+ Attributes:
36
+ logger (Logger): a logger to write logs.
37
+ """
38
+
39
+ @api_keys_required("STRIPE_API_KEY")
40
+ def __init__(self, retries: int = 3):
41
+ r"""Initializes the StripeToolkit with the specified number of
42
+ retries.
43
+
44
+ Args:
45
+ retries (int,optional): Number of times to retry the request in
46
+ case of failure. (default: :obj:`3`)
47
+ """
48
+ import stripe
49
+
50
+ stripe.max_network_retries = retries
51
+ stripe.log = 'info'
52
+ self.logger = logging.getLogger(__name__)
53
+ self.logger.setLevel(logging.INFO)
54
+ handler = logging.StreamHandler()
55
+ formatter = logging.Formatter(
56
+ '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
57
+ )
58
+ handler.setFormatter(formatter)
59
+ if not self.logger.handlers:
60
+ self.logger.addHandler(handler)
61
+ stripe.api_key = os.environ.get("STRIPE_API_KEY")
62
+
63
+ def customer_get(self, customer_id: str) -> str:
64
+ r"""Retrieve a customer by ID.
65
+
66
+ Args:
67
+ customer_id (str): The ID of the customer to retrieve.
68
+
69
+ Returns:
70
+ str: The customer data as a str.
71
+ """
72
+ import stripe
73
+
74
+ try:
75
+ self.logger.info(f"Retrieving customer with ID: {customer_id}")
76
+ customer = stripe.Customer.retrieve(customer_id)
77
+ self.logger.info(f"Retrieved customer: {customer.id}")
78
+ json_string = json.dumps(customer)
79
+ return json_string
80
+ except Exception as e:
81
+ return self.handle_exception("customer_get", e)
82
+
83
+ def customer_list(self, limit: int = 100) -> str:
84
+ r"""List customers.
85
+
86
+ Args:
87
+ limit (int, optional): Number of customers to retrieve. (default:
88
+ :obj:`100`)
89
+
90
+ Returns:
91
+ str: An output str if successful, or an error message string if
92
+ failed.
93
+ """
94
+ import stripe
95
+
96
+ try:
97
+ self.logger.info(f"Listing customers with limit={limit}")
98
+ customers = stripe.Customer.list(limit=limit).data
99
+ self.logger.info(
100
+ f"Successfully retrieved {len(customers)} customers."
101
+ )
102
+ return json.dumps([customer for customer in customers])
103
+ except Exception as e:
104
+ return self.handle_exception("customer_list", e)
105
+
106
+ def balance_get(self) -> str:
107
+ r"""Retrieve your account balance.
108
+
109
+ Returns:
110
+ str: A str containing the account balance if successful, or an
111
+ error message string if failed.
112
+ """
113
+ import stripe
114
+
115
+ try:
116
+ self.logger.info("Retrieving account balance.")
117
+ balance = stripe.Balance.retrieve()
118
+ self.logger.info(
119
+ f"Successfully retrieved account balance: {balance}."
120
+ )
121
+ return json.dumps(balance)
122
+ except Exception as e:
123
+ return self.handle_exception("balance_get", e)
124
+
125
+ def balance_transaction_list(self, limit: int = 100) -> str:
126
+ r"""List your balance transactions.
127
+
128
+ Args:
129
+ limit (int, optional): Number of balance transactions to retrieve.
130
+ (default: :obj:`100`)
131
+
132
+ Returns:
133
+ str: A list of balance transaction data if successful, or an error
134
+ message string if failed.
135
+ """
136
+ import stripe
137
+
138
+ try:
139
+ self.logger.info(
140
+ f"Listing balance transactions with limit={limit}"
141
+ )
142
+ transactions = stripe.BalanceTransaction.list(limit=limit).data
143
+ self.logger.info(
144
+ f"Successfully retrieved {len(transactions)} "
145
+ "balance transactions."
146
+ )
147
+ return json.dumps([transaction for transaction in transactions])
148
+ except Exception as e:
149
+ return self.handle_exception("balance_transaction_list", e)
150
+
151
+ def payment_get(self, payment_id: str) -> str:
152
+ r"""Retrieve a payment by ID.
153
+
154
+ Args:
155
+ payment_id (str): The ID of the payment to retrieve.
156
+
157
+ Returns:
158
+ str: The payment data as a str if successful, or an error message
159
+ string if failed.
160
+ """
161
+ import stripe
162
+
163
+ try:
164
+ self.logger.info(f"Retrieving payment with ID: {payment_id}")
165
+ payment = stripe.PaymentIntent.retrieve(payment_id)
166
+ self.logger.info(f"Retrieved payment: {payment.id}")
167
+ return json.dumps(payment)
168
+ except Exception as e:
169
+ return self.handle_exception("payment_get", e)
170
+
171
+ def payment_list(self, limit: int = 100) -> str:
172
+ r"""List payments.
173
+
174
+ Args:
175
+ limit (int, optional): Number of payments to retrieve.
176
+ (default: :obj:`100`)
177
+
178
+ Returns:
179
+ str: A list of payment data if successful, or an error message
180
+ string if failed.
181
+ """
182
+ import stripe
183
+
184
+ try:
185
+ self.logger.info(f"Listing payments with limit={limit}")
186
+ payments = stripe.PaymentIntent.list(limit=limit).data
187
+ self.logger.info(
188
+ f"Successfully retrieved {len(payments)} payments."
189
+ )
190
+ return json.dumps([payment for payment in payments])
191
+ except Exception as e:
192
+ return self.handle_exception("payment_list", e)
193
+
194
+ def refund_get(self, refund_id: str) -> str:
195
+ r"""Retrieve a refund by ID.
196
+
197
+ Args:
198
+ refund_id (str): The ID of the refund to retrieve.
199
+
200
+ Returns:
201
+ str: The refund data as a str if successful, or an error message
202
+ string if failed.
203
+ """
204
+ import stripe
205
+
206
+ try:
207
+ self.logger.info(f"Retrieving refund with ID: {refund_id}")
208
+ refund = stripe.Refund.retrieve(refund_id)
209
+ self.logger.info(f"Retrieved refund: {refund.id}")
210
+ return json.dumps(refund)
211
+ except Exception as e:
212
+ return self.handle_exception("refund_get", e)
213
+
214
+ def refund_list(self, limit: int = 100) -> str:
215
+ r"""List refunds.
216
+
217
+ Args:
218
+ limit (int, optional): Number of refunds to retrieve.
219
+ (default: :obj:`100`)
220
+
221
+ Returns:
222
+ str: A list of refund data as a str if successful, or an error
223
+ message string if failed.
224
+ """
225
+ import stripe
226
+
227
+ try:
228
+ self.logger.info(f"Listing refunds with limit={limit}")
229
+ refunds = stripe.Refund.list(limit=limit).data
230
+ self.logger.info(f"Successfully retrieved {len(refunds)} refunds.")
231
+ return json.dumps([refund for refund in refunds])
232
+ except Exception as e:
233
+ return self.handle_exception("refund_list", e)
234
+
235
+ def handle_exception(self, func_name: str, error: Exception) -> str:
236
+ r"""Handle exceptions by logging and returning an error message.
237
+
238
+ Args:
239
+ func_name (str): The name of the function where the exception
240
+ occurred.
241
+ error (Exception): The exception instance.
242
+
243
+ Returns:
244
+ str: An error message string.
245
+ """
246
+ from stripe import StripeError
247
+
248
+ if isinstance(error, StripeError):
249
+ message = error.user_message or str(error)
250
+ self.logger.error(f"Stripe error in {func_name}: {message}")
251
+ return f"Stripe error in {func_name}: {message}"
252
+ else:
253
+ self.logger.error(f"Unexpected error in {func_name}: {error!s}")
254
+ return f"Unexpected error in {func_name}: {error!s}"
255
+
256
+ def get_tools(self) -> List[FunctionTool]:
257
+ r"""Returns a list of FunctionTool objects representing the
258
+ functions in the toolkit.
259
+
260
+ Returns:
261
+ List[FunctionTool]: A list of FunctionTool objects for the
262
+ toolkit methods.
263
+ """
264
+ return [
265
+ FunctionTool(self.customer_get),
266
+ FunctionTool(self.customer_list),
267
+ FunctionTool(self.balance_get),
268
+ FunctionTool(self.balance_transaction_list),
269
+ FunctionTool(self.payment_get),
270
+ FunctionTool(self.payment_list),
271
+ FunctionTool(self.refund_get),
272
+ FunctionTool(self.refund_list),
273
+ ]
@@ -20,12 +20,15 @@ from typing import Any, Dict, List, Optional, Union
20
20
  import requests
21
21
  from requests_oauthlib import OAuth1
22
22
 
23
+ from camel.logger import get_logger
23
24
  from camel.toolkits import FunctionTool
24
25
  from camel.toolkits.base import BaseToolkit
25
26
  from camel.utils import api_keys_required
26
27
 
27
28
  TWEET_TEXT_LIMIT = 280
28
29
 
30
+ logger = get_logger(__name__)
31
+
29
32
 
30
33
  @api_keys_required(
31
34
  "TWITTER_CONSUMER_KEY",
camel/types/__init__.py CHANGED
@@ -14,6 +14,7 @@
14
14
  from .enums import (
15
15
  AudioModelType,
16
16
  EmbeddingModelType,
17
+ HuggingFaceRepoType,
17
18
  ModelPlatformType,
18
19
  ModelType,
19
20
  OpenAIBackendRole,
@@ -73,4 +74,5 @@ __all__ = [
73
74
  'NOT_GIVEN',
74
75
  'NotGiven',
75
76
  'ParsedChatCompletion',
77
+ 'HuggingFaceRepoType',
76
78
  ]
camel/types/enums.py CHANGED
@@ -41,9 +41,12 @@ class ModelType(UnifiedModelType, Enum):
41
41
  GLM_4V = 'glm-4v'
42
42
  GLM_3_TURBO = "glm-3-turbo"
43
43
 
44
+ # Groq platform models
44
45
  GROQ_LLAMA_3_1_8B = "llama-3.1-8b-instant"
45
46
  GROQ_LLAMA_3_1_70B = "llama-3.1-70b-versatile"
46
47
  GROQ_LLAMA_3_1_405B = "llama-3.1-405b-reasoning"
48
+ GROQ_LLAMA_3_3_70B = "llama-3.3-70b-versatile"
49
+ GROQ_LLAMA_3_3_70B_PREVIEW = "llama-3.3-70b-specdec"
47
50
  GROQ_LLAMA_3_8B = "llama3-8b-8192"
48
51
  GROQ_LLAMA_3_70B = "llama3-70b-8192"
49
52
  GROQ_MIXTRAL_8_7B = "mixtral-8x7b-32768"
@@ -59,13 +62,25 @@ class ModelType(UnifiedModelType, Enum):
59
62
  CLAUDE_INSTANT_1_2 = "claude-instant-1.2"
60
63
 
61
64
  # Claude3 models
62
- CLAUDE_3_OPUS = "claude-3-opus-20240229"
65
+ CLAUDE_3_OPUS = "claude-3-opus-latest"
63
66
  CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
64
67
  CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
65
- CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620"
68
+ CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest"
69
+ CLAUDE_3_5_HAIKU = "claude-3-5-haiku-latest"
66
70
 
67
71
  # Nvidia models
68
- NEMOTRON_4_REWARD = "nvidia/nemotron-4-340b-reward"
72
+ NVIDIA_NEMOTRON_340B_INSTRUCT = "nvidia/nemotron-4-340b-instruct"
73
+ NVIDIA_NEMOTRON_340B_REWARD = "nvidia/nemotron-4-340b-reward"
74
+ NVIDIA_YI_LARGE = "01-ai/yi-large"
75
+ NVIDIA_MISTRAL_LARGE = "mistralai/mistral-large"
76
+ NVIDIA_MIXTRAL_8X7B = "mistralai/mixtral-8x7b-instruct"
77
+ NVIDIA_LLAMA3_70B = "meta/llama3-70b"
78
+ NVIDIA_LLAMA3_1_8B_INSTRUCT = "meta/llama-3.1-8b-instruct"
79
+ NVIDIA_LLAMA3_1_70B_INSTRUCT = "meta/llama-3.1-70b-instruct"
80
+ NVIDIA_LLAMA3_1_405B_INSTRUCT = "meta/llama-3.1-405b-instruct"
81
+ NVIDIA_LLAMA3_2_1B_INSTRUCT = "meta/llama-3.2-1b-instruct"
82
+ NVIDIA_LLAMA3_2_3B_INSTRUCT = "meta/llama-3.2-3b-instruct"
83
+ NVIDIA_LLAMA3_3_70B_INSTRUCT = "meta/llama-3.3-70b-instruct"
69
84
 
70
85
  # Gemini models
71
86
  GEMINI_1_5_FLASH = "gemini-1.5-flash"
@@ -165,6 +180,7 @@ class ModelType(UnifiedModelType, Enum):
165
180
  ModelType.GPT_4,
166
181
  ModelType.GPT_4_TURBO,
167
182
  ModelType.GPT_4O,
183
+ ModelType.GPT_4O_MINI,
168
184
  }
169
185
 
170
186
  @property
@@ -191,6 +207,7 @@ class ModelType(UnifiedModelType, Enum):
191
207
  ModelType.CLAUDE_3_SONNET,
192
208
  ModelType.CLAUDE_3_HAIKU,
193
209
  ModelType.CLAUDE_3_5_SONNET,
210
+ ModelType.CLAUDE_3_5_HAIKU,
194
211
  }
195
212
 
196
213
  @property
@@ -200,6 +217,8 @@ class ModelType(UnifiedModelType, Enum):
200
217
  ModelType.GROQ_LLAMA_3_1_8B,
201
218
  ModelType.GROQ_LLAMA_3_1_70B,
202
219
  ModelType.GROQ_LLAMA_3_1_405B,
220
+ ModelType.GROQ_LLAMA_3_3_70B,
221
+ ModelType.GROQ_LLAMA_3_3_70B_PREVIEW,
203
222
  ModelType.GROQ_LLAMA_3_8B,
204
223
  ModelType.GROQ_LLAMA_3_70B,
205
224
  ModelType.GROQ_MIXTRAL_8_7B,
@@ -225,13 +244,20 @@ class ModelType(UnifiedModelType, Enum):
225
244
 
226
245
  @property
227
246
  def is_nvidia(self) -> bool:
228
- r"""Returns whether this type of models is Nvidia-released model.
229
-
230
- Returns:
231
- bool: Whether this type of models is nvidia.
232
- """
247
+ r"""Returns whether this type of models is a NVIDIA model."""
233
248
  return self in {
234
- ModelType.NEMOTRON_4_REWARD,
249
+ ModelType.NVIDIA_NEMOTRON_340B_INSTRUCT,
250
+ ModelType.NVIDIA_NEMOTRON_340B_REWARD,
251
+ ModelType.NVIDIA_YI_LARGE,
252
+ ModelType.NVIDIA_MISTRAL_LARGE,
253
+ ModelType.NVIDIA_LLAMA3_70B,
254
+ ModelType.NVIDIA_MIXTRAL_8X7B,
255
+ ModelType.NVIDIA_LLAMA3_1_8B_INSTRUCT,
256
+ ModelType.NVIDIA_LLAMA3_1_70B_INSTRUCT,
257
+ ModelType.NVIDIA_LLAMA3_1_405B_INSTRUCT,
258
+ ModelType.NVIDIA_LLAMA3_2_1B_INSTRUCT,
259
+ ModelType.NVIDIA_LLAMA3_2_3B_INSTRUCT,
260
+ ModelType.NVIDIA_LLAMA3_3_70B_INSTRUCT,
235
261
  }
236
262
 
237
263
  @property
@@ -329,7 +355,6 @@ class ModelType(UnifiedModelType, Enum):
329
355
  if self is ModelType.GLM_4V:
330
356
  return 1024
331
357
  elif self in {
332
- ModelType.NEMOTRON_4_REWARD,
333
358
  ModelType.STUB,
334
359
  ModelType.REKA_CORE,
335
360
  ModelType.REKA_EDGE,
@@ -338,17 +363,21 @@ class ModelType(UnifiedModelType, Enum):
338
363
  ModelType.QWEN_MATH_TURBO,
339
364
  ModelType.COHERE_COMMAND,
340
365
  ModelType.COHERE_COMMAND_LIGHT,
366
+ ModelType.NVIDIA_NEMOTRON_340B_INSTRUCT,
367
+ ModelType.NVIDIA_NEMOTRON_340B_REWARD,
341
368
  }:
342
369
  return 4_096
343
370
  elif self in {
344
371
  ModelType.GPT_4,
345
372
  ModelType.GROQ_LLAMA_3_8B,
346
373
  ModelType.GROQ_LLAMA_3_70B,
374
+ ModelType.GROQ_LLAMA_3_3_70B_PREVIEW,
347
375
  ModelType.GROQ_GEMMA_7B_IT,
348
376
  ModelType.GROQ_GEMMA_2_9B_IT,
349
377
  ModelType.GLM_3_TURBO,
350
378
  ModelType.GLM_4,
351
379
  ModelType.QWEN_VL_PLUS,
380
+ ModelType.NVIDIA_LLAMA3_70B,
352
381
  }:
353
382
  return 8_192
354
383
  elif self in {
@@ -370,6 +399,9 @@ class ModelType(UnifiedModelType, Enum):
370
399
  ModelType.YI_LARGE_FC,
371
400
  ModelType.QWEN_MAX,
372
401
  ModelType.QWEN_VL_MAX,
402
+ ModelType.NVIDIA_YI_LARGE,
403
+ ModelType.NVIDIA_MISTRAL_LARGE,
404
+ ModelType.NVIDIA_MIXTRAL_8X7B,
373
405
  ModelType.QWEN_QWQ_32B,
374
406
  }:
375
407
  return 32_768
@@ -401,6 +433,13 @@ class ModelType(UnifiedModelType, Enum):
401
433
  ModelType.COHERE_COMMAND_R,
402
434
  ModelType.COHERE_COMMAND_R_PLUS,
403
435
  ModelType.COHERE_COMMAND_NIGHTLY,
436
+ ModelType.NVIDIA_LLAMA3_1_8B_INSTRUCT,
437
+ ModelType.NVIDIA_LLAMA3_1_70B_INSTRUCT,
438
+ ModelType.NVIDIA_LLAMA3_1_405B_INSTRUCT,
439
+ ModelType.NVIDIA_LLAMA3_2_1B_INSTRUCT,
440
+ ModelType.NVIDIA_LLAMA3_2_3B_INSTRUCT,
441
+ ModelType.NVIDIA_LLAMA3_3_70B_INSTRUCT,
442
+ ModelType.GROQ_LLAMA_3_3_70B,
404
443
  }:
405
444
  return 128_000
406
445
  elif self in {
@@ -418,6 +457,7 @@ class ModelType(UnifiedModelType, Enum):
418
457
  ModelType.CLAUDE_3_SONNET,
419
458
  ModelType.CLAUDE_3_HAIKU,
420
459
  ModelType.CLAUDE_3_5_SONNET,
460
+ ModelType.CLAUDE_3_5_HAIKU,
421
461
  ModelType.YI_MEDIUM_200K,
422
462
  }:
423
463
  return 200_000
@@ -582,7 +622,9 @@ class ModelPlatformType(Enum):
582
622
  COHERE = "cohere"
583
623
  YI = "lingyiwanwu"
584
624
  QWEN = "tongyi-qianwen"
625
+ NVIDIA = "nvidia"
585
626
  DEEPSEEK = "deepseek"
627
+ SGLANG = "sglang"
586
628
 
587
629
  @property
588
630
  def is_openai(self) -> bool:
@@ -614,6 +656,11 @@ class ModelPlatformType(Enum):
614
656
  r"""Returns whether this platform is vllm."""
615
657
  return self is ModelPlatformType.VLLM
616
658
 
659
+ @property
660
+ def is_sglang(self) -> bool:
661
+ r"""Returns whether this platform is sglang."""
662
+ return self is ModelPlatformType.SGLANG
663
+
617
664
  @property
618
665
  def is_together(self) -> bool:
619
666
  r"""Returns whether this platform is together."""
@@ -670,6 +717,11 @@ class ModelPlatformType(Enum):
670
717
  r"""Returns whether this platform is Qwen."""
671
718
  return self is ModelPlatformType.QWEN
672
719
 
720
+ @property
721
+ def is_nvidia(self) -> bool:
722
+ r"""Returns whether this platform is Nvidia."""
723
+ return self is ModelPlatformType.NVIDIA
724
+
673
725
  @property
674
726
  def is_deepseek(self) -> bool:
675
727
  r"""Returns whether this platform is DeepSeek."""
@@ -716,3 +768,9 @@ class JinaReturnFormat(Enum):
716
768
  MARKDOWN = "markdown"
717
769
  HTML = "html"
718
770
  TEXT = "text"
771
+
772
+
773
+ class HuggingFaceRepoType(str, Enum):
774
+ DATASET = "dataset"
775
+ MODEL = "model"
776
+ SPACE = "space"
camel/utils/commons.py CHANGED
@@ -12,6 +12,7 @@
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
  import importlib
15
+ import logging
15
16
  import os
16
17
  import platform
17
18
  import re
@@ -39,14 +40,19 @@ import pydantic
39
40
  import requests
40
41
  from pydantic import BaseModel
41
42
 
43
+ from camel.logger import get_logger
42
44
  from camel.types import TaskType
43
45
 
44
46
  from .constants import Constants
45
47
 
46
48
  F = TypeVar('F', bound=Callable[..., Any])
47
49
 
50
+ logger = get_logger(__name__)
48
51
 
49
- def print_text_animated(text, delay: float = 0.02, end: str = ""):
52
+
53
+ def print_text_animated(
54
+ text, delay: float = 0.02, end: str = "", log_level: int = logging.INFO
55
+ ):
50
56
  r"""Prints the given text with an animated effect.
51
57
 
52
58
  Args:
@@ -55,11 +61,22 @@ def print_text_animated(text, delay: float = 0.02, end: str = ""):
55
61
  (default: :obj:`0.02`)
56
62
  end (str, optional): The end character to print after each
57
63
  character of text. (default: :obj:`""`)
64
+ log_level (int, optional): The log level to use.
65
+ See https://docs.python.org/3/library/logging.html#levels
66
+ (default: :obj:`logging.INFO`)
58
67
  """
59
- for char in text:
60
- print(char, end=end, flush=True)
61
- time.sleep(delay)
62
- print('\n')
68
+ if logger.isEnabledFor(log_level):
69
+ # timestamp and other prefixes
70
+ logger.log(log_level, '')
71
+
72
+ for char in text:
73
+ print(char, end=end, flush=True)
74
+ time.sleep(delay)
75
+ # Close the log entry
76
+ logger.log(log_level, '')
77
+ else:
78
+ # This may be relevant for logging frameworks
79
+ logger.log(log_level, text)
63
80
 
64
81
 
65
82
  def get_prompt_template_key_words(template: str) -> Set[str]:
@@ -22,6 +22,7 @@ from typing import TYPE_CHECKING, List, Optional
22
22
 
23
23
  from PIL import Image
24
24
 
25
+ from camel.logger import get_logger
25
26
  from camel.types import (
26
27
  ModelType,
27
28
  OpenAIImageType,
@@ -44,6 +45,8 @@ SQUARE_PIXELS = 512
44
45
  SQUARE_TOKENS = 170
45
46
  EXTRA_TOKENS = 85
46
47
 
48
+ logger = get_logger(__name__)
49
+
47
50
 
48
51
  def get_model_encoding(value_for_tiktoken: str):
49
52
  r"""Get model encoding from tiktoken.
@@ -65,7 +68,7 @@ def get_model_encoding(value_for_tiktoken: str):
65
68
  ]:
66
69
  encoding = tiktoken.get_encoding("o200k_base")
67
70
  else:
68
- print("Model not found. Using cl100k_base encoding.")
71
+ logger.info("Model not found. Using cl100k_base encoding.")
69
72
  encoding = tiktoken.get_encoding("cl100k_base")
70
73
  return encoding
71
74
 
@@ -219,13 +222,18 @@ class OpenAITokenCounter(BaseTokenCounter):
219
222
 
220
223
  class AnthropicTokenCounter(BaseTokenCounter):
221
224
  @dependencies_required('anthropic')
222
- def __init__(self):
223
- r"""Constructor for the token counter for Anthropic models."""
225
+ def __init__(self, model: str):
226
+ r"""Constructor for the token counter for Anthropic models.
227
+
228
+ Args:
229
+ model (str): The name of the Anthropic model being used.
230
+ """
224
231
  from anthropic import Anthropic
225
232
 
226
233
  self.client = Anthropic()
227
- self.tokenizer = self.client.get_tokenizer()
234
+ self.model = model
228
235
 
236
+ @dependencies_required('anthropic')
229
237
  def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
230
238
  r"""Count number of tokens in the provided message list using
231
239
  loaded tokenizer specific for this type of model.
@@ -237,11 +245,18 @@ class AnthropicTokenCounter(BaseTokenCounter):
237
245
  Returns:
238
246
  int: Number of tokens in the messages.
239
247
  """
240
- num_tokens = 0
241
- for message in messages:
242
- content = str(message["content"])
243
- num_tokens += self.client.count_tokens(content)
244
- return num_tokens
248
+ from anthropic.types.beta import BetaMessageParam
249
+
250
+ return self.client.beta.messages.count_tokens(
251
+ messages=[
252
+ BetaMessageParam(
253
+ content=str(msg["content"]),
254
+ role="user" if msg["role"] == "user" else "assistant",
255
+ )
256
+ for msg in messages
257
+ ],
258
+ model=self.model,
259
+ ).input_tokens
245
260
 
246
261
 
247
262
  class GeminiTokenCounter(BaseTokenCounter):
@@ -357,7 +372,7 @@ class MistralTokenCounter(BaseTokenCounter):
357
372
  ModelType.MISTRAL_CODESTRAL,
358
373
  ModelType.MISTRAL_CODESTRAL_MAMBA,
359
374
  }
360
- else self.model_type.value
375
+ else self.model_type
361
376
  )
362
377
 
363
378
  self.tokenizer = MistralTokenizer.from_model(model_name)
@@ -400,7 +415,7 @@ class MistralTokenCounter(BaseTokenCounter):
400
415
  )
401
416
 
402
417
  mistral_request = ChatCompletionRequest( # type: ignore[type-var]
403
- model=self.model_type.value,
418
+ model=self.model_type,
404
419
  messages=[openai_msg],
405
420
  )
406
421