camel-ai 0.1.5.2__py3-none-any.whl → 0.1.5.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (39)
  1. camel/agents/chat_agent.py +21 -17
  2. camel/agents/critic_agent.py +6 -9
  3. camel/agents/deductive_reasoner_agent.py +7 -9
  4. camel/agents/embodied_agent.py +6 -9
  5. camel/agents/knowledge_graph_agent.py +12 -10
  6. camel/agents/role_assignment_agent.py +10 -11
  7. camel/agents/search_agent.py +5 -5
  8. camel/agents/task_agent.py +26 -38
  9. camel/configs/openai_config.py +14 -0
  10. camel/embeddings/base.py +10 -9
  11. camel/embeddings/openai_embedding.py +25 -12
  12. camel/embeddings/sentence_transformers_embeddings.py +28 -14
  13. camel/functions/open_api_function.py +11 -4
  14. camel/functions/slack_functions.py +14 -2
  15. camel/models/__init__.py +4 -0
  16. camel/models/anthropic_model.py +4 -2
  17. camel/models/base_model.py +4 -1
  18. camel/models/model_factory.py +42 -21
  19. camel/models/nemotron_model.py +71 -0
  20. camel/models/ollama_model.py +121 -0
  21. camel/models/open_source_model.py +7 -2
  22. camel/models/openai_model.py +8 -3
  23. camel/models/stub_model.py +3 -1
  24. camel/prompts/__init__.py +4 -0
  25. camel/prompts/generate_text_embedding_data.py +79 -0
  26. camel/prompts/task_prompt_template.py +4 -0
  27. camel/retrievers/auto_retriever.py +2 -2
  28. camel/societies/role_playing.py +16 -19
  29. camel/storages/graph_storages/graph_element.py +9 -1
  30. camel/types/__init__.py +2 -0
  31. camel/types/enums.py +84 -22
  32. camel/utils/commons.py +4 -0
  33. camel/utils/token_counting.py +5 -3
  34. {camel_ai-0.1.5.2.dist-info → camel_ai-0.1.5.4.dist-info}/METADATA +60 -47
  35. {camel_ai-0.1.5.2.dist-info → camel_ai-0.1.5.4.dist-info}/RECORD +36 -36
  36. camel/bots/__init__.py +0 -20
  37. camel/bots/discord_bot.py +0 -103
  38. camel/bots/telegram_bot.py +0 -84
  39. {camel_ai-0.1.5.2.dist-info → camel_ai-0.1.5.4.dist-info}/WHEEL +0 -0
@@ -35,6 +35,7 @@ class OpenAIModel(BaseModelBackend):
         model_type: ModelType,
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
+        url: Optional[str] = None,
     ) -> None:
         r"""Constructor for OpenAI backend.

@@ -45,12 +46,16 @@ class OpenAIModel(BaseModelBackend):
                 be fed into openai.ChatCompletion.create().
             api_key (Optional[str]): The API key for authenticating with the
                 OpenAI service. (default: :obj:`None`)
+            url (Optional[str]): The url to the OpenAI service.
         """
-        super().__init__(model_type, model_config_dict)
-        url = os.environ.get('OPENAI_API_BASE_URL', None)
+        super().__init__(model_type, model_config_dict, api_key, url)
+        self._url = url or os.environ.get("OPENAI_API_BASE_URL")
         self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
         self._client = OpenAI(
-            timeout=60, max_retries=3, base_url=url, api_key=self._api_key
+            timeout=60,
+            max_retries=3,
+            base_url=self._url,
+            api_key=self._api_key,
         )
         self._token_counter: Optional[BaseTokenCounter] = None

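The new optional url argument lets a caller point this backend at any OpenAI-compatible endpoint instead of relying solely on the OPENAI_API_BASE_URL environment variable. A minimal usage sketch under that change (the ChatGPTConfig helper, the placeholder key, and the proxy URL are illustrative assumptions, not part of this diff):

from camel.configs import ChatGPTConfig
from camel.models import OpenAIModel
from camel.types import ModelType

# Illustrative: route requests through a self-hosted OpenAI-compatible proxy.
model = OpenAIModel(
    model_type=ModelType.GPT_3_5_TURBO,
    model_config_dict=ChatGPTConfig(temperature=0.2).__dict__,
    api_key="sk-...",                # falls back to OPENAI_API_KEY if omitted
    url="http://localhost:8000/v1",  # falls back to OPENAI_API_BASE_URL if omitted
)
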
@@ -54,11 +54,13 @@ class StubModel(BaseModelBackend):
         model_type: ModelType,
         model_config_dict: Dict[str, Any],
         api_key: Optional[str] = None,
+        url: Optional[str] = None,
     ) -> None:
         r"""All arguments are unused for the dummy model."""
-        super().__init__(model_type, model_config_dict)
+        super().__init__(model_type, model_config_dict, api_key, url)
         self._token_counter: Optional[BaseTokenCounter] = None
         self._api_key = api_key
+        self._url = url

     @property
     def token_counter(self) -> BaseTokenCounter:
camel/prompts/__init__.py CHANGED
@@ -16,6 +16,9 @@ from .base import CodePrompt, TextPrompt, TextPromptDict
 from .code import CodePromptTemplateDict
 from .descripte_video_prompt import DescriptionVideoPromptTemplateDict
 from .evaluation import EvaluationPromptTemplateDict
+from .generate_text_embedding_data import (
+    GenerateTextEmbeddingDataPromptTemplateDict,
+)
 from .misalignment import MisalignmentPromptTemplateDict
 from .object_recognition import ObjectRecognitionPromptTemplateDict
 from .prompt_templates import PromptTemplateGenerator
@@ -37,6 +40,7 @@ __all__ = [
     'TaskPromptTemplateDict',
     'PromptTemplateGenerator',
     'SolutionExtractionPromptTemplateDict',
+    'GenerateTextEmbeddingDataPromptTemplateDict',
     'ObjectRecognitionPromptTemplateDict',
     'DescriptionVideoPromptTemplateDict',
 ]
@@ -0,0 +1,79 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import Any
+
+from camel.prompts import TextPrompt, TextPromptDict
+from camel.types import RoleType
+
+
+# flake8: noqa :E501
+class GenerateTextEmbeddingDataPromptTemplateDict(TextPromptDict):
+    r"""A :obj:`TextPrompt` dictionary containing text embedding tasks
+    generation, query, positive and hard negative samples generation,
+    from the `"Improving Text Embeddings with Large Language Models"
+    <https://arxiv.org/abs/2401.00368>`_ paper.
+
+    Attributes:
+        GENERATE_TASKS (TextPrompt): A prompt to generate a list
+            of :obj:`num_tasks` synthetic text_embedding tasks.
+        ASSISTANT_PROMPT (TextPrompt): A system prompt for the AI assistant
+            to generate synthetic :obj:`user_query`, :obj:`positive document`,
+            and :obj:`hard_negative_document` for a specific :obj:`task` with
+            specified parameters including :obj:`query_type`,
+            :obj:`query_length`, :obj:`clarity`, :obj:`num_words`,
+            :obj:`language` and :obj:`difficulty`.
+    """
+
+    GENERATE_TASKS = TextPrompt(
+        """You are an expert to brainstorm a list of {num_tasks} potentially useful text retrieval tasks
+Here are a few examples for your reference:
+- Provided a scientific claim as query, retrieve documents that help verify or refute the claim.
+- Search for documents that answers a FAQ-style query on children's nutrition.
+Please adhere to the following guidelines:
+- Specify what the query is, and what the desired documents are.
+- Each retrieval task should cover a wide range of queries, and should not be too specific.
+Your output should always be a python list of strings starting with `1.`, `2.` etc.
+And each element corresponds to a distinct retrieval task in one sentence.
+Do not explain yourself or output anything else.
+Be creative!"""
+    )
+
+    ASSISTANT_PROMPT = TextPrompt(
+        """You have been assigned a retrieval task: {task}
+Your mission is to write one text retrieval example for this task in JSON format. The JSON object must
+contain the following keys:
+- "user_query": a string, a random user search query specified by the retrieval task.
+- "positive_document": a string, a relevant document for the user query.
+- "hard_negative_document": a string, a hard negative document that only appears relevant to the query.
+Please adhere to the following guidelines:
+- The "user_query" should be {query_type}, {query_length}, {clarity}, and diverse in topic.
+- All documents must be created independent of the query. Avoid copying the query verbatim.
+  It's acceptable if some parts of the "positive_document" are not topically related to the query.
+- All documents should be at least {num_words} words long.
+- The "hard_negative_document" contains some useful information, but it should be less useful or comprehensive compared to the "positive_document".
+- Both the query and documents should be in {language}.
+- Do not provide any explanation in any document on why it is relevant or not relevant to the query.
+- Both the query and documents require {difficulty} level education to understand.
+Your output must always be a JSON object only (starting and ending with curly brackets), do not explain yourself or output anything else. Be creative!"""
+    )
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.update(
+            {
+                "generate_tasks": self.GENERATE_TASKS,
+                RoleType.ASSISTANT: self.ASSISTANT_PROMPT,
+            }
+        )
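
Once registered, the new template behaves like any other TextPromptDict entry: look up a prompt by key and fill in its placeholders with format. A rough usage sketch (all parameter values below are illustrative, not mandated by the template):

from camel.prompts import GenerateTextEmbeddingDataPromptTemplateDict
from camel.types import RoleType

templates = GenerateTextEmbeddingDataPromptTemplateDict()

# Ask an LLM to brainstorm five synthetic retrieval tasks.
task_prompt = templates["generate_tasks"].format(num_tasks=5)

# System prompt for generating one (query, positive, hard negative) triple.
assistant_prompt = templates[RoleType.ASSISTANT].format(
    task="Given a web search query, retrieve passages that answer the query",
    query_type="common",
    query_length="5 to 15 words",
    clarity="clear",
    num_words=100,
    language="English",
    difficulty="high school",
)
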
@@ -24,6 +24,9 @@ from camel.prompts.descripte_video_prompt import (
 from camel.prompts.evaluation import (
     EvaluationPromptTemplateDict,
 )
+from camel.prompts.generate_text_embedding_data import (
+    GenerateTextEmbeddingDataPromptTemplateDict,
+)
 from camel.prompts.misalignment import MisalignmentPromptTemplateDict
 from camel.prompts.object_recognition import (
     ObjectRecognitionPromptTemplateDict,
@@ -60,6 +63,7 @@ class TaskPromptTemplateDict(Dict[Any, TextPromptDict]):
                 TaskType.SOLUTION_EXTRACTION: SolutionExtractionPromptTemplateDict(),  # noqa: E501
                 TaskType.ROLE_DESCRIPTION: RoleDescriptionPromptTemplateDict(),
                 TaskType.OBJECT_RECOGNITION: ObjectRecognitionPromptTemplateDict(),  # noqa: E501
+                TaskType.GENERATE_TEXT_EMBEDDING_DATA: GenerateTextEmbeddingDataPromptTemplateDict(),  # noqa: E501
                 TaskType.VIDEO_DESCRIPTION: DescriptionVideoPromptTemplateDict(),  # noqa: E501
             }
         )
@@ -159,11 +159,11 @@ class AutoRetriever:
     ) -> str:
         r"""Retrieves the last modified date and time of a given file. This
         function takes vector storage instance as input and returns the last
-        modified date from the meta data.
+        modified date from the metadata.

         Args:
             vector_storage_instance (BaseVectorStorage): The vector storage
-                where modified date is to be retrieved from meta data.
+                where modified date is to be retrieved from metadata.

         Returns:
             str: The last modified date from vector storage.
@@ -22,9 +22,10 @@ from camel.agents import (
 from camel.generators import SystemMessageGenerator
 from camel.human import Human
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
 from camel.responses import ChatAgentResponse
-from camel.types import ModelType, RoleType, TaskType
+from camel.types import RoleType, TaskType


 class RolePlaying:
@@ -48,9 +49,9 @@ class RolePlaying:
             in the loop. (default: :obj:`False`)
         critic_criteria (str, optional): Critic criteria for the critic agent.
             If not specified, set the criteria to improve task performance.
-        model_type (ModelType, optional): Model type that will be used for
-            role playing. If specified, it will override the model in all
-            agents. (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. If specified, it will override the model in
+            all agents. (default: :obj:`None`)
         task_type (TaskType, optional): The type of task to perform.
             (default: :obj:`TaskType.AI_SOCIETY`)
         assistant_agent_kwargs (Dict, optional): Additional arguments to pass
@@ -84,7 +85,7 @@ class RolePlaying:
         with_task_planner: bool = False,
         with_critic_in_the_loop: bool = False,
         critic_criteria: Optional[str] = None,
-        model_type: Optional[ModelType] = None,
+        model: Optional[BaseModelBackend] = None,
         task_type: TaskType = TaskType.AI_SOCIETY,
         assistant_agent_kwargs: Optional[Dict] = None,
         user_agent_kwargs: Optional[Dict] = None,
@@ -99,7 +100,7 @@ class RolePlaying:
         self.with_task_specify = with_task_specify
         self.with_task_planner = with_task_planner
         self.with_critic_in_the_loop = with_critic_in_the_loop
-        self.model_type = model_type
+        self.model = model
         self.task_type = task_type
         self.task_prompt = task_prompt

@@ -189,12 +190,10 @@ class RolePlaying:
                 )
             )
             task_specify_meta_dict.update(extend_task_specify_meta_dict or {})
-            if self.model_type is not None:
+            if self.model is not None:
                 if task_specify_agent_kwargs is None:
                     task_specify_agent_kwargs = {}
-                task_specify_agent_kwargs.update(
-                    dict(model_type=self.model_type)
-                )
+                task_specify_agent_kwargs.update(dict(model=self.model))
             task_specify_agent = TaskSpecifyAgent(
                 task_type=self.task_type,
                 output_language=output_language,
@@ -224,12 +223,10 @@ class RolePlaying:
                 agents. (default: :obj:`None`)
         """
         if self.with_task_planner:
-            if self.model_type is not None:
+            if self.model is not None:
                 if task_planner_agent_kwargs is None:
                     task_planner_agent_kwargs = {}
-                task_planner_agent_kwargs.update(
-                    dict(model_type=self.model_type)
-                )
+                task_planner_agent_kwargs.update(dict(model=self.model))
             task_planner_agent = TaskPlannerAgent(
                 output_language=output_language,
                 **(task_planner_agent_kwargs or {}),
@@ -321,13 +318,13 @@ class RolePlaying:
             output_language (str, optional): The language to be output by the
                 agents. (default: :obj:`None`)
         """
-        if self.model_type is not None:
+        if self.model is not None:
             if assistant_agent_kwargs is None:
                 assistant_agent_kwargs = {}
-            assistant_agent_kwargs.update(dict(model_type=self.model_type))
+            assistant_agent_kwargs.update(dict(model=self.model))
             if user_agent_kwargs is None:
                 user_agent_kwargs = {}
-            user_agent_kwargs.update(dict(model_type=self.model_type))
+            user_agent_kwargs.update(dict(model=self.model))

         self.assistant_agent = ChatAgent(
             init_assistant_sys_msg,
@@ -383,10 +380,10 @@ class RolePlaying:
             critic_msg_meta_dict,
             role_tuple=(critic_role_name, RoleType.CRITIC),
         )
-        if self.model_type is not None:
+        if self.model is not None:
             if critic_kwargs is None:
                 critic_kwargs = {}
-            critic_kwargs.update(dict(model_type=self.model_type))
+            critic_kwargs.update(dict(model=self.model))
         self.critic = CriticAgent(
             self.critic_sys_msg,
             **(critic_kwargs or {}),
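
With this change, callers hand RolePlaying a constructed model backend rather than a bare ModelType. A rough sketch of the updated call site, reusing the OpenAIModel constructor shown earlier (the ChatGPTConfig helper, role names, and task prompt are illustrative assumptions):

from camel.configs import ChatGPTConfig
from camel.models import OpenAIModel
from camel.societies import RolePlaying
from camel.types import ModelType

# Build one backend and let it override the model in every agent.
model = OpenAIModel(ModelType.GPT_4, ChatGPTConfig().__dict__)

session = RolePlaying(
    assistant_role_name="Python Programmer",
    user_role_name="Stock Trader",
    task_prompt="Develop a simple trading bot",
    model=model,
)
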
@@ -16,7 +16,10 @@ from __future__ import annotations
 from dataclasses import dataclass, field
 from typing import List, Union

-from unstructured.documents.elements import Element
+try:
+    from unstructured.documents.elements import Element
+except ImportError:
+    Element = None


 @dataclass
@@ -72,3 +75,8 @@ class GraphElement:
     nodes: List[Node]
     relationships: List[Relationship]
     source: Element
+
+    def __post_init__(self):
+        if Element is None:
+            raise ImportError("""The 'unstructured' package is required to use
+            the 'source' attribute.""")
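
The guarded import makes unstructured an optional dependency: importing the module no longer fails when the package is missing, but constructing a GraphElement then raises in __post_init__. A small sketch of the intended failure mode (the Node field names come from the unchanged part of graph_element.py and are assumed stable; the values are illustrative):

from camel.storages.graph_storages.graph_element import GraphElement, Node

# With `unstructured` not installed, Element is None and this raises ImportError.
element = GraphElement(
    nodes=[Node(id="camel", type="Project")],
    relationships=[],
    source=None,
)
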
camel/types/__init__.py CHANGED
@@ -14,6 +14,7 @@
 from .enums import (
     AudioModelType,
     EmbeddingModelType,
+    ModelPlatformType,
     ModelType,
     OpenAIBackendRole,
     OpenAIImageType,
@@ -62,6 +63,7 @@ __all__ = [
     'OpenAIImageType',
     'OpenAIVisionDetailType',
     'OpenAPIName',
+    'ModelPlatformType',
     'AudioModelType',
     'VoiceType',
 ]
camel/types/enums.py CHANGED
@@ -45,14 +45,22 @@ class ModelType(Enum):
     CLAUDE_2_0 = "claude-2.0"
     CLAUDE_INSTANT_1_2 = "claude-instant-1.2"

-    # 3 models
+    # Claude3 models
     CLAUDE_3_OPUS = "claude-3-opus-20240229"
     CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
     CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
+    CLAUDE_3_5_SONNET = "claude-3-5-sonnet-20240620"
+
+    # Nvidia models
+    NEMOTRON_4_REWARD = "nvidia/nemotron-4-340b-reward"

     @property
     def value_for_tiktoken(self) -> str:
-        return self.value if self is not ModelType.STUB else "gpt-3.5-turbo"
+        return (
+            self.value
+            if self is not ModelType.STUB and not isinstance(self, str)
+            else "gpt-3.5-turbo"
+        )

     @property
     def is_openai(self) -> bool:
@@ -97,6 +105,18 @@ class ModelType(Enum):
             ModelType.CLAUDE_3_OPUS,
             ModelType.CLAUDE_3_SONNET,
             ModelType.CLAUDE_3_HAIKU,
+            ModelType.CLAUDE_3_5_SONNET,
+        }
+
+    @property
+    def is_nvidia(self) -> bool:
+        r"""Returns whether this type of models is Nvidia-released model.
+
+        Returns:
+            bool: Whether this type of models is nvidia.
+        """
+        return self in {
+            ModelType.NEMOTRON_4_REWARD,
         }

     @property
@@ -130,15 +150,18 @@ class ModelType(Enum):
             return 2048
         elif self is ModelType.VICUNA_16K:
             return 16384
-        if self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
+        elif self in {ModelType.CLAUDE_2_0, ModelType.CLAUDE_INSTANT_1_2}:
             return 100_000
         elif self in {
             ModelType.CLAUDE_2_1,
             ModelType.CLAUDE_3_OPUS,
             ModelType.CLAUDE_3_SONNET,
             ModelType.CLAUDE_3_HAIKU,
+            ModelType.CLAUDE_3_5_SONNET,
         }:
             return 200_000
+        elif self is ModelType.NEMOTRON_4_REWARD:
+            return 4096
         else:
             raise ValueError("Unknown model type")

@@ -166,35 +189,27 @@ class ModelType(Enum):


 class EmbeddingModelType(Enum):
-    ADA_2 = "text-embedding-ada-002"
-    ADA_1 = "text-embedding-ada-001"
-    BABBAGE_1 = "text-embedding-babbage-001"
-    CURIE_1 = "text-embedding-curie-001"
-    DAVINCI_1 = "text-embedding-davinci-001"
+    TEXT_EMBEDDING_ADA_2 = "text-embedding-ada-002"
+    TEXT_EMBEDDING_3_SMALL = "text-embedding-3-small"
+    TEXT_EMBEDDING_3_LARGE = "text-embedding-3-large"

     @property
     def is_openai(self) -> bool:
         r"""Returns whether this type of models is an OpenAI-released model."""
         return self in {
-            EmbeddingModelType.ADA_2,
-            EmbeddingModelType.ADA_1,
-            EmbeddingModelType.BABBAGE_1,
-            EmbeddingModelType.CURIE_1,
-            EmbeddingModelType.DAVINCI_1,
+            EmbeddingModelType.TEXT_EMBEDDING_ADA_2,
+            EmbeddingModelType.TEXT_EMBEDDING_3_SMALL,
+            EmbeddingModelType.TEXT_EMBEDDING_3_LARGE,
         }

     @property
     def output_dim(self) -> int:
-        if self is EmbeddingModelType.ADA_2:
+        if self is EmbeddingModelType.TEXT_EMBEDDING_ADA_2:
             return 1536
-        elif self is EmbeddingModelType.ADA_1:
-            return 1024
-        elif self is EmbeddingModelType.BABBAGE_1:
-            return 2048
-        elif self is EmbeddingModelType.CURIE_1:
-            return 4096
-        elif self is EmbeddingModelType.DAVINCI_1:
-            return 12288
+        elif self is EmbeddingModelType.TEXT_EMBEDDING_3_SMALL:
+            return 1536
+        elif self is EmbeddingModelType.TEXT_EMBEDDING_3_LARGE:
+            return 3072
         else:
             raise ValueError(f"Unknown model type {self}.")

@@ -207,6 +222,7 @@ class TaskType(Enum):
     EVALUATION = "evaluation"
     SOLUTION_EXTRACTION = "solution_extraction"
     ROLE_DESCRIPTION = "role_description"
+    GENERATE_TEXT_EMBEDDING_DATA = "generate_text_embedding_data"
     OBJECT_RECOGNITION = "object_recognition"
     DEFAULT = "default"
     VIDEO_DESCRIPTION = "video_description"
@@ -279,6 +295,52 @@ class OpenAPIName(Enum):
     WEB_SCRAPER = "web_scraper"


+class ModelPlatformType(Enum):
+    OPENAI = "openai"
+    AZURE = "azure"
+    ANTHROPIC = "anthropic"
+    OPENSOURCE = "opensource"
+    OLLAMA = "ollama"
+    LITELLM = "litellm"
+    ZHIPU = "zhipuai"
+    DEFAULT = "default"
+
+    @property
+    def is_openai(self) -> bool:
+        r"""Returns whether this platform is openai."""
+        return self is ModelPlatformType.OPENAI
+
+    @property
+    def is_azure(self) -> bool:
+        r"""Returns whether this platform is azure."""
+        return self is ModelPlatformType.AZURE
+
+    @property
+    def is_anthropic(self) -> bool:
+        r"""Returns whether this platform is anthropic."""
+        return self is ModelPlatformType.ANTHROPIC
+
+    @property
+    def is_ollama(self) -> bool:
+        r"""Returns whether this platform is ollama."""
+        return self is ModelPlatformType.OLLAMA
+
+    @property
+    def is_litellm(self) -> bool:
+        r"""Returns whether this platform is litellm."""
+        return self is ModelPlatformType.LITELLM
+
+    @property
+    def is_zhipuai(self) -> bool:
+        r"""Returns whether this platform is zhipu."""
+        return self is ModelPlatformType.ZHIPU
+
+    @property
+    def is_open_source(self) -> bool:
+        r"""Returns whether this platform is opensource."""
+        return self is ModelPlatformType.OPENSOURCE
+
+
 class AudioModelType(Enum):
     TTS_1 = "tts-1"
     TTS_1_HD = "tts-1-hd"
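
A few spot checks on how the reworked enums behave, based on the definitions above (the token_limit and is_anthropic property names come from the unchanged parts of enums.py and are assumed to still apply):

from camel.types import EmbeddingModelType, ModelPlatformType, ModelType

assert ModelType.CLAUDE_3_5_SONNET.is_anthropic            # new Claude 3.5 entry
assert ModelType.NEMOTRON_4_REWARD.is_nvidia               # new Nvidia predicate
assert ModelType.NEMOTRON_4_REWARD.token_limit == 4096

assert EmbeddingModelType.TEXT_EMBEDDING_3_LARGE.output_dim == 3072
assert ModelPlatformType.OLLAMA.is_ollama
assert not ModelPlatformType.OLLAMA.is_openai
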
camel/utils/commons.py CHANGED
@@ -62,6 +62,10 @@ def model_api_key_required(func: F) -> F:
             if not self._api_key and 'ANTHROPIC_API_KEY' not in os.environ:
                 raise ValueError('Anthropic API key not found.')
             return func(self, *args, **kwargs)
+        elif self.model_type.is_nvidia:
+            if not self._api_key and 'NVIDIA_API_KEY' not in os.environ:
+                raise ValueError('NVIDIA API key not found.')
+            return func(self, *args, **kwargs)
         else:
             raise ValueError('Unsupported model type.')

@@ -296,9 +296,11 @@ class AnthropicTokenCounter(BaseTokenCounter):
         Returns:
             int: Number of tokens in the messages.
         """
-        prompt = messages_to_prompt(messages, self.model_type)
-
-        return self.client.count_tokens(prompt)
+        num_tokens = 0
+        for message in messages:
+            content = str(message["content"])
+            num_tokens += self.client.count_tokens(content)
+        return num_tokens


 class LiteLLMTokenCounter:
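
The counter now sums per-message token counts instead of first flattening the conversation into a single Anthropic prompt string. A usage sketch (the constructor argument and method name come from the surrounding token_counting.py and are assumed unchanged in this release):

from camel.types import ModelType
from camel.utils.token_counting import AnthropicTokenCounter

counter = AnthropicTokenCounter(ModelType.CLAUDE_3_HAIKU)
messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi, how can I help?"},
]
# Each message's content is counted separately and the results are summed.
print(counter.count_tokens_from_messages(messages))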