camel-ai 0.1.5.5__py3-none-any.whl → 0.1.5.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 
-__version__ = '0.1.5.5'
+__version__ = '0.1.5.6'
 
 __all__ = [
     '__version__',
camel/agents/knowledge_graph_agent.py CHANGED
@@ -78,17 +78,16 @@ Expected Output:
 
 Nodes:
 
-Node(id='John', type='Person', properties={'agent_generated'})
-Node(id='XYZ Corporation', type='Organization', properties={'agent_generated'})
-Node(id='New York City', type='Location', properties={'agent_generated'})
+Node(id='John', type='Person')
+Node(id='XYZ Corporation', type='Organization')
+Node(id='New York City', type='Location')
 
 Relationships:
 
 Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
-Corporation', type='Organization'), type='WorksAt', properties=
-{'agent_generated'})
+Corporation', type='Organization'), type='WorksAt')
 Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
-type='Location'), type='ResidesIn', properties={'agent_generated'})
+type='Location'), type='ResidesIn')
 
 ===== TASK =====
 Please extract nodes and relationships from given content and structure them
@@ -211,11 +210,10 @@ class KnowledgeGraphAgent(ChatAgent):
         import re
 
         # Regular expressions to extract nodes and relationships
-        node_pattern = r"Node\(id='(.*?)', type='(.*?)', properties=(.*?)\)"
+        node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
         rel_pattern = (
             r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
-            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', "
-            r"properties=\{(.*?)\}\)"
+            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
         )
 
         nodes = {}
@@ -223,8 +221,8 @@ class KnowledgeGraphAgent(ChatAgent):
 
         # Extract nodes
         for match in re.finditer(node_pattern, input_string):
-            id, type, properties = match.groups()
-            properties = eval(properties)
+            id, type = match.groups()
+            properties = {'source': 'agent_created'}
             if id not in nodes:
                 node = Node(id, type, properties)
                 if self._validate_node(node):
@@ -232,10 +230,8 @@ class KnowledgeGraphAgent(ChatAgent):
 
         # Extract relationships
         for match in re.finditer(rel_pattern, input_string):
-            subj_id, subj_type, obj_id, obj_type, rel_type, properties_str = (
-                match.groups()
-            )
-            properties = eval(properties_str)
+            subj_id, subj_type, obj_id, obj_type, rel_type = match.groups()
+            properties = {'source': 'agent_created'}
             if subj_id in nodes and obj_id in nodes:
                 subj = nodes[subj_id]
                 obj = nodes[obj_id]
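
The practical effect of this change: element properties are no longer eval()'d out of the model's text (removing a code-execution path on LLM output); the parser now stamps every node and relationship with a fixed {'source': 'agent_created'} dict. A minimal standalone sketch of the new parsing, using hypothetical sample text that mirrors the "Expected Output" format above:

import re

# Patterns copied from the new parser; the sample text below is made up.
node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
rel_pattern = (
    r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
    r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
)

sample = (
    "Node(id='John', type='Person')\n"
    "Node(id='XYZ Corporation', type='Organization')\n"
    "Relationship(subj=Node(id='John', type='Person'), "
    "obj=Node(id='XYZ Corporation', type='Organization'), type='WorksAt')"
)

nodes = {}
for match in re.finditer(node_pattern, sample):
    node_id, node_type = match.groups()
    # Deduplicate: node_pattern also matches the Node(...) terms embedded
    # inside Relationship(...) lines, just as the agent's dict lookup does.
    nodes.setdefault(node_id, (node_type, {'source': 'agent_created'}))

rels = [m.groups() for m in re.finditer(rel_pattern, sample)]
print(nodes)  # {'John': ('Person', {...}), 'XYZ Corporation': (...)}
print(rels)   # [('John', 'Person', 'XYZ Corporation', 'Organization', 'WorksAt')]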
camel/agents/task_agent.py CHANGED
@@ -100,7 +100,6 @@ class TaskSpecifyAgent(ChatAgent):
 
         if meta_dict is not None:
             task_specify_prompt = task_specify_prompt.format(**meta_dict)
-
         task_msg = BaseMessage.make_user_message(
             role_name="Task Specifier", content=task_specify_prompt
         )
camel/configs/__init__.py CHANGED
@@ -13,6 +13,10 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
+from .gemini_config import (
+    Gemini_API_PARAMS,
+    GeminiConfig,
+)
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import (
@@ -35,4 +39,6 @@ __all__ = [
     'OLLAMA_API_PARAMS',
     'ZhipuAIConfig',
     'ZHIPUAI_API_PARAMS',
+    'GeminiConfig',
+    'Gemini_API_PARAMS',
 ]
camel/configs/gemini_config.py ADDED
@@ -0,0 +1,97 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+
+from collections.abc import Iterable
+from dataclasses import asdict, dataclass
+from typing import Optional
+
+from camel.configs.base_config import BaseConfig
+
+
+@dataclass(frozen=True)
+class GeminiConfig(BaseConfig):
+    r"""A simple dataclass used to configure the generation parameters of
+    `GenerativeModel.generate_content`.
+
+    Args:
+        candidate_count (int, optional): Number of responses to return.
+        stop_sequences (Iterable[str], optional): The set of character
+            sequences (up to 5) that will stop output generation. If specified,
+            the API will stop at the first appearance of a stop sequence.
+            The stop sequence will not be included as part of the response.
+        max_output_tokens (int, optional): The maximum number of tokens to
+            include in a candidate. If unset, this will default to the
+            output_token_limit specified in the model's specification.
+        temperature (float, optional): Controls the randomness of the output.
+            Note: The default value varies by model, see the
+            `Model.temperature` attribute of the `Model` returned by
+            the `genai.get_model` function. Values can range from [0.0, 1.0],
+            inclusive. A value closer to 1.0 will produce responses that are
+            more varied and creative, while a value closer to 0.0 will
+            typically result in more straightforward responses from the model.
+        top_p (float, optional): The maximum cumulative probability of tokens
+            to consider when sampling. The model uses combined Top-k and
+            nucleus sampling. Tokens are sorted based on their assigned
+            probabilities so that only the most likely tokens are considered.
+            Top-k sampling directly limits the maximum number of tokens to
+            consider, while nucleus sampling limits the number of tokens
+            based on the cumulative probability. Note: The default value
+            varies by model, see the `Model.top_p` attribute of the `Model`
+            returned by the `genai.get_model` function.
+        top_k (int, optional): The maximum number of tokens to consider when
+            sampling. The model uses combined Top-k and nucleus sampling.
+            Top-k sampling considers the set of `top_k` most probable tokens.
+            Defaults to 40. Note: The default value varies by model, see the
+            `Model.top_k` attribute of the `Model` returned by the
+            `genai.get_model` function.
+        response_mime_type (str, optional): Output response mimetype of the
+            generated candidate text. Supported mimetypes:
+            `text/plain`: (default) Text output.
+            `application/json`: JSON response in the candidates.
+        response_schema (Schema, optional): Specifies the format of the
+            JSON requested if response_mime_type is `application/json`.
+        safety_settings (SafetySettingOptions, optional):
+            Overrides for the model's safety settings.
+        tools (FunctionLibraryType, optional):
+            `protos.Tools` more info coming soon.
+        tool_config (ToolConfigType, optional):
+            more info coming soon.
+        request_options (RequestOptionsType, optional):
+            Options for the request.
+    """
+
+    from google.generativeai.protos import Schema
+    from google.generativeai.types.content_types import (
+        FunctionLibraryType,
+        ToolConfigType,
+    )
+    from google.generativeai.types.helper_types import RequestOptionsType
+    from google.generativeai.types.safety_types import SafetySettingOptions
+
+    candidate_count: Optional[int] = None
+    stop_sequences: Optional[Iterable[str]] = None
+    max_output_tokens: Optional[int] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    response_mime_type: Optional[str] = None
+    response_schema: Optional[Schema] = None
+    safety_settings: Optional[SafetySettingOptions] = None
+    tools: Optional[FunctionLibraryType] = None
+    tool_config: Optional[ToolConfigType] = None
+    request_options: Optional[RequestOptionsType] = None
+
+
+Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}
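
For illustration, a minimal usage sketch of the config above (assumes camel-ai 0.1.5.6 and google-generativeai are installed; the field values are arbitrary). Note that `Gemini_API_PARAMS` is, by construction, exactly the set of `GeminiConfig` field names:

from dataclasses import asdict

from camel.configs import Gemini_API_PARAMS, GeminiConfig

config = GeminiConfig(temperature=0.4, max_output_tokens=256)

# asdict() turns the frozen dataclass into the plain dict that gets fed
# into the model backend as model_config_dict.
config_dict = asdict(config)
assert set(config_dict).issubset(Gemini_API_PARAMS)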
camel/models/__init__.py CHANGED
@@ -13,6 +13,7 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_model import AnthropicModel
 from .base_model import BaseModelBackend
+from .gemini_model import GeminiModel
 from .litellm_model import LiteLLMModel
 from .model_factory import ModelFactory
 from .nemotron_model import NemotronModel
@@ -35,4 +36,5 @@ __all__ = [
     'OpenAIAudioModels',
     'NemotronModel',
     'OllamaModel',
+    'GeminiModel',
 ]
camel/models/gemini_model.py ADDED
@@ -0,0 +1,203 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from camel.configs import Gemini_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionMessage,
+    Choice,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    GeminiTokenCounter,
+    api_keys_required,
+)
+
+if TYPE_CHECKING:
+    from google.generativeai.types import ContentsType, GenerateContentResponse
+
+
+class GeminiModel(BaseModelBackend):
+    r"""Gemini API in a unified BaseModelBackend interface."""
+
+    # NOTE: Currently "stream": True is not supported with Gemini due to the
+    # limitation of the current camel design.
+
+    def __init__(
+        self,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+    ) -> None:
+        r"""Constructor for Gemini backend.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into generate_content().
+            api_key (Optional[str]): The API key for authenticating with the
+                Gemini service. (default: :obj:`None`)
+            url (Optional[str]): The URL to the Gemini service.
+        """
+        import os
+
+        import google.generativeai as genai
+        from google.generativeai.types.generation_types import GenerationConfig
+
+        super().__init__(model_type, model_config_dict, api_key, url)
+        self._api_key = api_key or os.environ.get("GOOGLE_API_KEY")
+        genai.configure(api_key=self._api_key)
+        self._client = genai.GenerativeModel(self.model_type.value)
+        self._token_counter: Optional[BaseTokenCounter] = None
+        keys = list(self.model_config_dict.keys())
+        generation_config_dict = {
+            k: self.model_config_dict.pop(k)
+            for k in keys
+            if hasattr(GenerationConfig, k)
+        }
+        generation_config = genai.types.GenerationConfig(
+            **generation_config_dict
+        )
+        self.model_config_dict["generation_config"] = generation_config
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        if not self._token_counter:
+            self._token_counter = GeminiTokenCounter(self.model_type)
+        return self._token_counter
+
+    @api_keys_required("GOOGLE_API_KEY")
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> ChatCompletion:
+        r"""Runs inference of the Gemini model.
+        This method can handle multimodal input.
+
+        Args:
+            messages: Message list or Message with the chat history
+                in OpenAI format.
+
+        Returns:
+            response: A ChatCompletion object formatted for the OpenAI API.
+        """
+        response = self._client.generate_content(
+            contents=self.to_gemini_req(messages),
+            **self.model_config_dict,
+        )
+        response.resolve()
+        return self.to_openai_response(response)
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to the Gemini API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to the Gemini API.
+        """
+        if self.model_config_dict is not None:
+            for param in self.model_config_dict:
+                if param not in Gemini_API_PARAMS:
+                    raise ValueError(
+                        f"Unexpected argument `{param}` is "
+                        "input into Gemini model backend."
+                    )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode,
+        which sends partial results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
+
+    def to_gemini_req(self, messages: List[OpenAIMessage]) -> 'ContentsType':
+        r"""Converts the request from the OpenAI API format to
+        the Gemini API request format.
+
+        Args:
+            messages: The request object from the OpenAI API.
+
+        Returns:
+            converted_messages: A list of messages formatted for Gemini API.
+        """
+        # role reference
+        # https://ai.google.dev/api/python/google/generativeai/protos/Content
+        converted_messages = []
+        for message in messages:
+            role = message.get('role')
+            if role == 'assistant':
+                role_to_gemini = 'model'
+            else:
+                role_to_gemini = 'user'
+            converted_message = {
+                "role": role_to_gemini,
+                "parts": message.get("content"),
+            }
+            converted_messages.append(converted_message)
+        return converted_messages
+
+    def to_openai_response(
+        self,
+        response: 'GenerateContentResponse',
+    ) -> ChatCompletion:
+        r"""Converts the response from the Gemini API to the OpenAI API
+        response format.
+
+        Args:
+            response: The response object returned by the Gemini API.
+
+        Returns:
+            openai_response: A ChatCompletion object formatted for
+                the OpenAI API.
+        """
+        import time
+        import uuid
+
+        openai_response = ChatCompletion(
+            id=f"chatcmpl-{uuid.uuid4().hex!s}",
+            object="chat.completion",
+            created=int(time.time()),
+            model=self.model_type.value,
+            choices=[],
+        )
+        for i, candidate in enumerate(response.candidates):
+            content = ""
+            if candidate.content and len(candidate.content.parts) > 0:
+                content = candidate.content.parts[0].text
+            finish_reason = candidate.finish_reason
+            finish_reason_mapping = {
+                "FinishReason.STOP": "stop",
+                "FinishReason.SAFETY": "content_filter",
+                "FinishReason.RECITATION": "content_filter",
+                "FinishReason.MAX_TOKENS": "length",
+            }
+            finish_reason = finish_reason_mapping.get(finish_reason, "stop")
+            choice = Choice(
+                index=i,
+                message=ChatCompletionMessage(
+                    role="assistant", content=content
+                ),
+                finish_reason=finish_reason,
+            )
+            openai_response.choices.append(choice)
+        return openai_response
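
The role mapping in to_gemini_req is the core of the OpenAI-to-Gemini conversion: 'assistant' turns become Gemini 'model' turns, and everything else, including 'system', is sent as 'user'. A self-contained sketch of just that mapping, with hypothetical messages:

# Mirrors the conversion loop above, outside the class for illustration.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!"},
]

converted = [
    {
        "role": "model" if m["role"] == "assistant" else "user",
        "parts": m["content"],
    }
    for m in messages
]
print(converted)
# [{'role': 'user', ...}, {'role': 'user', ...}, {'role': 'model', ...}]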
camel/models/model_factory.py CHANGED
@@ -15,6 +15,7 @@ from typing import Any, Dict, Optional, Union
 
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.base_model import BaseModelBackend
+from camel.models.gemini_model import GeminiModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.open_source_model import OpenSourceModel
@@ -59,7 +60,6 @@ class ModelFactory:
             BaseModelBackend: The initialized backend.
         """
         model_class: Any
-
         if isinstance(model_type, ModelType):
            if model_platform.is_open_source and model_type.is_open_source:
                model_class = OpenSourceModel
@@ -70,6 +70,8 @@ class ModelFactory:
                model_class = AnthropicModel
            elif model_platform.is_zhipuai and model_type.is_zhipuai:
                model_class = ZhipuAIModel
+           elif model_platform.is_gemini and model_type.is_gemini:
+               model_class = GeminiModel
            elif model_type == ModelType.STUB:
                model_class = StubModel
            else:
@@ -90,5 +92,4 @@ class ModelFactory:
            )
        else:
            raise ValueError(f"Invalid model type `{model_type}` provided.")
-
        return model_class(model_type, model_config_dict, api_key, url)
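
Putting the pieces together, a hedged end-to-end sketch of creating the new backend through the factory. This assumes ModelFactory.create takes the platform/type/config arguments implied by the hunks above and that GOOGLE_API_KEY is set in the environment; it is not a verbatim example from the release:

import os
from dataclasses import asdict

from camel.configs import GeminiConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

os.environ.setdefault("GOOGLE_API_KEY", "<your-key>")  # hypothetical key

# The factory dispatches on is_gemini for both the platform and model type.
model = ModelFactory.create(
    model_platform=ModelPlatformType.GEMINI,
    model_type=ModelType.GEMINI_1_5_FLASH,
    model_config_dict=asdict(GeminiConfig(temperature=0.2)),
)
response = model.run([{"role": "user", "content": "Say hello."}])
print(response.choices[0].message.content)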
camel/types/enums.py CHANGED
@@ -58,6 +58,10 @@ class ModelType(Enum):
     # Nvidia models
     NEMOTRON_4_REWARD = "nvidia/nemotron-4-340b-reward"
 
+    # Gemini models
+    GEMINI_1_5_FLASH = "gemini-1.5-flash"
+    GEMINI_1_5_PRO = "gemini-1.5-pro"
+
     @property
     def value_for_tiktoken(self) -> str:
         return (
@@ -126,6 +130,10 @@ class ModelType(Enum):
         ModelType.NEMOTRON_4_REWARD,
     }
 
+    @property
+    def is_gemini(self) -> bool:
+        return self in {ModelType.GEMINI_1_5_FLASH, ModelType.GEMINI_1_5_PRO}
+
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
@@ -142,6 +150,10 @@ class ModelType(Enum):
             return 128000
         elif self is ModelType.GPT_4O:
             return 128000
+        elif self == ModelType.GEMINI_1_5_FLASH:
+            return 1048576
+        elif self == ModelType.GEMINI_1_5_PRO:
+            return 1048576
         elif self == ModelType.GLM_4_OPEN_SOURCE:
             return 8192
         elif self == ModelType.GLM_3_TURBO:
@@ -331,6 +343,7 @@ class ModelPlatformType(Enum):
     LITELLM = "litellm"
     ZHIPU = "zhipuai"
     DEFAULT = "default"
+    GEMINI = "gemini"
 
     @property
     def is_openai(self) -> bool:
@@ -367,6 +380,11 @@ class ModelPlatformType(Enum):
         r"""Returns whether this platform is opensource."""
         return self is ModelPlatformType.OPENSOURCE
 
+    @property
+    def is_gemini(self) -> bool:
+        r"""Returns whether this platform is Gemini."""
+        return self is ModelPlatformType.GEMINI
+
 
 class AudioModelType(Enum):
     TTS_1 = "tts-1"
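
A quick sketch of what the new enum members expose, assuming camel.types re-exports both enums (as the imports elsewhere in this diff suggest):

from camel.types import ModelPlatformType, ModelType

flash = ModelType.GEMINI_1_5_FLASH
print(flash.value)        # "gemini-1.5-flash"
print(flash.is_gemini)    # True
print(flash.token_limit)  # 1048576, i.e. a 1M-token context window

print(ModelPlatformType.GEMINI.is_gemini)  # True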
camel/utils/__init__.py CHANGED
@@ -32,6 +32,7 @@ from .constants import Constants
 from .token_counting import (
     AnthropicTokenCounter,
     BaseTokenCounter,
+    GeminiTokenCounter,
     LiteLLMTokenCounter,
     OpenAITokenCounter,
     OpenSourceTokenCounter,
@@ -60,4 +61,5 @@ __all__ = [
     'dependencies_required',
     'api_keys_required',
     'is_docker_running',
+    'GeminiTokenCounter',
 ]
camel/utils/token_counting.py CHANGED
@@ -342,6 +342,40 @@ class AnthropicTokenCounter(BaseTokenCounter):
         return num_tokens
 
 
+class GeminiTokenCounter(BaseTokenCounter):
+    def __init__(self, model_type: ModelType):
+        r"""Constructor for the token counter for Gemini models."""
+        import google.generativeai as genai
+
+        self.model_type = model_type
+        self._client = genai.GenerativeModel(self.model_type.value)
+
+    def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
+        r"""Count the number of tokens in the provided message list using
+        the loaded tokenizer specific to this type of model.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            int: Number of tokens in the messages.
+        """
+        converted_messages = []
+        for message in messages:
+            role = message.get('role')
+            if role == 'assistant':
+                role_to_gemini = 'model'
+            else:
+                role_to_gemini = 'user'
+            converted_message = {
+                "role": role_to_gemini,
+                "parts": message.get("content"),
+            }
+            converted_messages.append(converted_message)
+        return self._client.count_tokens(converted_messages).total_tokens
+
+
 class LiteLLMTokenCounter:
     def __init__(self, model_type: str):
         r"""Constructor for the token counter for LiteLLM models.
camel_ai-0.1.5.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.5
+Version: 0.1.5.6
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -36,6 +36,7 @@ Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: jsonschema (>=4,<5)
@@ -190,7 +191,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.5 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.6 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
camel_ai-0.1.5.6.dist-info/RECORD CHANGED
@@ -1,20 +1,21 @@
-camel/__init__.py,sha256=SpC9pRp8bm5IPRCPalmZqDISaDVcj64s9RpwgxSeg9E,780
+camel/__init__.py,sha256=AHqm3F6XPqSG9VS6iCkImH2Qma_1Sgv2ibfXqciniCQ,780
 camel/agents/__init__.py,sha256=SSU1wbhZXWwQnE0rRxkpyN57kEu72KklsZNcdLkXfTs,1551
 camel/agents/base.py,sha256=X39qWSiT1WnDqaJ9k3gQrTpOQSwUKzNEVpp5AY6fDH8,1130
 camel/agents/chat_agent.py,sha256=yeSSVTnKbRmA7DUFv2waaPz3rMM6z-Gevi0DoYQ8-Uo,27687
 camel/agents/critic_agent.py,sha256=M3XNxRS0wAs5URjc_0kvtXqUlD-KpXq3L5ADz-KCKGU,7199
 camel/agents/deductive_reasoner_agent.py,sha256=8R9hY_yCr_guq_ySuIE3eaYbiPeHVrsh6HKqIWrR0zY,13180
 camel/agents/embodied_agent.py,sha256=Mm2-wvcpduXOvsHMBcavroACyvK06Mxe6QYTf80tdfI,7160
-camel/agents/knowledge_graph_agent.py,sha256=h3LdqqX0KPPyq5v9O4WXZTdcyS2Exz5cMGZPei38ZNo,8818
+camel/agents/knowledge_graph_agent.py,sha256=YWJFVml8p6AE4WHxrXK01rMU0idey5SRbyuRYHEWFWQ,8559
 camel/agents/role_assignment_agent.py,sha256=IWfu5b2RW1gYziffskErhdmybJOusVvhb9gqLF9_5mw,4800
 camel/agents/search_agent.py,sha256=TMyV2LoBVB0hMnSex6q7xbyLRGsF_EMKxCZ8xbZBX9o,4404
-camel/agents/task_agent.py,sha256=aNpn8bYoe2VlVSlWfbV6ynI5zG9pXq6V5NcppqJGVlU,14253
+camel/agents/task_agent.py,sha256=Sn7TckR9p2WyzgFLBN2MfjXaRAzQtW0bgEqpcOqOgzo,14252
 camel/agents/tool_agents/__init__.py,sha256=ulTNWU2qoFGe3pvVmCq_sdfeSX3NKZ0due66TYvsL-M,862
 camel/agents/tool_agents/base.py,sha256=nQAhfWi8a_bCgzlf5-G-tmj1fKm6AjpRc89NQkWwpnc,1399
 camel/agents/tool_agents/hugging_face_tool_agent.py,sha256=1Z5tG6f_86eL0vmtRZ-BJvoLDFFLhoHt8JtDvgat1xU,8723
-camel/configs/__init__.py,sha256=WW7R8GtyeXqBdYDNbax6XK6M1BfEByQHdYT1K3kDPMo,1383
+camel/configs/__init__.py,sha256=momCJ2GkGVTwfNKmmBuJQZrz3hhsxkwLACxUT1-kLkE,1500
 camel/configs/anthropic_config.py,sha256=zD7VMFUw4s7wmBlr64oSXxpEUkhp7wj9mvAd0WK2zFc,3308
 camel/configs/base_config.py,sha256=CEF8ryl_dkH6LgOhwuP5_EgjaWCUCB-E3GcMWR-2YFE,870
+camel/configs/gemini_config.py,sha256=Der-kDsD2vLTLvsG4ju-BfIxPIFdL1xt-kjYJEhEdhg,4922
 camel/configs/litellm_config.py,sha256=tfLls2XkKfmejMuPZT-_k2XzMc0GaepDy31XGzPYJ0I,4885
 camel/configs/ollama_config.py,sha256=npjJMe1lIK_WpnVtSnnKzKE78i2bWb6FPPoPa-nJ3Fo,4396
 camel/configs/openai_config.py,sha256=tFEiPDQ8Cdvkfds83T7_5osNikwA3NuRGbpjV0wq4Ao,7593
@@ -83,11 +84,12 @@ camel/memories/records.py,sha256=zmZsYHVuq6fYqJDkzhNXF02uWLzdBemaEZeG0Ls90pU,361
 camel/messages/__init__.py,sha256=djLvpz6AmjeLzuUSQl7J6T2O4x8MwSdcH0l9fbj_3yg,1468
 camel/messages/base.py,sha256=1cyYITXxBsp2UCdOjF1Ky4W_PgRegEfitqbrF9PjUPs,13721
 camel/messages/func_message.py,sha256=CCVkbz-2pdxXV0vBETI0xt7d7uiN8zACpRI7lCnfTFQ,3841
-camel/models/__init__.py,sha256=RfAHcSSaBUAilObQIU07uICxaujjeO5EwLv4Pg0vrPc,1408
+camel/models/__init__.py,sha256=832nhDetpPm8tg7h9O18rtLQoyANu4VUXF6J6ElDqlU,1465
 camel/models/anthropic_model.py,sha256=mypGlcn2koBFf3FsgBAqOqWwxvY-c1prINr5nYvjvts,5540
 camel/models/base_model.py,sha256=TMbS44Fn-6m0OlrxYCtvwKqGUM_4Jz2y6kX-P28nOeI,4030
+camel/models/gemini_model.py,sha256=xawgEhLjUJCRFWbKRM6zojHbrwNBc2elqki85v9PvIA,7291
 camel/models/litellm_model.py,sha256=3o8ImWSYc_a_3rXA_y5Hh562SzNVyfCmVj9B2pMN78k,4902
-camel/models/model_factory.py,sha256=EFc5_M68AVfi34VhPeftaiWfTR9dDRHUxsJ1F81RxhQ,3949
+camel/models/model_factory.py,sha256=ScPN7stphd2J651VPTABMnh9tCSXS-bKMXwB65uDYYg,4107
 camel/models/nemotron_model.py,sha256=2Idf4wrZervxvfu6av42EKjefFtDnBb6cKnWCJUkqI4,2682
 camel/models/ollama_model.py,sha256=n6Ra4virpjqEZr3rnXwFVScN4WFj3psksEPGadvyk88,4151
 camel/models/open_source_model.py,sha256=r8TGq-9xAwOANZ5s_y3fJUGAvS0zDg03RmbZ8X2ga-E,6156
@@ -143,13 +145,13 @@ camel/toolkits/base.py,sha256=znjnZtgxA5gbT7OMnrKQF_a9FK3A7Xk5s_lP94u76vI,923
 camel/toolkits/code_execution.py,sha256=pwWwZQ5etSghdWUZAg5Wao7l2GC7FYHXiVJfr0tM66E,2426
 camel/toolkits/github_toolkit.py,sha256=NT6gGqy5kV7AGgiJVHAuhlcZxSkTiSJu6f1sm6n-0PU,11516
 camel/types/__init__.py,sha256=ArKXATj3z_Vv4ISmROVeo6Mv3tj5kE1dTkqfgwyxVY4,1975
-camel/types/enums.py,sha256=77O6gCE5YWgqRIND50QMHopsgQ8Ksu5iqfaY4HbOqRU,11876
+camel/types/enums.py,sha256=ojXbi6vmY3zJm4CslxMh3y9NKIhFpWaPtsGb8G1buwQ,12427
 camel/types/openai_types.py,sha256=BNQ6iCzKTjSvgcXFsAFIgrUS_YUFZBU6bDoyAp387hI,2045
-camel/utils/__init__.py,sha256=hD5gV-vYs_otMnQY0ZOuHoJ4iVkmCh2PgBCkYNgIm0Y,1817
+camel/utils/__init__.py,sha256=m85T6bXzNe_2ObpAlXRMAkud4Qz4886WdLp_SEmJwV8,1867
 camel/utils/async_func.py,sha256=q8t7vq_Yd_i5i0b2Mce9d7-hrH70WrtQbqa-T4wYwQU,1595
 camel/utils/commons.py,sha256=J7AOOh5huQkwTvjDt_gpiXNTXnuk0yM_hdtRU8clpNE,11298
 camel/utils/constants.py,sha256=ZIw5ILfOyJFyjEAYrbJMANeg1_EZI-zMK_xVrkwALbM,1105
-camel/utils/token_counting.py,sha256=umFg9UhUpmM9Fu5nbC29UhXuc9iy8b8aKWOYGDEeRcg,15856
-camel_ai-0.1.5.5.dist-info/METADATA,sha256=6y14QJuQrAcvFhszMRfROf3yB1ww3bWcFTChl0I43N8,21966
-camel_ai-0.1.5.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-camel_ai-0.1.5.5.dist-info/RECORD,,
+camel/utils/token_counting.py,sha256=euqt0g7WXrHYZTOTBDzcPDL5P4gLzaDpdibH9-8cC3g,17096
+camel_ai-0.1.5.6.dist-info/METADATA,sha256=NN6R_0dQiYI5C_e5KC0kdZmEx0PEtGgMQ_lNdMCUwes,22065
+camel_ai-0.1.5.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+camel_ai-0.1.5.6.dist-info/RECORD,,