camel-ai 0.2.13__py3-none-any.whl → 0.2.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai has been flagged as potentially problematic; consult the package registry's advisory page for more details.

camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
14
14
 
15
15
  from camel.logger import disable_logging, enable_logging, set_log_level
16
16
 
17
- __version__ = '0.2.13'
17
+ __version__ = '0.2.14'
18
18
 
19
19
  __all__ = [
20
20
  '__version__',
@@ -21,6 +21,7 @@ from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
21
21
  from camel.messages import OpenAIMessage
22
22
  from camel.models import BaseModelBackend
23
23
  from camel.types import (
24
+ NOT_GIVEN,
24
25
  ChatCompletion,
25
26
  ChatCompletionChunk,
26
27
  ModelType,
@@ -103,7 +104,11 @@ class OpenAIModel(BaseModelBackend):
103
104
  """
104
105
  # o1-preview and o1-mini have Beta limitations
105
106
  # reference: https://platform.openai.com/docs/guides/reasoning
106
- if self.model_type in [ModelType.O1_MINI, ModelType.O1_PREVIEW]:
107
+ if self.model_type in [
108
+ ModelType.O1,
109
+ ModelType.O1_MINI,
110
+ ModelType.O1_PREVIEW,
111
+ ]:
107
112
  warnings.warn(
108
113
  "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
109
114
  "which has certain limitations, reference: "
@@ -111,22 +116,21 @@ class OpenAIModel(BaseModelBackend):
111
116
  UserWarning,
112
117
  )
113
118
 
114
- # Remove system message that is not supported in o1 model.
115
- messages = [msg for msg in messages if msg.get("role") != "system"]
116
-
117
119
  # Check and remove unsupported parameters and reset the fixed
118
120
  # parameters
119
- unsupported_keys = ["stream", "tools", "tool_choice"]
121
+ unsupported_keys = [
122
+ "temperature",
123
+ "top_p",
124
+ "presence_penalty",
125
+ "frequency_penalty",
126
+ "logprobs",
127
+ "top_logprobs",
128
+ "logit_bias",
129
+ ]
120
130
  for key in unsupported_keys:
121
131
  if key in self.model_config_dict:
122
132
  del self.model_config_dict[key]
123
133
 
124
- self.model_config_dict["temperature"] = 1.0
125
- self.model_config_dict["top_p"] = 1.0
126
- self.model_config_dict["n"] = 1
127
- self.model_config_dict["presence_penalty"] = 0.0
128
- self.model_config_dict["frequency_penalty"] = 0.0
129
-
130
134
  if self.model_config_dict.get("response_format"):
131
135
  # stream is not supported in beta.chat.completions.parse
132
136
  if "stream" in self.model_config_dict:
@@ -140,6 +144,14 @@ class OpenAIModel(BaseModelBackend):
140
144
 
141
145
  return self._to_chat_completion(response)
142
146
 
147
+ # Removing 'strict': True from the dictionary for
148
+ # client.chat.completions.create
149
+ if self.model_config_dict.get('tools') is not NOT_GIVEN:
150
+ for tool in self.model_config_dict.get('tools', []):
151
+ function_dict = tool.get('function', {})
152
+ if 'strict' in function_dict:
153
+ del function_dict['strict']
154
+
143
155
  response = self._client.chat.completions.create(
144
156
  messages=messages,
145
157
  model=self.model_type,
camel/schemas/__init__.py CHANGED
@@ -13,5 +13,6 @@
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
 
15
15
  from .openai_converter import OpenAISchemaConverter
16
+ from .outlines_converter import OutlinesConverter
16
17
 
17
- __all__ = ["OpenAISchemaConverter"]
18
+ __all__ = ["OpenAISchemaConverter", "OutlinesConverter"]
camel/schemas/base.py CHANGED
@@ -15,8 +15,6 @@
15
15
  from abc import ABC, abstractmethod
16
16
  from typing import Any, Dict
17
17
 
18
- from pydantic import BaseModel
19
-
20
18
 
21
19
  class BaseConverter(ABC):
22
20
  r"""A base class for schema outputs that includes functionality
@@ -30,7 +28,7 @@ class BaseConverter(ABC):
30
28
  @abstractmethod
31
29
  def convert(
32
30
  self, content: str, *args: Any, **kwargs: Dict[str, Any]
33
- ) -> BaseModel:
31
+ ) -> Any:
34
32
  r"""Structures the input text into the expected response format.
35
33
 
36
34
  Args:
@@ -40,6 +38,6 @@ class BaseConverter(ABC):
40
38
  prompt (Optional[str], optional): The prompt to be used.
41
39
 
42
40
  Returns:
43
- Optional[BaseModel]: The structured response.
41
+ Any: The converted response.
44
42
  """
45
43
  pass
@@ -0,0 +1,249 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+
15
+ from typing import Any, Callable, List, Literal, Type, Union
16
+
17
+ from pydantic import BaseModel
18
+
19
+ from .base import BaseConverter
20
+
21
+
22
+ class OutlinesConverter(BaseConverter):
23
+ r"""OutlinesConverter is a class that converts a string or a function
24
+ into a BaseModel schema.
25
+
26
+ Args:
27
+ model_type (str, optional): The model type to be used.
28
+ platform (str, optional): The platform to be used.
29
+ 1. transformers
30
+ 2. mamba
31
+ 3. vllm
32
+ 4. llamacpp
33
+ 5. mlx
34
+ (default: "transformers")
35
+ **kwargs: The keyword arguments to be used. See the outlines
36
+ documentation for more details. See
37
+ https://dottxt-ai.github.io/outlines/latest/reference/models/models/
38
+ """
39
+
40
+ def __init__(
41
+ self,
42
+ model_type: str,
43
+ platform: Literal[
44
+ "vllm", "transformers", "mamba", "llamacpp", "mlx"
45
+ ] = "transformers",
46
+ **kwargs: Any,
47
+ ):
48
+ self.model_type = model_type
49
+ from outlines import models
50
+
51
+ match platform:
52
+ case "vllm":
53
+ self._outlines_model = models.vllm(model_type, **kwargs)
54
+ case "transformers":
55
+ self._outlines_model = models.transformers(
56
+ model_type, **kwargs
57
+ )
58
+ case "mamba":
59
+ self._outlines_model = models.mamba(model_type, **kwargs)
60
+ case "llamacpp":
61
+ self._outlines_model = models.llamacpp(model_type, **kwargs)
62
+ case "mlx":
63
+ self._outlines_model = models.mlxlm(model_type, **kwargs)
64
+ case _:
65
+ raise ValueError(f"Unsupported platform: {platform}")
66
+
67
+ def convert_regex(self, content: str, regex_pattern: str) -> str:
68
+ r"""Convert the content to the specified regex pattern.
69
+
70
+ Args:
71
+ content (str): The content to be converted.
72
+ regex_pattern (str): The regex pattern to be used.
73
+
74
+ Returns:
75
+ str: The converted content.
76
+ """
77
+ import outlines
78
+
79
+ regex_generator = outlines.generate.regex(
80
+ self._outlines_model, regex_pattern
81
+ )
82
+ return regex_generator(content)
83
+
84
+ def convert_json(
85
+ self,
86
+ content: str,
87
+ output_schema: Union[str, Callable],
88
+ ) -> dict:
89
+ r"""Convert the content to the specified JSON schema given by
90
+ output_schema.
91
+
92
+ Args:
93
+ content (str): The content to be converted.
94
+ output_schema (Union[str, Callable]): The expected format of the
95
+ response.
96
+
97
+ Returns:
98
+ dict: The converted content in JSON format.
99
+ """
100
+ import outlines
101
+
102
+ json_generator = outlines.generate.json(
103
+ self._outlines_model, output_schema
104
+ )
105
+ return json_generator(content)
106
+
107
+ def convert_pydantic(
108
+ self,
109
+ content: str,
110
+ output_schema: Type[BaseModel],
111
+ ) -> BaseModel:
112
+ r"""Convert the content to the specified Pydantic schema.
113
+
114
+ Args:
115
+ content (str): The content to be converted.
116
+ output_schema (Type[BaseModel]): The expected format of the
117
+ response.
118
+
119
+ Returns:
120
+ BaseModel: The converted content in pydantic model format.
121
+ """
122
+ import outlines
123
+
124
+ json_generator = outlines.generate.json(
125
+ self._outlines_model, output_schema
126
+ )
127
+ return json_generator(content)
128
+
129
+ def convert_type(self, content: str, type_name: type) -> str:
130
+ r"""Convert the content to the specified type.
131
+
132
+ The following types are currently available:
133
+ 1. int
134
+ 2. float
135
+ 3. bool
136
+ 4. datetime.date
137
+ 5. datetime.time
138
+ 6. datetime.datetime
139
+ 7. custom types (https://dottxt-ai.github.io/outlines/latest/reference/generation/types/)
140
+
141
+ Args:
142
+ content (str): The content to be converted.
143
+ type_name (type): The type to be used.
144
+
145
+ Returns:
146
+ str: The converted content.
147
+ """
148
+ import outlines
149
+
150
+ type_generator = outlines.generate.format(
151
+ self._outlines_model, type_name
152
+ )
153
+ return type_generator(content)
154
+
155
+ def convert_choice(self, content: str, choices: List[str]) -> str:
156
+ r"""Convert the content to the specified choice.
157
+
158
+ Args:
159
+ content (str): The content to be converted.
160
+ choices (List[str]): The choices to be used.
161
+
162
+ Returns:
163
+ str: The converted content.
164
+ """
165
+ import outlines
166
+
167
+ choices_generator = outlines.generate.choice(
168
+ self._outlines_model, choices
169
+ )
170
+ return choices_generator(content)
171
+
172
+ def convert_grammar(self, content: str, grammar: str) -> str:
173
+ r"""Convert the content to the specified grammar.
174
+
175
+ Args:
176
+ content (str): The content to be converted.
177
+ grammar (str): The grammar to be used.
178
+
179
+ Returns:
180
+ str: The converted content.
181
+ """
182
+ import outlines
183
+
184
+ grammar_generator = outlines.generate.cfg(
185
+ self._outlines_model, grammar
186
+ )
187
+ return grammar_generator(content)
188
+
189
+ def convert( # type: ignore[override]
190
+ self,
191
+ content: str,
192
+ type: Literal["regex", "json", "type", "choice", "grammar"],
193
+ **kwargs,
194
+ ) -> Any:
195
+ r"""Formats the input content into the expected BaseModel.
196
+
197
+ Args:
198
+ type (Literal["regex", "json", "type", "choice", "grammar"]):
199
+ The type of conversion to perform. Options are:
200
+ - "regex": Match the content against a regex pattern.
201
+ - "pydantic": Convert the content into a pydantic model.
202
+ - "json": Convert the content into a JSON based on a
203
+ schema.
204
+ - "type": Convert the content into a specified type.
205
+ - "choice": Match the content against a list of valid
206
+ choices.
207
+ - "grammar": Convert the content using a specified grammar.
208
+ content (str): The content to be formatted.
209
+ **kwargs: Additional keyword arguments specific to the conversion
210
+ type.
211
+
212
+ - For "regex":
213
+ regex_pattern (str): The regex pattern to use for matching.
214
+
215
+ - For "pydantic":
216
+ output_schema (Type[BaseModel]): The schema to validate and
217
+ format the pydantic model.
218
+
219
+ - For "json":
220
+ output_schema (Union[str, Callable]): The schema to validate
221
+ and format the JSON object.
222
+
223
+ - For "type":
224
+ type_name (str): The target type name for the conversion.
225
+
226
+ - For "choice":
227
+ choices (List[str]): A list of valid choices to match against.
228
+
229
+ - For "grammar":
230
+ grammar (str): The grammar definition to use for content
231
+ conversion.
232
+ """
233
+ match type:
234
+ case "regex":
235
+ return self.convert_regex(content, kwargs.get("regex_pattern")) # type: ignore[arg-type]
236
+ case "pydantic":
237
+ return self.convert_pydantic(
238
+ content, kwargs.get("output_schema")
239
+ ) # type: ignore[arg-type]
240
+ case "json":
241
+ return self.convert_json(content, kwargs.get("output_schema")) # type: ignore[arg-type]
242
+ case "type":
243
+ return self.convert_type(content, kwargs.get("type_name")) # type: ignore[arg-type]
244
+ case "choice":
245
+ return self.convert_choice(content, kwargs.get("choices")) # type: ignore[arg-type]
246
+ case "grammar":
247
+ return self.convert_grammar(content, kwargs.get("grammar")) # type: ignore[arg-type]
248
+ case _:
249
+ raise ValueError("Unsupported output schema type")
camel/types/enums.py CHANGED
@@ -34,6 +34,7 @@ class ModelType(UnifiedModelType, Enum):
34
34
  GPT_4_TURBO = "gpt-4-turbo"
35
35
  GPT_4O = "gpt-4o"
36
36
  GPT_4O_MINI = "gpt-4o-mini"
37
+ O1 = "o1"
37
38
  O1_PREVIEW = "o1-preview"
38
39
  O1_MINI = "o1-mini"
39
40
 
@@ -166,6 +167,7 @@ class ModelType(UnifiedModelType, Enum):
166
167
  ModelType.GPT_4_TURBO,
167
168
  ModelType.GPT_4O,
168
169
  ModelType.GPT_4O_MINI,
170
+ ModelType.O1,
169
171
  ModelType.O1_PREVIEW,
170
172
  ModelType.O1_MINI,
171
173
  }
@@ -452,6 +454,7 @@ class ModelType(UnifiedModelType, Enum):
452
454
  }:
453
455
  return 131_072
454
456
  elif self in {
457
+ ModelType.O1,
455
458
  ModelType.CLAUDE_2_1,
456
459
  ModelType.CLAUDE_3_OPUS,
457
460
  ModelType.CLAUDE_3_SONNET,
camel/utils/commons.py CHANGED
@@ -12,7 +12,6 @@
12
12
  # limitations under the License.
13
13
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
14
  import importlib
15
- import logging
16
15
  import os
17
16
  import platform
18
17
  import re
@@ -40,19 +39,14 @@ import pydantic
40
39
  import requests
41
40
  from pydantic import BaseModel
42
41
 
43
- from camel.logger import get_logger
44
42
  from camel.types import TaskType
45
43
 
46
44
  from .constants import Constants
47
45
 
48
46
  F = TypeVar('F', bound=Callable[..., Any])
49
47
 
50
- logger = get_logger(__name__)
51
48
 
52
-
53
- def print_text_animated(
54
- text, delay: float = 0.02, end: str = "", log_level: int = logging.INFO
55
- ):
49
+ def print_text_animated(text, delay: float = 0.02, end: str = ""):
56
50
  r"""Prints the given text with an animated effect.
57
51
 
58
52
  Args:
@@ -61,22 +55,10 @@ def print_text_animated(
61
55
  (default: :obj:`0.02`)
62
56
  end (str, optional): The end character to print after each
63
57
  character of text. (default: :obj:`""`)
64
- log_level (int, optional): The log level to use.
65
- See https://docs.python.org/3/library/logging.html#levels
66
- (default: :obj:`logging.INFO`)
67
58
  """
68
- if logger.isEnabledFor(log_level):
69
- # timestamp and other prefixes
70
- logger.log(log_level, '')
71
-
72
- for char in text:
73
- print(char, end=end, flush=True)
74
- time.sleep(delay)
75
- # Close the log entry
76
- logger.log(log_level, '')
77
- else:
78
- # This may be relevant for logging frameworks
79
- logger.log(log_level, text)
59
+ for char in text:
60
+ print(char, end=end, flush=True)
61
+ time.sleep(delay)
80
62
 
81
63
 
82
64
  def get_prompt_template_key_words(template: str) -> Set[str]:
@@ -63,6 +63,7 @@ def get_model_encoding(value_for_tiktoken: str):
63
63
  encoding = tiktoken.encoding_for_model(value_for_tiktoken)
64
64
  except KeyError:
65
65
  if value_for_tiktoken in [
66
+ ModelType.O1.value,
66
67
  ModelType.O1_MINI.value,
67
68
  ModelType.O1_PREVIEW.value,
68
69
  ]:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: camel-ai
3
- Version: 0.2.13
3
+ Version: 0.2.14
4
4
  Summary: Communicative Agents for AI Society Study
5
5
  Home-page: https://www.camel-ai.org/
6
6
  License: Apache-2.0
@@ -68,12 +68,12 @@ Requires-Dist: mock (>=5,<6) ; extra == "test"
68
68
  Requires-Dist: nebula3-python (==3.8.2) ; extra == "rag" or extra == "graph-storages" or extra == "all"
69
69
  Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "rag" or extra == "graph-storages" or extra == "all"
70
70
  Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
71
- Requires-Dist: nltk (==3.9.1) ; extra == "tools" or extra == "all"
72
71
  Requires-Dist: notion-client (>=2.2.1,<3.0.0) ; extra == "tools" or extra == "all"
73
72
  Requires-Dist: numpy (>=1,<2)
74
- Requires-Dist: openai (>=1.45.0,<2.0.0)
73
+ Requires-Dist: openai (>=1.58.1,<2.0.0)
75
74
  Requires-Dist: openapi-spec-validator (>=0.7.1,<0.8.0) ; extra == "tools" or extra == "all"
76
75
  Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
76
+ Requires-Dist: outlines (>=0.1.7,<0.2.0) ; extra == "tools" or extra == "all"
77
77
  Requires-Dist: pandoc
78
78
  Requires-Dist: pathlib (>=1.0.1,<2.0.0)
79
79
  Requires-Dist: pdfplumber (>=0.11.0,<0.12.0) ; extra == "tools" or extra == "all"
@@ -109,7 +109,7 @@ Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
109
109
  Requires-Dist: torch (==2.2.1) ; (platform_system == "Darwin" and platform_machine != "arm64") and (extra == "huggingface-agent" or extra == "all")
110
110
  Requires-Dist: torch (>=2,<3) ; (platform_system != "Darwin" or platform_machine == "arm64") and (extra == "huggingface-agent" or extra == "all")
111
111
  Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
112
- Requires-Dist: unstructured[all-docs] (==0.16.11) ; extra == "rag" or extra == "all"
112
+ Requires-Dist: unstructured[all-docs] (==0.16.11) ; extra == "rag" or extra == "tools" or extra == "all" or extra == "all"
113
113
  Requires-Dist: wikipedia (>=1,<2) ; extra == "search-tools" or extra == "tools" or extra == "all"
114
114
  Requires-Dist: wolframalpha (>=5.0.0,<6.0.0) ; extra == "search-tools" or extra == "tools" or extra == "all"
115
115
  Requires-Dist: yt-dlp (>=2024.11.4,<2025.0.0) ; extra == "tools" or extra == "all"
@@ -263,7 +263,7 @@ conda create --name camel python=3.10
263
263
  conda activate camel
264
264
 
265
265
  # Clone github repo
266
- git clone -b v0.2.13 https://github.com/camel-ai/camel.git
266
+ git clone -b v0.2.14 https://github.com/camel-ai/camel.git
267
267
 
268
268
  # Change directory into project directory
269
269
  cd camel
@@ -1,4 +1,4 @@
1
- camel/__init__.py,sha256=pCIReTm4dam962sCmtLU92cJ8uPmiYeGGRtPGPw7kio,912
1
+ camel/__init__.py,sha256=CFY0R6Dg37pCUN8WDaG7Dabv2IlGKd3iHbWlb1QjJ_c,912
2
2
  camel/agents/__init__.py,sha256=LcS4m8s97-yADfznvcaAdUe9W0E9h3m6zrSc9H6m9so,1545
3
3
  camel/agents/base.py,sha256=c4bJYL3G3Z41SaFdMPMn8ZjLdFiFaVOFO6EQIfuCVR8,1124
4
4
  camel/agents/chat_agent.py,sha256=-czR5ppfBLMeYp-tidH9oeLdmjXQh14U13TsJY7qHAo,51469
@@ -111,7 +111,7 @@ camel/models/nvidia_model.py,sha256=fVU0IgXl2iZ4x2uObageXpt_Olw4xE6pYEOLI6ut0zU,
111
111
  camel/models/ollama_model.py,sha256=uiIgXmz6EqRsi3mBh8RAWopOom6rM77H4fP_Hp8cj3U,6057
112
112
  camel/models/openai_audio_models.py,sha256=61tGMxwOwXwh9RThXcQmkTCiGPEhau85_BM_nxgIKmY,10036
113
113
  camel/models/openai_compatible_model.py,sha256=NnDSj7e-SDYDGq0VTQVHCE0kRTJ2JDb62Z8z0ZcpygA,4059
114
- camel/models/openai_model.py,sha256=FkQawaaZ4hWh_Hdm0irOr9K5vIKT5VCnflI3EAPWzZc,6565
114
+ camel/models/openai_model.py,sha256=z6x9z_ARZPyWvrYZNzhjVyQvsV4vAG5IbAZhpOfgTXw,6796
115
115
  camel/models/qwen_model.py,sha256=dK--AZe0tWjgdJIp9ssVd63NWQeBe7qAn8QQhn711Zk,4992
116
116
  camel/models/reka_model.py,sha256=lwg27dzrEOlia9-apX5Km5sHShL6Fl55C4EOr8PXjDQ,8277
117
117
  camel/models/reward/__init__.py,sha256=8tvL9Qo9YAqllhcXB5yfH9dAwuvlXKNpKL1WIM_lJ9s,912
@@ -163,9 +163,10 @@ camel/runtime/remote_http_runtime.py,sha256=G-uYajki-QTjeUdpCoPVDjvI-Rvc69aYvG3S
163
163
  camel/runtime/utils/__init__.py,sha256=_4kT7j4gW9t5Zd_AbFa2mcHe8rpLB0tVlgAhQHxfwQQ,875
164
164
  camel/runtime/utils/function_risk_toolkit.py,sha256=0A9IN61JSziwKYB0AtCTZByXV9A3zWDRcNATG2V2dmA,2356
165
165
  camel/runtime/utils/ignore_risk_toolkit.py,sha256=kvLyF7EWEjXhlQJsmvA3JDB7An_LjtFjCaanNxOVaBE,2695
166
- camel/schemas/__init__.py,sha256=FIispqu0jMi7DPFg8iGsc3NdbC0daxSbWjybd72F6cA,792
167
- camel/schemas/base.py,sha256=S5fDiqJmNzTOhxN3Wl93gAcZnu3ke71fhZod5lq4mPI,1660
166
+ camel/schemas/__init__.py,sha256=UHt0krcozkPQFqD00q2Vk6hLbwV0ySrgaq17MJEZK1c,863
167
+ camel/schemas/base.py,sha256=x0H0oIwbQR6UGdEvR5v-srI25MJ8uTrEw8nnygvLwjw,1604
168
168
  camel/schemas/openai_converter.py,sha256=heyQesFAAjo4RF4R5BB1SPpF4mbNt9UhTvszxyJYDTM,4310
169
+ camel/schemas/outlines_converter.py,sha256=OYKPR1fNyrYs9eh5RiXEAccMbnRc9WTwSVJYbh9HkKE,8738
169
170
  camel/societies/__init__.py,sha256=NOHjtlsY-gV9UCF2xXgcbG-xXyuigmbwbpLpNsDgEJ4,826
170
171
  camel/societies/babyagi_playing.py,sha256=KbTdpHfZ2V8AripVck0bNTOyF-RSaMPCRARz3DvzWfQ,11855
171
172
  camel/societies/role_playing.py,sha256=I7xCYauJwGex1Fj9p1S6UxHI25lVshyidAeY8_6gE2o,23809
@@ -257,16 +258,16 @@ camel/toolkits/video_toolkit.py,sha256=n1P7F_cjdnC2jfUQQiJnhueRYA83GIjUF7HWIrES5
257
258
  camel/toolkits/weather_toolkit.py,sha256=qHAMD56zqd5GWnEWiaA_0aBDwvgacdx0pAHScinY4GY,6965
258
259
  camel/toolkits/whatsapp_toolkit.py,sha256=H_83AFCIoBMvZUcfUvfRTIAjfR2DR79xP2J-rfQKtNo,6326
259
260
  camel/types/__init__.py,sha256=NkjLAPjxo_BReMzRrnv7hfEIQX42F9GHoBK1T_J-WxM,2187
260
- camel/types/enums.py,sha256=QEIh5XjMFf487AGKLonu2nBykirFkXf8L1_LZ30mCfo,23497
261
+ camel/types/enums.py,sha256=YdxHWflANL2UkmKOsyY2sJZXmj7SfK3w6WxRRTF0fiI,23563
261
262
  camel/types/openai_types.py,sha256=7tfrPHScqyerPwIP7dPmKss87lVkvsb175cT-jLJWyg,2222
262
263
  camel/types/unified_model_type.py,sha256=ocNxJM98xdCDeqCt1F7Fcvd4Hw1ZxMgsMRZqetGWndo,3819
263
264
  camel/utils/__init__.py,sha256=PS-QW4kT3DefTtjUEkHvBkxMK06ar_uACbkszQWWPTM,2313
264
265
  camel/utils/async_func.py,sha256=4esRhhGrvfm-iJRloUbU-sYWyHp_mt0bBBXpwyCv6vc,1556
265
- camel/utils/commons.py,sha256=8TB55DseOepZtwlWqHPkqCivQKNecCOfzFKnqpD2hg4,18101
266
+ camel/utils/commons.py,sha256=fcAP_BgSkyssmQUjVaPjfo9zYaqPnvlUJuVApJM2htw,17523
266
267
  camel/utils/constants.py,sha256=MQD3bgLIq_NATp0D1iFkrwfkCwVX-PAOSXheTkkEdkY,1410
267
268
  camel/utils/response_format.py,sha256=9KrbwtOM9cA3LSjTgLiK7oKy-53_uMh1cvpyNwwJpng,2419
268
- camel/utils/token_counting.py,sha256=df3mcY9IJgG1zTTmQbdRc6FhB5cD3n07BoXngNpu8Sw,15305
269
- camel_ai-0.2.13.dist-info/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
270
- camel_ai-0.2.13.dist-info/METADATA,sha256=Reihb0rQP8mdbX_EvEB7lhN56BJfkTZADp6yxrJxnTs,32269
271
- camel_ai-0.2.13.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
272
- camel_ai-0.2.13.dist-info/RECORD,,
269
+ camel/utils/token_counting.py,sha256=cS8sS3lVjn79lzHi5N8z2j7a9vtw2DZy4PwzHcQIv_Q,15337
270
+ camel_ai-0.2.14.dist-info/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
271
+ camel_ai-0.2.14.dist-info/METADATA,sha256=9N74TIEATILUa0_u2I3WTmH1xMq7h6xvpqtxKPRJtdE,32318
272
+ camel_ai-0.2.14.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
273
+ camel_ai-0.2.14.dist-info/RECORD,,