camel-ai 0.1.8__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of camel-ai has been flagged as potentially problematic in its registry listing.

camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
- __version__ = '0.1.8'
15
+ __version__ = '0.2.0'
16
16
 
17
17
  __all__ = [
18
18
  '__version__',
@@ -181,6 +181,16 @@ class ChatAgent(BaseAgent):
181
181
  tool.get_function_name(): tool.func for tool in all_tools
182
182
  }
183
183
 
184
+ # If the user hasn't configured tools in `BaseModelBackend`,
185
+ # the tools set from `ChatAgent` will be used.
186
+ # This design simplifies the interface while retaining tool-running
187
+ # capabilities for `BaseModelBackend`.
188
+ if all_tools and not self.model_backend.model_config_dict['tools']:
189
+ tool_schema_list = [
190
+ tool.get_openai_tool_schema() for tool in all_tools
191
+ ]
192
+ self.model_backend.model_config_dict['tools'] = tool_schema_list
193
+
184
194
  self.model_config_dict = self.model_backend.model_config_dict
185
195
 
186
196
  self.model_token_limit = token_limit or self.model_backend.token_limit
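
The new block above copies the agent's tools into the backend's `model_config_dict` when the backend was created without any tools of its own. A small sketch of the conversion it performs, reusing the `get_openai_tool_schema()` call from the hunk; the choice of `SearchToolkit` and the schema's key layout are illustrative assumptions:

from camel.toolkits import SearchToolkit

all_tools = SearchToolkit().get_tools()          # OpenAIFunction objects
tool_schema_list = [t.get_openai_tool_schema() for t in all_tools]
# This is the list ChatAgent writes into model_backend.model_config_dict['tools']
# when the backend has no tools configured.
print(tool_schema_list[0]["function"]["name"])
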
camel/configs/__init__.py CHANGED
@@ -21,8 +21,10 @@ from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
21
21
  from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
22
22
  from .reka_config import REKA_API_PARAMS, RekaConfig
23
23
  from .samba_config import (
24
+ SAMBA_CLOUD_API_PARAMS,
24
25
  SAMBA_FAST_API_PARAMS,
25
26
  SAMBA_VERSE_API_PARAMS,
27
+ SambaCloudAPIConfig,
26
28
  SambaFastAPIConfig,
27
29
  SambaVerseAPIConfig,
28
30
  )
@@ -57,6 +59,8 @@ __all__ = [
57
59
  'SAMBA_FAST_API_PARAMS',
58
60
  'SambaVerseAPIConfig',
59
61
  'SAMBA_VERSE_API_PARAMS',
62
+ 'SambaCloudAPIConfig',
63
+ 'SAMBA_CLOUD_API_PARAMS',
60
64
  'TogetherAIConfig',
61
65
  'TOGETHERAI_API_PARAMS',
62
66
  ]
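camel/configs/samba_config.py CHANGED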
@@ -13,7 +13,10 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  from __future__ import annotations
15
15
 
16
- from typing import Any, Dict, Optional, Union
16
+ from typing import Any, Dict, Optional, Sequence, Union
17
+
18
+ from openai._types import NOT_GIVEN, NotGiven
19
+ from pydantic import Field
17
20
 
18
21
  from camel.configs.base_config import BaseConfig
19
22
 
@@ -107,3 +110,98 @@ class SambaVerseAPIConfig(BaseConfig):
107
110
  SAMBA_VERSE_API_PARAMS = {
108
111
  param for param in SambaVerseAPIConfig().model_fields.keys()
109
112
  }
113
+
114
+
115
+ class SambaCloudAPIConfig(BaseConfig):
116
+ r"""Defines the parameters for generating chat completions using the
117
+ OpenAI API.
118
+
119
+ Args:
120
+ temperature (float, optional): Sampling temperature to use, between
121
+ :obj:`0` and :obj:`2`. Higher values make the output more random,
122
+ while lower values make it more focused and deterministic.
123
+ (default: :obj:`0.2`)
124
+ top_p (float, optional): An alternative to sampling with temperature,
125
+ called nucleus sampling, where the model considers the results of
126
+ the tokens with top_p probability mass. So :obj:`0.1` means only
127
+ the tokens comprising the top 10% probability mass are considered.
128
+ (default: :obj:`1.0`)
129
+ n (int, optional): How many chat completion choices to generate for
130
+ each input message. (default: :obj:`1`)
131
+ response_format (object, optional): An object specifying the format
132
+ that the model must output. Compatible with GPT-4 Turbo and all
133
+ GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
134
+ {"type": "json_object"} enables JSON mode, which guarantees the
135
+ message the model generates is valid JSON. Important: when using
136
+ JSON mode, you must also instruct the model to produce JSON
137
+ yourself via a system or user message. Without this, the model
138
+ may generate an unending stream of whitespace until the generation
139
+ reaches the token limit, resulting in a long-running and seemingly
140
+ "stuck" request. Also note that the message content may be
141
+ partially cut off if finish_reason="length", which indicates the
142
+ generation exceeded max_tokens or the conversation exceeded the
143
+ max context length.
144
+ stream (bool, optional): If True, partial message deltas will be sent
145
+ as data-only server-sent events as they become available.
146
+ (default: :obj:`False`)
147
+ stop (str or list, optional): Up to :obj:`4` sequences where the API
148
+ will stop generating further tokens. (default: :obj:`None`)
149
+ max_tokens (int, optional): The maximum number of tokens to generate
150
+ in the chat completion. The total length of input tokens and
151
+ generated tokens is limited by the model's context length.
152
+ (default: :obj:`None`)
153
+ presence_penalty (float, optional): Number between :obj:`-2.0` and
154
+ :obj:`2.0`. Positive values penalize new tokens based on whether
155
+ they appear in the text so far, increasing the model's likelihood
156
+ to talk about new topics. See more information about frequency and
157
+ presence penalties. (default: :obj:`0.0`)
158
+ frequency_penalty (float, optional): Number between :obj:`-2.0` and
159
+ :obj:`2.0`. Positive values penalize new tokens based on their
160
+ existing frequency in the text so far, decreasing the model's
161
+ likelihood to repeat the same line verbatim. See more information
162
+ about frequency and presence penalties. (default: :obj:`0.0`)
163
+ logit_bias (dict, optional): Modify the likelihood of specified tokens
164
+ appearing in the completion. Accepts a json object that maps tokens
165
+ (specified by their token ID in the tokenizer) to an associated
166
+ bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
167
+ is added to the logits generated by the model prior to sampling.
168
+ The exact effect will vary per model, but values between :obj:`-1`
169
+ and :obj:`1` should decrease or increase likelihood of selection;
170
+ values like :obj:`-100` or :obj:`100` should result in a ban or
171
+ exclusive selection of the relevant token. (default: :obj:`{}`)
172
+ user (str, optional): A unique identifier representing your end-user,
173
+ which can help OpenAI to monitor and detect abuse.
174
+ (default: :obj:`""`)
175
+ tools (list[OpenAIFunction], optional): A list of tools the model may
176
+ call. Currently, only functions are supported as a tool. Use this
177
+ to provide a list of functions the model may generate JSON inputs
178
+ for. A max of 128 functions are supported.
179
+ tool_choice (Union[dict[str, str], str], optional): Controls which (if
180
+ any) tool is called by the model. :obj:`"none"` means the model
181
+ will not call any tool and instead generates a message.
182
+ :obj:`"auto"` means the model can pick between generating a
183
+ message or calling one or more tools. :obj:`"required"` means the
184
+ model must call one or more tools. Specifying a particular tool
185
+ via {"type": "function", "function": {"name": "my_function"}}
186
+ forces the model to call that tool. :obj:`"none"` is the default
187
+ when no tools are present. :obj:`"auto"` is the default if tools
188
+ are present.
189
+ """
190
+
191
+ temperature: float = 0.2 # openai default: 1.0
192
+ top_p: float = 1.0
193
+ n: int = 1
194
+ stream: bool = False
195
+ stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN
196
+ max_tokens: Union[int, NotGiven] = NOT_GIVEN
197
+ presence_penalty: float = 0.0
198
+ response_format: Union[dict, NotGiven] = NOT_GIVEN
199
+ frequency_penalty: float = 0.0
200
+ logit_bias: dict = Field(default_factory=dict)
201
+ user: str = ""
202
+ tool_choice: Optional[Union[dict[str, str], str]] = None
203
+
204
+
205
+ SAMBA_CLOUD_API_PARAMS = {
206
+ param for param in SambaCloudAPIConfig().model_fields.keys()
207
+ }
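
A short, hedged usage sketch of the new `SambaCloudAPIConfig`. The field names and defaults come from the class body above; treating the config as a standard pydantic model is an assumption based on its use of `Field` and `model_fields`:

from camel.configs import SAMBA_CLOUD_API_PARAMS, SambaCloudAPIConfig

# Override a couple of fields; anything not passed keeps the defaults above.
config = SambaCloudAPIConfig(temperature=0.0, max_tokens=512)
print(config.temperature, config.max_tokens)   # 0.0 512
print(sorted(SAMBA_CLOUD_API_PARAMS))          # field names accepted by the Cloud API

camel/models/ollama_model.py CHANGED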
@@ -76,10 +76,10 @@ class OllamaModel:
76
76
  )
77
77
  print(
78
78
  f"Ollama server started on http://localhost:11434/v1 "
79
- f"for {self.model_type} model"
79
+ f"for {self.model_type} model."
80
80
  )
81
81
  except Exception as e:
82
- print(f"Failed to start Ollama server: {e}")
82
+ print(f"Failed to start Ollama server: {e}.")
83
83
 
84
84
  @property
85
85
  def token_counter(self) -> BaseTokenCounter:
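camel/models/openai_model.py CHANGED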
@@ -93,6 +93,22 @@ class OpenAIModel(BaseModelBackend):
93
93
  `ChatCompletion` in the non-stream mode, or
94
94
  `Stream[ChatCompletionChunk]` in the stream mode.
95
95
  """
96
+ # o1-preview and o1-mini have Beta limitations
97
+ # reference: https://platform.openai.com/docs/guides/reasoning
98
+ if self.model_type in [ModelType.O1_MINI, ModelType.O1_PREVIEW]:
99
+ # Remove system message that is not supported in o1 model.
100
+ messages = [msg for msg in messages if msg.get("role") != "system"]
101
+
102
+ # Remove unsupported parameters and reset the fixed parameters
103
+ del self.model_config_dict["stream"]
104
+ del self.model_config_dict["tools"]
105
+ del self.model_config_dict["tool_choice"]
106
+ self.model_config_dict["temperature"] = 1.0
107
+ self.model_config_dict["top_p"] = 1.0
108
+ self.model_config_dict["n"] = 1.0
109
+ self.model_config_dict["presence_penalty"] = 0.0
110
+ self.model_config_dict["frequency_penalty"] = 0.0
111
+
96
112
  response = self._client.chat.completions.create(
97
113
  messages=messages,
98
114
  model=self.model_type.value,
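
The o1 branch above drops system messages and pins the sampling parameters before calling the API, per the Beta limitations it references. A standalone illustration of the message-filtering step, not the library's API, just the same list comprehension:

# System messages are removed because o1-preview/o1-mini reject them.
messages = [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": "Hello"},
]
messages = [msg for msg in messages if msg.get("role") != "system"]
assert messages == [{"role": "user", "content": "Hello"}]

camel/models/samba_model.py CHANGED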
@@ -20,7 +20,11 @@ from typing import Any, Dict, List, Optional, Union
20
20
  import httpx
21
21
  from openai import OpenAI, Stream
22
22
 
23
- from camel.configs import SAMBA_FAST_API_PARAMS, SAMBA_VERSE_API_PARAMS
23
+ from camel.configs import (
24
+ SAMBA_CLOUD_API_PARAMS,
25
+ SAMBA_FAST_API_PARAMS,
26
+ SAMBA_VERSE_API_PARAMS,
27
+ )
24
28
  from camel.messages import OpenAIMessage
25
29
  from camel.types import (
26
30
  ChatCompletion,
@@ -59,9 +63,10 @@ class SambaModel:
59
63
  SambaNova service. (default: :obj:`None`)
60
64
  url (Optional[str]): The url to the SambaNova service. Current
61
65
  support SambaNova Fast API: :obj:`"https://fast-api.snova.ai/
62
- v1/chat/ completions"` and SambaVerse API: :obj:`"https://
63
- sambaverse.sambanova.ai/api/predict"`. (default::obj:`"https://
64
- fast-api.snova.ai/v1/chat/completions"`)
66
+ v1/chat/ completions"`, SambaVerse API: :obj:`"https://
67
+ sambaverse.sambanova.ai/api/predict"` and SambaNova Cloud:
68
+ :obj:`"https://api.sambanova.ai/v1"`
69
+ (default: :obj:`"https://fast-api.snova.ai/v1/chat/completions"`)
65
70
  token_counter (Optional[BaseTokenCounter]): Token counter to use
66
71
  for the model. If not provided, `OpenAITokenCounter(ModelType.
67
72
  GPT_4O_MINI)` will be used.
@@ -76,6 +81,14 @@ class SambaModel:
76
81
  self.model_config_dict = model_config_dict
77
82
  self.check_model_config()
78
83
 
84
+ if self._url == "https://api.sambanova.ai/v1":
85
+ self._client = OpenAI(
86
+ timeout=60,
87
+ max_retries=3,
88
+ base_url=self._url,
89
+ api_key=self._api_key,
90
+ )
91
+
79
92
  @property
80
93
  def token_counter(self) -> BaseTokenCounter:
81
94
  r"""Initialize the token counter for the model backend.
@@ -111,6 +124,14 @@ class SambaModel:
111
124
  "input into SambaVerse API."
112
125
  )
113
126
 
127
+ elif self._url == "https://api.sambanova.ai/v1":
128
+ for param in self.model_config_dict:
129
+ if param not in SAMBA_CLOUD_API_PARAMS:
130
+ raise ValueError(
131
+ f"Unexpected argument `{param}` is "
132
+ "input into SambaCloud API."
133
+ )
134
+
114
135
  else:
115
136
  raise ValueError(
116
137
  f"{self._url} is not supported, please check the url to the"
@@ -141,7 +162,7 @@ class SambaModel:
141
162
  def _run_streaming( # type: ignore[misc]
142
163
  self, messages: List[OpenAIMessage]
143
164
  ) -> Stream[ChatCompletionChunk]:
144
- r"""Handles streaming inference with SambaNova FastAPI.
165
+ r"""Handles streaming inference with SambaNova's API.
145
166
 
146
167
  Args:
147
168
  messages (List[OpenAIMessage]): A list of messages representing the
@@ -189,6 +210,15 @@ class SambaModel:
189
210
  except httpx.HTTPError as e:
190
211
  raise RuntimeError(f"HTTP request failed: {e!s}")
191
212
 
213
+ # Handle SambaNova's Cloud API
214
+ elif self._url == "https://api.sambanova.ai/v1":
215
+ response = self._client.chat.completions.create(
216
+ messages=messages,
217
+ model=self.model_type,
218
+ **self.model_config_dict,
219
+ )
220
+ return response
221
+
192
222
  elif self._url == "https://sambaverse.sambanova.ai/api/predict":
193
223
  raise ValueError(
194
224
  "https://sambaverse.sambanova.ai/api/predict doesn't support"
@@ -198,7 +228,7 @@ class SambaModel:
198
228
  def _run_non_streaming(
199
229
  self, messages: List[OpenAIMessage]
200
230
  ) -> ChatCompletion:
201
- r"""Handles non-streaming inference with SambaNova FastAPI.
231
+ r"""Handles non-streaming inference with SambaNova's API.
202
232
 
203
233
  Args:
204
234
  messages (List[OpenAIMessage]): A list of messages representing the
@@ -251,6 +281,15 @@ class SambaModel:
251
281
  except json.JSONDecodeError as e:
252
282
  raise ValueError(f"Failed to decode JSON response: {e!s}")
253
283
 
284
+ # Handle SambaNova's Cloud API
285
+ elif self._url == "https://api.sambanova.ai/v1":
286
+ response = self._client.chat.completions.create(
287
+ messages=messages,
288
+ model=self.model_type,
289
+ **self.model_config_dict,
290
+ )
291
+ return response
292
+
254
293
  # Handle SambaNova's Sambaverse API
255
294
  else:
256
295
  headers = {
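
Taken together, the hunks above route requests through an OpenAI-compatible client whenever the configured URL is the new SambaNova Cloud endpoint. A hedged construction sketch; the keyword names and the model id are assumptions based on the attributes used in this file (`self._url`, `self._api_key`, `self.model_config_dict`):

from camel.models.samba_model import SambaModel

model = SambaModel(
    model_type="Meta-Llama-3.1-8B-Instruct",          # assumed Cloud model id
    model_config_dict={"temperature": 0.0, "max_tokens": 256},
    api_key="<SAMBA_API_KEY>",                        # placeholder
    url="https://api.sambanova.ai/v1",                # selects the Cloud code path above
)

camel/models/vllm_model.py CHANGED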
@@ -80,10 +80,10 @@ class VLLMModel:
80
80
  )
81
81
  print(
82
82
  f"vllm server started on http://localhost:8000/v1 "
83
- f"for {self.model_type} model"
83
+ f"for {self.model_type} model."
84
84
  )
85
85
  except Exception as e:
86
- print(f"Failed to start vllm server: {e}")
86
+ print(f"Failed to start vllm server: {e}.")
87
87
 
88
88
  @property
89
89
  def token_counter(self) -> BaseTokenCounter:
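camel/toolkits/__init__.py CHANGED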
@@ -19,19 +19,18 @@ from .openai_function import (
19
19
  )
20
20
  from .open_api_specs.security_config import openapi_security_config
21
21
 
22
- from .google_maps_toolkit import MAP_FUNCS, GoogleMapsToolkit
23
- from .math_toolkit import MATH_FUNCS, MathToolkit
24
- from .open_api_toolkit import OPENAPI_FUNCS, OpenAPIToolkit
25
- from .retrieval_toolkit import RETRIEVAL_FUNCS, RetrievalToolkit
26
- from .search_toolkit import SEARCH_FUNCS, SearchToolkit
27
- from .twitter_toolkit import TWITTER_FUNCS, TwitterToolkit
28
- from .weather_toolkit import WEATHER_FUNCS, WeatherToolkit
29
- from .slack_toolkit import SLACK_FUNCS, SlackToolkit
30
- from .dalle_toolkit import DALLE_FUNCS, DalleToolkit
31
- from .linkedin_toolkit import LINKEDIN_FUNCS, LinkedInToolkit
32
- from .reddit_toolkit import REDDIT_FUNCS, RedditToolkit
22
+ from .google_maps_toolkit import GoogleMapsToolkit
23
+ from .math_toolkit import MathToolkit, MATH_FUNCS
24
+ from .open_api_toolkit import OpenAPIToolkit
25
+ from .retrieval_toolkit import RetrievalToolkit
26
+ from .search_toolkit import SearchToolkit, SEARCH_FUNCS
27
+ from .twitter_toolkit import TwitterToolkit
28
+ from .weather_toolkit import WeatherToolkit, WEATHER_FUNCS
29
+ from .slack_toolkit import SlackToolkit
30
+ from .dalle_toolkit import DalleToolkit, DALLE_FUNCS
31
+ from .linkedin_toolkit import LinkedInToolkit
32
+ from .reddit_toolkit import RedditToolkit
33
33
 
34
- from .base import BaseToolkit
35
34
  from .code_execution import CodeExecutionToolkit
36
35
  from .github_toolkit import GithubToolkit
37
36
 
@@ -40,18 +39,6 @@ __all__ = [
40
39
  'get_openai_function_schema',
41
40
  'get_openai_tool_schema',
42
41
  'openapi_security_config',
43
- 'MATH_FUNCS',
44
- 'MAP_FUNCS',
45
- 'OPENAPI_FUNCS',
46
- 'RETRIEVAL_FUNCS',
47
- 'SEARCH_FUNCS',
48
- 'TWITTER_FUNCS',
49
- 'WEATHER_FUNCS',
50
- 'SLACK_FUNCS',
51
- 'DALLE_FUNCS',
52
- 'LINKEDIN_FUNCS',
53
- 'REDDIT_FUNCS',
54
- 'BaseToolkit',
55
42
  'GithubToolkit',
56
43
  'MathToolkit',
57
44
  'GoogleMapsToolkit',
@@ -65,4 +52,8 @@ __all__ = [
65
52
  'LinkedInToolkit',
66
53
  'RedditToolkit',
67
54
  'CodeExecutionToolkit',
55
+ 'MATH_FUNCS',
56
+ 'SEARCH_FUNCS',
57
+ 'WEATHER_FUNCS',
58
+ 'DALLE_FUNCS',
68
59
  ]
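
After this reorganization the toolkit classes are still re-exported, while `BaseToolkit` and most module-level `*_FUNCS` lists are gone; only `MATH_FUNCS`, `SEARCH_FUNCS`, `WEATHER_FUNCS`, and `DALLE_FUNCS` remain. A small sketch of the retained surface, runnable without API keys for the math toolkit:

from camel.toolkits import MATH_FUNCS, MathToolkit

tools = MathToolkit().get_tools()     # OpenAIFunction objects
print(len(tools), len(MATH_FUNCS))    # MATH_FUNCS presumably mirrors get_tools()

camel/toolkits/google_maps_toolkit.py CHANGED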
@@ -13,10 +13,11 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  import os
15
15
  from functools import wraps
16
- from typing import Any, Callable, List, Optional, Tuple, Union
16
+ from typing import Any, Callable, List, Optional, Union
17
17
 
18
18
  from camel.toolkits.base import BaseToolkit
19
19
  from camel.toolkits.openai_function import OpenAIFunction
20
+ from camel.utils import dependencies_required
20
21
 
21
22
 
22
23
  def handle_googlemaps_exceptions(
@@ -74,57 +75,47 @@ def handle_googlemaps_exceptions(
74
75
  return wrapper
75
76
 
76
77
 
78
+ def _format_offset_to_natural_language(offset: int) -> str:
79
+ r"""Converts a time offset in seconds to a more natural language
80
+ description using hours as the unit, with decimal places to represent
81
+ minutes and seconds.
82
+
83
+ Args:
84
+ offset (int): The time offset in seconds. Can be positive,
85
+ negative, or zero.
86
+
87
+ Returns:
88
+ str: A string representing the offset in hours, such as
89
+ "+2.50 hours" or "-3.75 hours".
90
+ """
91
+ # Convert the offset to hours as a float
92
+ hours = offset / 3600.0
93
+ hours_str = f"{hours:+.2f} hour{'s' if abs(hours) != 1 else ''}"
94
+ return hours_str
95
+
96
+
77
97
  class GoogleMapsToolkit(BaseToolkit):
78
98
  r"""A class representing a toolkit for interacting with GoogleMaps API.
79
-
80
99
  This class provides methods for validating addresses, retrieving elevation,
81
100
  and fetching timezone information using the Google Maps API.
82
101
  """
83
102
 
84
- def _import_googlemaps_or_raise(self) -> Any:
85
- r"""Attempts to import the `googlemaps` library and returns it.
86
-
87
- Returns:
88
- module: The `googlemaps` module if successfully imported.
89
-
90
- Raises:
91
- ImportError: If the `googlemaps` library is not installed, this
92
- error is raised with a message instructing how to install the
93
- library using pip.
94
- """
95
- try:
96
- import googlemaps
97
-
98
- return googlemaps
99
- except ImportError:
100
- raise ImportError(
101
- "Please install `googlemaps` first. You can install "
102
- "it by running `pip install googlemaps`."
103
- )
104
-
105
- def _get_googlemaps_api_key(self) -> str:
106
- r"""Retrieve the Google Maps API key from environment variables.
107
-
108
- Returns:
109
- str: The Google Maps API key.
103
+ @dependencies_required('googlemaps')
104
+ def __init__(self) -> None:
105
+ import googlemaps
110
106
 
111
- Raises:
112
- ValueError: If the API key is not found in the environment
113
- variables.
114
- """
115
- # Get `GOOGLEMAPS_API_KEY` here:
116
- # https://console.cloud.google.com/apis/credentials
117
- GOOGLEMAPS_API_KEY = os.environ.get('GOOGLEMAPS_API_KEY')
118
- if not GOOGLEMAPS_API_KEY:
107
+ api_key = os.environ.get('GOOGLE_API_KEY')
108
+ if not api_key:
119
109
  raise ValueError(
120
- "`GOOGLEMAPS_API_KEY` not found in environment "
121
- "variables. `GOOGLEMAPS_API_KEY` API keys are "
122
- "generated in the `Credentials` page of the "
123
- "`APIs & Services` tab of "
110
+ "`GOOGLE_API_KEY` not found in environment variables. "
111
+ "`GOOGLE_API_KEY` API keys are generated in the `Credentials` "
112
+ "page of the `APIs & Services` tab of "
124
113
  "https://console.cloud.google.com/apis/credentials."
125
114
  )
126
- return GOOGLEMAPS_API_KEY
127
115
 
116
+ self.gmaps = googlemaps.Client(key=api_key)
117
+
118
+ @handle_googlemaps_exceptions
128
119
  def get_address_description(
129
120
  self,
130
121
  address: Union[str, List[str]],
@@ -132,18 +123,17 @@ class GoogleMapsToolkit(BaseToolkit):
132
123
  locality: Optional[str] = None,
133
124
  ) -> str:
134
125
  r"""Validates an address via Google Maps API, returns a descriptive
135
- summary.
136
-
137
- Validates an address using Google Maps API, returning a summary that
138
- includes information on address completion, formatted address, location
139
- coordinates, and metadata types that are true for the given address.
126
+ summary. Validates an address using Google Maps API, returning a
127
+ summary that includes information on address completion, formatted
128
+ address, location coordinates, and metadata types that are true for
129
+ the given address.
140
130
 
141
131
  Args:
142
132
  address (Union[str, List[str]]): The address or components to
143
133
  validate. Can be a single string or a list representing
144
134
  different parts.
145
135
  region_code (str, optional): Country code for regional restriction,
146
- helps narrowing down results. (default: :obj:`None`)
136
+ helps narrow down results. (default: :obj:`None`)
147
137
  locality (str, optional): Restricts validation to a specific
148
138
  locality, e.g., "Mountain View". (default: :obj:`None`)
149
139
 
@@ -152,89 +142,70 @@ class GoogleMapsToolkit(BaseToolkit):
152
142
  information on address completion, formatted address,
153
143
  geographical coordinates (latitude and longitude), and metadata
154
144
  types true for the address.
155
-
156
- Raises:
157
- ImportError: If the `googlemaps` library is not installed.
158
- Exception: For unexpected errors during the address validation.
159
145
  """
160
- googlemaps = self._import_googlemaps_or_raise()
161
- google_maps_api_key = self._get_googlemaps_api_key()
162
- try:
163
- gmaps = googlemaps.Client(key=google_maps_api_key)
164
- except Exception as e:
165
- return f"Error: {e!s}"
166
-
167
- try:
168
- addressvalidation_result = gmaps.addressvalidation(
169
- [address],
170
- regionCode=region_code,
171
- locality=locality,
172
- enableUspsCass=False,
173
- ) # Always False as per requirements
174
-
175
- # Check if the result contains an error
176
- if 'error' in addressvalidation_result:
177
- error_info = addressvalidation_result['error']
178
- error_message = error_info.get(
179
- 'message', 'An unknown error occurred'
180
- )
181
- error_status = error_info.get('status', 'UNKNOWN_STATUS')
182
- error_code = error_info.get('code', 'UNKNOWN_CODE')
183
- return (
184
- f"Address validation failed with error: {error_message} "
185
- f"Status: {error_status}, Code: {error_code}"
186
- )
187
-
188
- # Assuming the successful response structure
189
- # includes a 'result' key
190
- result = addressvalidation_result['result']
191
- verdict = result.get('verdict', {})
192
- address_info = result.get('address', {})
193
- geocode = result.get('geocode', {})
194
- metadata = result.get('metadata', {})
195
-
196
- # Construct the descriptive string
197
- address_complete = (
198
- "Yes" if verdict.get('addressComplete', False) else "No"
199
- )
200
- formatted_address = address_info.get(
201
- 'formattedAddress', 'Not available'
146
+ addressvalidation_result = self.gmaps.addressvalidation(
147
+ [address],
148
+ regionCode=region_code,
149
+ locality=locality,
150
+ enableUspsCass=False,
151
+ ) # Always False as per requirements
152
+
153
+ # Check if the result contains an error
154
+ if 'error' in addressvalidation_result:
155
+ error_info = addressvalidation_result['error']
156
+ error_message = error_info.get(
157
+ 'message', 'An unknown error occurred'
202
158
  )
203
- location = geocode.get('location', {})
204
- latitude = location.get('latitude', 'Not available')
205
- longitude = location.get('longitude', 'Not available')
206
- true_metadata_types = [
207
- key for key, value in metadata.items() if value
208
- ]
209
- true_metadata_types_str = (
210
- ', '.join(true_metadata_types)
211
- if true_metadata_types
212
- else 'None'
159
+ error_status = error_info.get('status', 'UNKNOWN_STATUS')
160
+ error_code = error_info.get('code', 'UNKNOWN_CODE')
161
+ return (
162
+ f"Address validation failed with error: {error_message} "
163
+ f"Status: {error_status}, Code: {error_code}"
213
164
  )
214
165
 
215
- description = (
216
- f"Address completion status: {address_complete}. "
217
- f"Formatted address: {formatted_address}. "
218
- f"Location (latitude, longitude): ({latitude}, {longitude}). "
219
- f"Metadata indicating true types: {true_metadata_types_str}."
220
- )
166
+ # Assuming the successful response structure
167
+ # includes a 'result' key
168
+ result = addressvalidation_result['result']
169
+ verdict = result.get('verdict', {})
170
+ address_info = result.get('address', {})
171
+ geocode = result.get('geocode', {})
172
+ metadata = result.get('metadata', {})
173
+
174
+ # Construct the descriptive string
175
+ address_complete = (
176
+ "Yes" if verdict.get('addressComplete', False) else "No"
177
+ )
178
+ formatted_address = address_info.get(
179
+ 'formattedAddress', 'Not available'
180
+ )
181
+ location = geocode.get('location', {})
182
+ latitude = location.get('latitude', 'Not available')
183
+ longitude = location.get('longitude', 'Not available')
184
+ true_metadata_types = [key for key, value in metadata.items() if value]
185
+ true_metadata_types_str = (
186
+ ', '.join(true_metadata_types) if true_metadata_types else 'None'
187
+ )
221
188
 
222
- return description
223
- except Exception as e:
224
- return f"An unexpected error occurred: {e!s}"
189
+ description = (
190
+ f"Address completion status: {address_complete}. "
191
+ f"Formatted address: {formatted_address}. "
192
+ f"Location (latitude, longitude): ({latitude}, {longitude}). "
193
+ f"Metadata indicating true types: {true_metadata_types_str}."
194
+ )
195
+
196
+ return description
225
197
 
226
198
  @handle_googlemaps_exceptions
227
- def get_elevation(self, lat_lng: Tuple) -> str:
199
+ def get_elevation(self, lat: float, lng: float) -> str:
228
200
  r"""Retrieves elevation data for a given latitude and longitude.
229
-
230
201
  Uses the Google Maps API to fetch elevation data for the specified
231
202
  latitude and longitude. It handles exceptions gracefully and returns a
232
203
  description of the elevation, including its value in meters and the
233
204
  data resolution.
234
205
 
235
206
  Args:
236
- lat_lng (Tuple[float, float]): The latitude and longitude for
237
- which to retrieve elevation data.
207
+ lat (float): The latitude of the location to query.
208
+ lng (float): The longitude of the location to query.
238
209
 
239
210
  Returns:
240
211
  str: A description of the elevation at the specified location(s),
@@ -242,15 +213,8 @@ class GoogleMapsToolkit(BaseToolkit):
242
213
  elevation data is not available, a message indicating this is
243
214
  returned.
244
215
  """
245
- googlemaps = self._import_googlemaps_or_raise()
246
- google_maps_api_key = self._get_googlemaps_api_key()
247
- try:
248
- gmaps = googlemaps.Client(key=google_maps_api_key)
249
- except Exception as e:
250
- return f"Error: {e!s}"
251
-
252
216
  # Assuming gmaps is a configured Google Maps client instance
253
- elevation_result = gmaps.elevation(lat_lng)
217
+ elevation_result = self.gmaps.elevation((lat, lng))
254
218
 
255
219
  # Extract the elevation data from the first
256
220
  # (and presumably only) result
@@ -273,28 +237,9 @@ class GoogleMapsToolkit(BaseToolkit):
273
237
 
274
238
  return description
275
239
 
276
- def _format_offset_to_natural_language(self, offset: int) -> str:
277
- r"""Converts a time offset in seconds to a more natural language
278
- description using hours as the unit, with decimal places to represent
279
- minutes and seconds.
280
-
281
- Args:
282
- offset (int): The time offset in seconds. Can be positive,
283
- negative, or zero.
284
-
285
- Returns:
286
- str: A string representing the offset in hours, such as
287
- "+2.50 hours" or "-3.75 hours".
288
- """
289
- # Convert the offset to hours as a float
290
- hours = offset / 3600.0
291
- hours_str = f"{hours:+.2f} hour{'s' if abs(hours) != 1 else ''}"
292
- return hours_str
293
-
294
240
  @handle_googlemaps_exceptions
295
- def get_timezone(self, lat_lng: Tuple) -> str:
241
+ def get_timezone(self, lat: float, lng: float) -> str:
296
242
  r"""Retrieves timezone information for a given latitude and longitude.
297
-
298
243
  This function uses the Google Maps Timezone API to fetch timezone
299
244
  data for the specified latitude and longitude. It returns a natural
300
245
  language description of the timezone, including the timezone ID, name,
@@ -302,23 +247,16 @@ class GoogleMapsToolkit(BaseToolkit):
302
247
  offset from Coordinated Universal Time (UTC).
303
248
 
304
249
  Args:
305
- lat_lng (Tuple[float, float]): The latitude and longitude for
306
- which to retrieve elevation data.
250
+ lat (float): The latitude of the location to query.
251
+ lng (float): The longitude of the location to query.
307
252
 
308
253
  Returns:
309
254
  str: A descriptive string of the timezone information,
310
- including the timezone ID and name, standard time offset,
311
- daylight saving time offset, and total offset from UTC.
255
+ including the timezone ID and name, standard time offset,
256
+ daylight saving time offset, and total offset from UTC.
312
257
  """
313
- googlemaps = self._import_googlemaps_or_raise()
314
- google_maps_api_key = self._get_googlemaps_api_key()
315
- try:
316
- gmaps = googlemaps.Client(key=google_maps_api_key)
317
- except Exception as e:
318
- return f"Error: {e!s}"
319
-
320
258
  # Get timezone information
321
- timezone_dict = gmaps.timezone(lat_lng)
259
+ timezone_dict = self.gmaps.timezone((lat, lng))
322
260
 
323
261
  # Extract necessary information
324
262
  dst_offset = timezone_dict[
@@ -330,10 +268,10 @@ class GoogleMapsToolkit(BaseToolkit):
330
268
  timezone_id = timezone_dict['timeZoneId']
331
269
  timezone_name = timezone_dict['timeZoneName']
332
270
 
333
- raw_offset_str = self._format_offset_to_natural_language(raw_offset)
334
- dst_offset_str = self._format_offset_to_natural_language(dst_offset)
271
+ raw_offset_str = _format_offset_to_natural_language(raw_offset)
272
+ dst_offset_str = _format_offset_to_natural_language(dst_offset)
335
273
  total_offset_seconds = dst_offset + raw_offset
336
- total_offset_str = self._format_offset_to_natural_language(
274
+ total_offset_str = _format_offset_to_natural_language(
337
275
  total_offset_seconds
338
276
  )
339
277
 
@@ -343,9 +281,8 @@ class GoogleMapsToolkit(BaseToolkit):
343
281
  f"The standard time offset is {raw_offset_str}. "
344
282
  f"Daylight Saving Time offset is {dst_offset_str}. "
345
283
  f"The total offset from Coordinated Universal Time (UTC) is "
346
- f"{total_offset_str}, including any "
347
- "Daylight Saving Time adjustment "
348
- f"if applicable. "
284
+ f"{total_offset_str}, including any Daylight Saving Time "
285
+ f"adjustment if applicable. "
349
286
  )
350
287
 
351
288
  return description
@@ -363,6 +300,3 @@ class GoogleMapsToolkit(BaseToolkit):
363
300
  OpenAIFunction(self.get_elevation),
364
301
  OpenAIFunction(self.get_timezone),
365
302
  ]
366
-
367
-
368
- MAP_FUNCS: List[OpenAIFunction] = GoogleMapsToolkit().get_tools()
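
The refactor above moves the `googlemaps` import into an `@dependencies_required` decorated `__init__`, reads `GOOGLE_API_KEY` (previously `GOOGLEMAPS_API_KEY`) from the environment once, and switches `get_elevation`/`get_timezone` from a `(lat, lng)` tuple to separate floats. A hedged usage sketch; it requires `pip install googlemaps` and a valid key:

import os

os.environ["GOOGLE_API_KEY"] = "<your-google-maps-key>"   # placeholder

from camel.toolkits import GoogleMapsToolkit

toolkit = GoogleMapsToolkit()                             # raises if the key is missing
print(toolkit.get_elevation(40.714728, -73.998672))       # separate lat/lng floats
print(toolkit.get_timezone(40.714728, -73.998672))

camel/toolkits/linkedin_toolkit.py CHANGED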
@@ -225,6 +225,3 @@ class LinkedInToolkit(BaseToolkit):
225
225
  if not token:
226
226
  return "Access token not found. Please set LINKEDIN_ACCESS_TOKEN."
227
227
  return token
228
-
229
-
230
- LINKEDIN_FUNCS: List[OpenAIFunction] = LinkedInToolkit().get_tools()
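camel/toolkits/open_api_toolkit.py CHANGED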
@@ -542,6 +542,3 @@ class OpenAPIToolkit:
542
542
  OpenAIFunction(a_func, a_schema)
543
543
  for a_func, a_schema in zip(all_funcs_lst, all_schemas_lst)
544
544
  ]
545
-
546
-
547
- OPENAPI_FUNCS: List[OpenAIFunction] = OpenAPIToolkit().get_tools()
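camel/toolkits/reddit_toolkit.py CHANGED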
@@ -232,6 +232,3 @@ class RedditToolkit(BaseToolkit):
232
232
  OpenAIFunction(self.perform_sentiment_analysis),
233
233
  OpenAIFunction(self.track_keyword_discussions),
234
234
  ]
235
-
236
-
237
- REDDIT_FUNCS: List[OpenAIFunction] = RedditToolkit().get_tools()
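camel/toolkits/retrieval_toolkit.py CHANGED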
@@ -86,7 +86,3 @@ class RetrievalToolkit(BaseToolkit):
86
86
  return [
87
87
  OpenAIFunction(self.information_retrieval),
88
88
  ]
89
-
90
-
91
- # add the function to OpenAIFunction list
92
- RETRIEVAL_FUNCS: List[OpenAIFunction] = RetrievalToolkit().get_tools()
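camel/toolkits/slack_toolkit.py CHANGED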
@@ -303,6 +303,3 @@ class SlackToolkit(BaseToolkit):
303
303
  OpenAIFunction(self.send_slack_message),
304
304
  OpenAIFunction(self.delete_slack_message),
305
305
  ]
306
-
307
-
308
- SLACK_FUNCS: List[OpenAIFunction] = SlackToolkit().get_tools()
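camel/toolkits/twitter_toolkit.py CHANGED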
@@ -410,7 +410,7 @@ class TwitterToolkit(BaseToolkit):
410
410
  return TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET
411
411
 
412
412
  def _get_oauth_session(self) -> requests.Session:
413
- r'''Initiates an OAuth1Session with Twitter's API and returns it.
413
+ r"""Initiates an OAuth1Session with Twitter's API and returns it.
414
414
 
415
415
  The function first fetches a request token, then prompts the user to
416
416
  authorize the application. After the user has authorized the
@@ -431,7 +431,7 @@ class TwitterToolkit(BaseToolkit):
431
431
  Manage-Tweets/create_tweet.py
432
432
  https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/
433
433
  User-Lookup/get_users_me_user_context.py
434
- '''
434
+ """
435
435
  try:
436
436
  from requests_oauthlib import OAuth1Session
437
437
  except ImportError:
@@ -517,6 +517,3 @@ class TwitterToolkit(BaseToolkit):
517
517
  return "HTTP Exception"
518
518
  else:
519
519
  return "Unexpected Exception"
520
-
521
-
522
- TWITTER_FUNCS: List[OpenAIFunction] = TwitterToolkit().get_tools()
camel/types/enums.py CHANGED
@@ -29,6 +29,8 @@ class ModelType(Enum):
29
29
  GPT_4_TURBO = "gpt-4-turbo"
30
30
  GPT_4O = "gpt-4o"
31
31
  GPT_4O_MINI = "gpt-4o-mini"
32
+ O1_PREVIEW = "o1-preview"
33
+ O1_MINI = "o1-mini"
32
34
 
33
35
  GLM_4 = "glm-4"
34
36
  GLM_4_OPEN_SOURCE = "glm-4-open-source"
@@ -105,6 +107,8 @@ class ModelType(Enum):
105
107
  ModelType.GPT_4_TURBO,
106
108
  ModelType.GPT_4O,
107
109
  ModelType.GPT_4O_MINI,
110
+ ModelType.O1_PREVIEW,
111
+ ModelType.O1_MINI,
108
112
  }
109
113
 
110
114
  @property
@@ -270,6 +274,8 @@ class ModelType(Enum):
270
274
  ModelType.GPT_4O,
271
275
  ModelType.GPT_4O_MINI,
272
276
  ModelType.GPT_4_TURBO,
277
+ ModelType.O1_PREVIEW,
278
+ ModelType.O1_MINI,
273
279
  ModelType.MISTRAL_LARGE,
274
280
  ModelType.MISTRAL_NEMO,
275
281
  ModelType.QWEN_2,
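
A quick check of the two enum members added above; the membership test relies on the set extended in the second hunk, and the property name is assumed from CAMEL's existing `ModelType` API:

from camel.types import ModelType

assert ModelType.O1_PREVIEW.value == "o1-preview"
assert ModelType.O1_MINI.value == "o1-mini"
print(ModelType.O1_MINI.is_openai)   # expected True per the set above; property name assumed

camel/utils/token_counting.py CHANGED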
@@ -193,8 +193,14 @@ def get_model_encoding(value_for_tiktoken: str):
193
193
  try:
194
194
  encoding = tiktoken.encoding_for_model(value_for_tiktoken)
195
195
  except KeyError:
196
- print("Model not found. Using cl100k_base encoding.")
197
- encoding = tiktoken.get_encoding("cl100k_base")
196
+ if value_for_tiktoken in [
197
+ ModelType.O1_MINI.value,
198
+ ModelType.O1_PREVIEW.value,
199
+ ]:
200
+ encoding = tiktoken.get_encoding("o200k_base")
201
+ else:
202
+ print("Model not found. Using cl100k_base encoding.")
203
+ encoding = tiktoken.get_encoding("cl100k_base")
198
204
  return encoding
199
205
 
200
206
 
@@ -288,6 +294,9 @@ class OpenAITokenCounter(BaseTokenCounter):
288
294
  elif ("gpt-3.5-turbo" in self.model) or ("gpt-4" in self.model):
289
295
  self.tokens_per_message = 3
290
296
  self.tokens_per_name = 1
297
+ elif "o1" in self.model:
298
+ self.tokens_per_message = 2
299
+ self.tokens_per_name = 1
291
300
  else:
292
301
  # flake8: noqa :E501
293
302
  raise NotImplementedError(
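
The encoding fallback above maps the o1 model names to tiktoken's `o200k_base` when `encoding_for_model` does not recognize them, and `OpenAITokenCounter` now counts 2 tokens per message for o1 models. A minimal sketch of the fallback itself:

import tiktoken

try:
    encoding = tiktoken.encoding_for_model("o1-mini")
except KeyError:
    encoding = tiktoken.get_encoding("o200k_base")   # same fallback as above
print(len(encoding.encode("hello world")))
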
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: camel-ai
3
- Version: 0.1.8
3
+ Version: 0.2.0
4
4
  Summary: Communicative Agents for AI Society Study
5
5
  Home-page: https://www.camel-ai.org/
6
6
  License: Apache-2.0
@@ -56,9 +56,10 @@ Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "
56
56
  Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
57
57
  Requires-Dist: nltk (==3.8.1) ; extra == "tools" or extra == "all"
58
58
  Requires-Dist: numpy (>=1,<2)
59
- Requires-Dist: openai (>=1.2.3,<2.0.0)
59
+ Requires-Dist: openai (>=1.45.0,<2.0.0)
60
60
  Requires-Dist: openapi-spec-validator (>=0.7.1,<0.8.0) ; extra == "tools" or extra == "all"
61
61
  Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
62
+ Requires-Dist: pandoc
62
63
  Requires-Dist: pathlib (>=1.0.1,<2.0.0)
63
64
  Requires-Dist: pillow (>=10.2.0,<11.0.0) ; extra == "tools" or extra == "all"
64
65
  Requires-Dist: prance (>=23.6.21.0,<24.0.0.0) ; extra == "tools" or extra == "all"
@@ -213,7 +214,7 @@ conda create --name camel python=3.10
213
214
  conda activate camel
214
215
 
215
216
  # Clone github repo
216
- git clone -b v0.1.8 https://github.com/camel-ai/camel.git
217
+ git clone -b v0.2.0 https://github.com/camel-ai/camel.git
217
218
 
218
219
  # Change directory into project directory
219
220
  cd camel
@@ -1,7 +1,7 @@
1
- camel/__init__.py,sha256=LIlV3O5R-G_rJo3VJCy1pfsbJTHL2EOHf6G78HpJgqs,778
1
+ camel/__init__.py,sha256=zzLXt4OmIHyizUAU7n1idW4aN99Vcbjpo9JHPUUprko,778
2
2
  camel/agents/__init__.py,sha256=SSU1wbhZXWwQnE0rRxkpyN57kEu72KklsZNcdLkXfTs,1551
3
3
  camel/agents/base.py,sha256=X39qWSiT1WnDqaJ9k3gQrTpOQSwUKzNEVpp5AY6fDH8,1130
4
- camel/agents/chat_agent.py,sha256=ikYGo5ahcmubb5OHNcHlehTC0adnGxCMx0iKPnyUYoU,35760
4
+ camel/agents/chat_agent.py,sha256=cIAGXDTbZVrVsq8UsZRv8Et9eAx-lFIT73DsYTVopo0,36276
5
5
  camel/agents/critic_agent.py,sha256=To-istnO-9Eb0iabdeIDrgfvkxYYfsdX9xIZiSrc3oM,7493
6
6
  camel/agents/deductive_reasoner_agent.py,sha256=49vwglWYHgXf-VRftdMN9OFGOwqdsXyTt45PP6z-pbg,13473
7
7
  camel/agents/embodied_agent.py,sha256=3ABuiRQXBpplKbuhPY5KNLJyKc6Z8SgXgzIges3ZwVs,7542
@@ -12,7 +12,7 @@ camel/agents/task_agent.py,sha256=n9xIU3QtcptRPSuHZJ4ntQ_M_a8AvJ6U9ZRV8VaxV5A,14
12
12
  camel/agents/tool_agents/__init__.py,sha256=ulTNWU2qoFGe3pvVmCq_sdfeSX3NKZ0due66TYvsL-M,862
13
13
  camel/agents/tool_agents/base.py,sha256=nQAhfWi8a_bCgzlf5-G-tmj1fKm6AjpRc89NQkWwpnc,1399
14
14
  camel/agents/tool_agents/hugging_face_tool_agent.py,sha256=1Z5tG6f_86eL0vmtRZ-BJvoLDFFLhoHt8JtDvgat1xU,8723
15
- camel/configs/__init__.py,sha256=GmkMkyWsCWUW_8Oc9yajTUac39Sx9iSVlnzaZFTFm_0,2231
15
+ camel/configs/__init__.py,sha256=3SCXrA7ML8CNoOWQ1poJY6dE2e07Qxf7ACD_mdo2aJ8,2341
16
16
  camel/configs/anthropic_config.py,sha256=DGQoPyYrayhYQ7aSjkYYGHOZ5VdQ9qahtaS0p_GpU0Q,3294
17
17
  camel/configs/base_config.py,sha256=gjsDACMCk-hXDBk7qkeHcpbQrWy6jbp4iyzfqgghJEk,2485
18
18
  camel/configs/gemini_config.py,sha256=YHJSNEAIxBxPX1NAj2rWvM4OOR7vmIANH88pZO-aOsY,6880
@@ -22,7 +22,7 @@ camel/configs/mistral_config.py,sha256=G9LuY0-3S6az-8j8kpqB-4asgoaxTOsZVYeZBYJl6
22
22
  camel/configs/ollama_config.py,sha256=xrT-ulqvANjIu0bVxOzN93uaKUs8e2gW1tmYK1jULEM,4357
23
23
  camel/configs/openai_config.py,sha256=yQf7lkBcYTtCNAopow3SlZgcDMlMkiCpC5Dvhh9wb9M,7327
24
24
  camel/configs/reka_config.py,sha256=ECYg3BT7onwZX-iKLq-5TBhCFdm70rV-9hZ_G6Ye8-k,3504
25
- camel/configs/samba_config.py,sha256=XpNrsQjTIpEDxtGpVhr9eAU89k48jiMhF6qihWDOc0k,4577
25
+ camel/configs/samba_config.py,sha256=U1krJSFRaFCvNCxKxgT0zlYxifviHsdkS53hr6D_Bvg,10254
26
26
  camel/configs/togetherai_config.py,sha256=WERo8W6yb-qy_3qa1GUckt58J5XGKwN5X_nC9baL8Cs,5663
27
27
  camel/configs/vllm_config.py,sha256=jfeveBnlkkBHC2RFkffG6ZlTkGzkwrX_WXMwHkg36Jg,5516
28
28
  camel/configs/zhipuai_config.py,sha256=zU8Zaj3d9d7SCFEFIkCIRNlnJw9z_oFDmIoCQKrerEM,3600
@@ -68,16 +68,16 @@ camel/models/litellm_model.py,sha256=5sTOzI07FsxDEW3jSK-XXBx91Yo8z9voahyCsK36U6U
68
68
  camel/models/mistral_model.py,sha256=39rHJ-z_6Z-UbtUqZPEAbCdFYY1Ft0Drs42jG5hHaho,9517
69
69
  camel/models/model_factory.py,sha256=_dQOx_MYxXih6uQOjkKR7uoIrhDcWRsMTKHbO3NLYN0,5974
70
70
  camel/models/nemotron_model.py,sha256=2Idf4wrZervxvfu6av42EKjefFtDnBb6cKnWCJUkqI4,2682
71
- camel/models/ollama_model.py,sha256=tdcKUKRo9gsa6ifvEzlE32T5eHFmi-9guKP3afwhUKs,5684
71
+ camel/models/ollama_model.py,sha256=FSMwH2-856Zhxusm4B773xBBHdlD3UOw9OAuH5eTJTw,5686
72
72
  camel/models/open_source_model.py,sha256=p5a2sCeZl5SyrgkygClndOrHEjpJxmyhE1CqKE2fZSw,6363
73
73
  camel/models/openai_audio_models.py,sha256=_ddOxqzFZCVZaK6h33Z0THU6HXk2XlJTxVWquZ3oOaQ,10042
74
74
  camel/models/openai_compatibility_model.py,sha256=7h1zSFBgg_mQojFvtSqC54tcZOZY0NFsZ7ZNlns5CWk,4229
75
- camel/models/openai_model.py,sha256=uOtiLmbdH7sDKqk9oV0i1HEVni_4ApPXCukShZwQDKA,4611
75
+ camel/models/openai_model.py,sha256=27NbN0bU_cAPmWjwsWceNO4zW8WS2j3P_YWNTXeg1O8,5464
76
76
  camel/models/reka_model.py,sha256=_ERZvtkK0Gd7GUx3f4VVqqtH093clVMoJfa896t9f2M,8043
77
- camel/models/samba_model.py,sha256=kIpk6gtwVEpXAKD8kE33TYP4o4Lm-hp6b_X7lsskcio,16385
77
+ camel/models/samba_model.py,sha256=CgAYMIVJFAEoyCOsYS7qD_bvWhzOkvA6SD5nGBClbzE,17699
78
78
  camel/models/stub_model.py,sha256=DuqaBsS55STSbcLJsk025Uwo_u4ixrSSKqKEoZj2ihY,3680
79
79
  camel/models/togetherai_model.py,sha256=kUFGxb6cXUgkvMNQ0MsDKW27Udw622zt2QIVa3U7iLU,5461
80
- camel/models/vllm_model.py,sha256=YsJmgvxcsXrUGOpAVV4mrRz7T-zwywn8fBxTgNfiQ3s,5806
80
+ camel/models/vllm_model.py,sha256=Q71tfFyfd1t81r1CxS6UT5KLadDYPgOt_cG_z6DHkWE,5808
81
81
  camel/models/zhipuai_model.py,sha256=JqJDEMk6vWH-ZnKkMwdG4yDvJWf1xk4PBsp2ifSFGR0,4939
82
82
  camel/prompts/__init__.py,sha256=O5bkcuwj2kXTkz5yDPiiMI8KN04vI8bCKG7mGE1SIdI,2326
83
83
  camel/prompts/ai_society.py,sha256=ApgvIED1Z_mdsWDNc2_u35Ktp7pEKksMrOIQKo_q5cI,6306
@@ -132,13 +132,13 @@ camel/terminators/__init__.py,sha256=pE7fcfDUNngdbm1BhzSQPRMXNbdd28rl9YbF4gKWwXE
132
132
  camel/terminators/base.py,sha256=TSkl3maNEsdjyAniJaSgFfD4UF8RQ1LwNIiGw0dN8Gg,1396
133
133
  camel/terminators/response_terminator.py,sha256=zcXuigbvlclUoBv4xcVbfU36ZohUT1RhI-rSnukloUY,4951
134
134
  camel/terminators/token_limit_terminator.py,sha256=mK30wVUnoqNAvIo-wxkqY5gUSNay2M04rsAktKqoiOI,2087
135
- camel/toolkits/__init__.py,sha256=CfWQm901AVP_Eq-PMxJAej8EZ9ZAUoWDPnGlCJkkZL4,2326
135
+ camel/toolkits/__init__.py,sha256=g5xQo15tx1WiR6MEHS6IXayLSnwSWk88VK7dosEG79M,2033
136
136
  camel/toolkits/base.py,sha256=ez04Ei8jwIAws023bM19EGkOPUkQMouULqBvOKfM4kM,986
137
137
  camel/toolkits/code_execution.py,sha256=fWBhn1_3adiv7YYuA0gJzEBlc_dYNS6_hVtDbgB-zX0,2425
138
138
  camel/toolkits/dalle_toolkit.py,sha256=IalDFfNCz58LMRdCZNSJfLMiauHGBGN9XNRV7pzuf28,5261
139
139
  camel/toolkits/github_toolkit.py,sha256=ZauRY-kW8nx_L6igVEF62hD16j3KhqU2r49t1j6hO78,10979
140
- camel/toolkits/google_maps_toolkit.py,sha256=uylzlmsbjbcMwjVDPVLTLiZrUKSmGxpfukSqaJ8bM94,14343
141
- camel/toolkits/linkedin_toolkit.py,sha256=gdqj-6XnVUH-YpkZMS042t-FQ4yB1KRj3syCwjfLrnw,8004
140
+ camel/toolkits/google_maps_toolkit.py,sha256=7kTWBp6hzh10MryFY4RLIBAWD-9fjiecHIQcRm0OsvA,11972
141
+ camel/toolkits/linkedin_toolkit.py,sha256=JgO8vpuum_KBijvKvDSjM9QpRPedT1azVSZHJb4EtfM,7933
142
142
  camel/toolkits/math_toolkit.py,sha256=r-85DHvihR87DU6n_W75pecV1P9xV3Hylfp6u-ue7T4,2521
143
143
  camel/toolkits/open_api_specs/biztoc/__init__.py,sha256=f3LXNDzN2XWWoF2D0nesG8VuEA6Zd14i2aiTDbCm5bA,708
144
144
  camel/toolkits/open_api_specs/biztoc/ai-plugin.json,sha256=IJinQbLv5MFPGFwdN7PbOhwArFVExSEZdJspe-mOBIo,866
@@ -165,22 +165,22 @@ camel/toolkits/open_api_specs/web_scraper/ai-plugin.json,sha256=jjHvbj0DQ4AYcL9J
165
165
  camel/toolkits/open_api_specs/web_scraper/openapi.yaml,sha256=u_WalQ01e8W1D27VnZviOylpGmJ-zssYrfAgkzqdoyk,2191
166
166
  camel/toolkits/open_api_specs/web_scraper/paths/__init__.py,sha256=f3LXNDzN2XWWoF2D0nesG8VuEA6Zd14i2aiTDbCm5bA,708
167
167
  camel/toolkits/open_api_specs/web_scraper/paths/scraper.py,sha256=SQGbFkshLN4xm-Ya49ssbSvaU1nFVNFYhWsEPYVeFe0,1123
168
- camel/toolkits/open_api_toolkit.py,sha256=rbQrhY6gHoZi9kiX9138pah9qZ2S8K5Vex1zFGWeCK8,23403
168
+ camel/toolkits/open_api_toolkit.py,sha256=dwXd-msNKVAOmqF1WIkdsK8bcOjDH9rV2sT1AJy8pMY,23334
169
169
  camel/toolkits/openai_function.py,sha256=eaE441qxLvuRKr_WrpYLGkr5P2Nav07VVdR29n76RkU,14767
170
- camel/toolkits/reddit_toolkit.py,sha256=FMHK7k9UHN-IqdabqyBavpo0ZOloveuPsOe3Ou-tq4o,8975
171
- camel/toolkits/retrieval_toolkit.py,sha256=UFByIxMB8m_C8HH-a65MeBJJACoJcVrcKMU9TGzm_SI,3828
170
+ camel/toolkits/reddit_toolkit.py,sha256=zVojG_dM_ZbU8lZDM7AnxfMsL2JVWnNYdWqvdYUwVeM,8908
171
+ camel/toolkits/retrieval_toolkit.py,sha256=qE1IS2WZnFtnxvj4t7eSUYMhKpK5-4ifExbIBgLEPT8,3713
172
172
  camel/toolkits/search_toolkit.py,sha256=vXe026bQpLic09iwY5PN4RS6SXeHYBBkjfnOlJYB670,12943
173
- camel/toolkits/slack_toolkit.py,sha256=JdgDJe7iExTmG7dDXOG6v5KpVjZ6_My_d_WFTYSxkw4,10839
174
- camel/toolkits/twitter_toolkit.py,sha256=oQw8wRkU7iDxaocsmWvio4pU75pmq6FJAorPdQ2xEAE,19810
173
+ camel/toolkits/slack_toolkit.py,sha256=gblCbN_RCsOdgo1GGUF-R8YJneNRjezJMHhYoRFjCE0,10774
174
+ camel/toolkits/twitter_toolkit.py,sha256=0QYLlsg4hUVev2Z0hPJHksDNHG_54IiVHsB3IxCqrXs,19741
175
175
  camel/toolkits/weather_toolkit.py,sha256=n4YrUI_jTIH7oqH918IdHbXLgfQ2BPGIWWK8Jp8G1Uw,7054
176
176
  camel/types/__init__.py,sha256=ArKXATj3z_Vv4ISmROVeo6Mv3tj5kE1dTkqfgwyxVY4,1975
177
- camel/types/enums.py,sha256=Nx35qNmjRTwT_B6uceZ3BtwxthrRqbRRnOk558_vQUk,17458
177
+ camel/types/enums.py,sha256=APqZIKtR2tVbUe-1JZWi7a50gjjDC7QHw9A0eKJuefA,17642
178
178
  camel/types/openai_types.py,sha256=BNQ6iCzKTjSvgcXFsAFIgrUS_YUFZBU6bDoyAp387hI,2045
179
179
  camel/utils/__init__.py,sha256=IdI9v0FetNR-nx-Hg4bmNHoYto6Xfcs_uaomksdewmo,2303
180
180
  camel/utils/async_func.py,sha256=SLo8KPkrNKdsONvFf3KBb33EgFn4gH2EKSX1aI_LKes,1578
181
181
  camel/utils/commons.py,sha256=y7eng5QF5Hkt5tuNhtEOJycTIq9hXymrUuwIS5nRad4,16481
182
182
  camel/utils/constants.py,sha256=8n4F8Y-DZy4z2F0hRvAq6f-d9SbS59kK5FyLrnJ3mkY,1360
183
- camel/utils/token_counting.py,sha256=G7vBzrxSXm4DzHMOfMXaOYjYf8WJTpxjHjlzmngHlYQ,21004
183
+ camel/utils/token_counting.py,sha256=AVGml8X_qq3rPdzw2tc9I3n-oEkGyNH_vPsedhVtew0,21318
184
184
  camel/workforce/__init__.py,sha256=6jwJWDlESEqcnWCm61WCyjzFUF6KLzXA_fGI86rHfiE,878
185
185
  camel/workforce/base.py,sha256=lEHqgOV1tmsy7y4wuuKClcDkoPCRvXVdMrBngsM_6yY,1722
186
186
  camel/workforce/manager_node.py,sha256=eMmsOAoy0Wtk92b_06GhGnwKDgrTo0w-UgQorkh-az0,11529
@@ -191,6 +191,6 @@ camel/workforce/utils.py,sha256=Z-kODz5PMPtfeKKVqpcQq-b-B8oqC7XSwi_F3__Ijhs,3526
191
191
  camel/workforce/worker_node.py,sha256=wsRqk2rugCvvkcmCzvn-y-gQuyuJGAG8PIr1KtgqJFw,3878
192
192
  camel/workforce/workforce.py,sha256=SVJJgSSkYvk05RgL9oaJzHwzziH7u51KLINRuzLB8BI,1773
193
193
  camel/workforce/workforce_prompt.py,sha256=cAWYEIA0rau5itEekSoUIFttBzpKM9RzB6x-mfukGSU,4665
194
- camel_ai-0.1.8.dist-info/METADATA,sha256=ceVYBfVAJD9y8zBVbAMJmFC2omjmZlvftsFxVTHBtHU,24654
195
- camel_ai-0.1.8.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
196
- camel_ai-0.1.8.dist-info/RECORD,,
194
+ camel_ai-0.2.0.dist-info/METADATA,sha256=6mTZQestY-Rvn1pkyLO2-DycdgKFKy6YO7yDf37Vmq8,24677
195
+ camel_ai-0.2.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
196
+ camel_ai-0.2.0.dist-info/RECORD,,