camel-ai 0.1.5.5__py3-none-any.whl → 0.1.5.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (70)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +1 -1
  3. camel/agents/knowledge_graph_agent.py +11 -15
  4. camel/agents/task_agent.py +0 -1
  5. camel/configs/__init__.py +6 -0
  6. camel/configs/gemini_config.py +98 -0
  7. camel/configs/litellm_config.py +1 -1
  8. camel/configs/openai_config.py +1 -1
  9. camel/configs/zhipuai_config.py +1 -1
  10. camel/models/__init__.py +2 -0
  11. camel/models/base_model.py +4 -1
  12. camel/models/gemini_model.py +203 -0
  13. camel/models/litellm_model.py +16 -0
  14. camel/models/model_factory.py +3 -2
  15. camel/models/ollama_model.py +16 -0
  16. camel/models/zhipuai_model.py +0 -1
  17. camel/toolkits/__init__.py +36 -0
  18. camel/toolkits/base.py +1 -1
  19. camel/toolkits/code_execution.py +1 -1
  20. camel/toolkits/github_toolkit.py +3 -2
  21. camel/toolkits/google_maps_toolkit.py +367 -0
  22. camel/toolkits/math_toolkit.py +79 -0
  23. camel/toolkits/open_api_toolkit.py +548 -0
  24. camel/toolkits/retrieval_toolkit.py +76 -0
  25. camel/toolkits/search_toolkit.py +326 -0
  26. camel/toolkits/slack_toolkit.py +308 -0
  27. camel/toolkits/twitter_toolkit.py +522 -0
  28. camel/toolkits/weather_toolkit.py +173 -0
  29. camel/types/enums.py +18 -0
  30. camel/utils/__init__.py +2 -0
  31. camel/utils/async_func.py +1 -1
  32. camel/utils/token_counting.py +34 -0
  33. {camel_ai-0.1.5.5.dist-info → camel_ai-0.1.5.7.dist-info}/METADATA +3 -2
  34. {camel_ai-0.1.5.5.dist-info → camel_ai-0.1.5.7.dist-info}/RECORD +61 -60
  35. camel/functions/__init__.py +0 -51
  36. camel/functions/google_maps_function.py +0 -335
  37. camel/functions/math_functions.py +0 -61
  38. camel/functions/open_api_function.py +0 -508
  39. camel/functions/retrieval_functions.py +0 -61
  40. camel/functions/search_functions.py +0 -298
  41. camel/functions/slack_functions.py +0 -286
  42. camel/functions/twitter_function.py +0 -479
  43. camel/functions/weather_functions.py +0 -144
  44. /camel/{functions → toolkits}/open_api_specs/biztoc/__init__.py +0 -0
  45. /camel/{functions → toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
  46. /camel/{functions → toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
  47. /camel/{functions → toolkits}/open_api_specs/coursera/__init__.py +0 -0
  48. /camel/{functions → toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
  49. /camel/{functions → toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
  50. /camel/{functions → toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
  51. /camel/{functions → toolkits}/open_api_specs/klarna/__init__.py +0 -0
  52. /camel/{functions → toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
  53. /camel/{functions → toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
  54. /camel/{functions → toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
  55. /camel/{functions → toolkits}/open_api_specs/outschool/__init__.py +0 -0
  56. /camel/{functions → toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
  57. /camel/{functions → toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
  58. /camel/{functions → toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
  59. /camel/{functions → toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
  60. /camel/{functions → toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
  61. /camel/{functions → toolkits}/open_api_specs/security_config.py +0 -0
  62. /camel/{functions → toolkits}/open_api_specs/speak/__init__.py +0 -0
  63. /camel/{functions → toolkits}/open_api_specs/speak/openapi.yaml +0 -0
  64. /camel/{functions → toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
  65. /camel/{functions → toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
  66. /camel/{functions → toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
  67. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
  68. /camel/{functions → toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
  69. /camel/{functions → toolkits}/openai_function.py +0 -0
  70. {camel_ai-0.1.5.5.dist-info → camel_ai-0.1.5.7.dist-info}/WHEEL +0 -0
camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

- __version__ = '0.1.5.5'
+ __version__ = '0.1.5.7'

  __all__ = [
      '__version__',
camel/agents/chat_agent.py CHANGED
@@ -42,8 +42,8 @@ from camel.utils import get_model_encoding
  if TYPE_CHECKING:
      from openai import Stream

-     from camel.functions import OpenAIFunction
      from camel.terminators import ResponseTerminator
+     from camel.toolkits import OpenAIFunction


  @dataclass(frozen=True)
camel/agents/knowledge_graph_agent.py CHANGED
@@ -78,17 +78,16 @@ Expected Output:

  Nodes:

- Node(id='John', type='Person', properties={'agent_generated'})
- Node(id='XYZ Corporation', type='Organization', properties={'agent_generated'})
- Node(id='New York City', type='Location', properties={'agent_generated'})
+ Node(id='John', type='Person')
+ Node(id='XYZ Corporation', type='Organization')
+ Node(id='New York City', type='Location')

  Relationships:

  Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
- Corporation', type='Organization'), type='WorksAt', properties=
- {'agent_generated'})
+ Corporation', type='Organization'), type='WorksAt')
  Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
- type='Location'), type='ResidesIn', properties={'agent_generated'})
+ type='Location'), type='ResidesIn')

  ===== TASK =====
  Please extracts nodes and relationships from given content and structures them
@@ -211,11 +210,10 @@ class KnowledgeGraphAgent(ChatAgent):
          import re

          # Regular expressions to extract nodes and relationships
-         node_pattern = r"Node\(id='(.*?)', type='(.*?)', properties=(.*?)\)"
+         node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
          rel_pattern = (
              r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
-             r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', "
-             r"properties=\{(.*?)\}\)"
+             r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
          )

          nodes = {}
@@ -223,8 +221,8 @@ class KnowledgeGraphAgent(ChatAgent):

          # Extract nodes
          for match in re.finditer(node_pattern, input_string):
-             id, type, properties = match.groups()
-             properties = eval(properties)
+             id, type = match.groups()
+             properties = {'source': 'agent_created'}
              if id not in nodes:
                  node = Node(id, type, properties)
                  if self._validate_node(node):
@@ -232,10 +230,8 @@ class KnowledgeGraphAgent(ChatAgent):

          # Extract relationships
          for match in re.finditer(rel_pattern, input_string):
-             subj_id, subj_type, obj_id, obj_type, rel_type, properties_str = (
-                 match.groups()
-             )
-             properties = eval(properties_str)
+             subj_id, subj_type, obj_id, obj_type, rel_type = match.groups()
+             properties = {'source': 'agent_created'}
              if subj_id in nodes and obj_id in nodes:
                  subj = nodes[subj_id]
                  obj = nodes[obj_id]
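
Note on the parser change above: the old code called eval() on a properties payload extracted from model output, an arbitrary-code-execution risk; the new code matches only id and type and stamps a fixed provenance dict. A minimal standalone sketch of the new extraction behavior (regex only, without the Node class or the validation step):

import re

# Same node pattern as the updated parser.
node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"

sample = "Node(id='John', type='Person')"
match = re.search(node_pattern, sample)
if match:
    node_id, node_type = match.groups()
    # The rewritten parser attaches a fixed marker instead of eval()-ing
    # properties text produced by the model.
    properties = {'source': 'agent_created'}
    print(node_id, node_type, properties)
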
camel/agents/task_agent.py CHANGED
@@ -100,7 +100,6 @@ class TaskSpecifyAgent(ChatAgent):

          if meta_dict is not None:
              task_specify_prompt = task_specify_prompt.format(**meta_dict)
-
          task_msg = BaseMessage.make_user_message(
              role_name="Task Specifier", content=task_specify_prompt
          )
camel/configs/__init__.py CHANGED
@@ -13,6 +13,10 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
  from .base_config import BaseConfig
+ from .gemini_config import (
+     Gemini_API_PARAMS,
+     GeminiConfig,
+ )
  from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
  from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
  from .openai_config import (
@@ -35,4 +39,6 @@ __all__ = [
      'OLLAMA_API_PARAMS',
      'ZhipuAIConfig',
      'ZHIPUAI_API_PARAMS',
+     'GeminiConfig',
+     'Gemini_API_PARAMS',
  ]
camel/configs/gemini_config.py ADDED
@@ -0,0 +1,98 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+
+ from collections.abc import Iterable
+ from dataclasses import asdict, dataclass
+ from typing import TYPE_CHECKING, Optional
+
+ from camel.configs.base_config import BaseConfig
+
+ if TYPE_CHECKING:
+     from google.generativeai.protos import Schema
+     from google.generativeai.types.content_types import (
+         FunctionLibraryType,
+         ToolConfigType,
+     )
+     from google.generativeai.types.helper_types import RequestOptionsType
+     from google.generativeai.types.safety_types import SafetySettingOptions
+
+
+ @dataclass(frozen=True)
+ class GeminiConfig(BaseConfig):
+     r"""A simple dataclass used to configure the generation parameters of
+     `GenerativeModel.generate_content`.
+
+     Args:
+         candidate_count (int, optional): Number of responses to return.
+         stop_sequences (Iterable[str], optional): The set of character
+             sequences (up to 5) that will stop output generation. If specified
+             the API will stop at the first appearance of a stop sequence.
+             The stop sequence will not be included as part of the response.
+         max_output_tokens (int, optional): The maximum number of tokens to
+             include in a candidate. If unset, this will default to
+             output_token_limit specified in the model's specification.
+         temperature (float, optional): Controls the randomness of the output.
+             Note: The default value varies by model, see the
+             `Model.temperature` attribute of the `Model` returned
+             the `genai.get_model` function. Values can range from [0.0,1.0],
+             inclusive. A value closer to 1.0 will produce responses that are
+             more varied and creative, while a value closer to 0.0 will
+             typically result in more straightforward responses from the model.
+         top_p (int, optional): The maximum cumulative probability of tokens to
+             consider when sampling. The model uses combined Top-k and nucleus
+             sampling. Tokens are sorted based on their assigned probabilities
+             so that only the most likely tokens are considered. Top-k sampling
+             directly limits the maximum number of tokens to consider, while
+             Nucleus sampling limits number of tokens
+             based on the cumulative probability. Note: The default value varies
+             by model, see the `Model.top_p` attribute of the `Model` returned
+             the `genai.get_model` function.
+         top_k (int, optional): The maximum number of tokens to consider when
+             sampling. The model uses combined Top-k and nucleus sampling. Top-k
+             sampling considers the set of `top_k` most probable tokens.
+             Defaults to 40. Note: The default value varies by model, see the
+             `Model.top_k` attribute of the `Model` returned the
+             `genai.get_model` function.
+         response_mime_type (str, optional): Output response mimetype of the
+             generated candidate text. Supported mimetype:
+             `text/plain`: (default) Text output.
+             `application/json`: JSON response in the candidates.
+         response_schema (Schema, optional): Specifies the format of the
+             JSON requested if response_mime_type is `application/json`.
+         safety_settings (SafetySettingOptions, optional):
+             Overrides for the model's safety settings.
+         tools (FunctionLibraryType, optional):
+             `protos.Tools` more info coming soon.
+         tool_config (ToolConfigType, optional):
+             more info coming soon.
+         request_options (RequestOptionsType, optional):
+             Options for the request.
+     """
+
+     candidate_count: Optional[int] = None
+     stop_sequences: Optional[Iterable[str]] = None
+     max_output_tokens: Optional[int] = None
+     temperature: Optional[float] = None
+     top_p: Optional[float] = None
+     top_k: Optional[int] = None
+     response_mime_type: Optional[str] = None
+     response_schema: Optional['Schema'] = None
+     safety_settings: Optional['SafetySettingOptions'] = None
+     tools: Optional['FunctionLibraryType'] = None
+     tool_config: Optional['ToolConfigType'] = None
+     request_options: Optional['RequestOptionsType'] = None
+
+
+ Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}
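
Because GeminiConfig is a frozen dataclass whose fields all default to None, Gemini_API_PARAMS resolves to the set of accepted parameter names, which the Gemini backend's check_model_config uses for validation. A brief sketch of how the config is meant to be built (assumes this release is installed; the field values shown are arbitrary):

from dataclasses import asdict

from camel.configs import Gemini_API_PARAMS, GeminiConfig

# Unset fields stay None; keys recognized by google-generativeai's
# GenerationConfig are later gathered into a generation_config object
# by the Gemini backend (see gemini_model.py below).
config = GeminiConfig(temperature=0.4, max_output_tokens=1024)

print('temperature' in Gemini_API_PARAMS)  # True
print(asdict(config)['top_k'])             # None
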
camel/configs/litellm_config.py CHANGED
@@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, List, Optional, Union
  from camel.configs.base_config import BaseConfig

  if TYPE_CHECKING:
-     from camel.functions import OpenAIFunction
+     from camel.toolkits import OpenAIFunction


  @dataclass(frozen=True)
camel/configs/openai_config.py CHANGED
@@ -21,7 +21,7 @@ from openai._types import NOT_GIVEN, NotGiven
  from camel.configs.base_config import BaseConfig

  if TYPE_CHECKING:
-     from camel.functions import OpenAIFunction
+     from camel.toolkits import OpenAIFunction


  @dataclass(frozen=True)
camel/configs/zhipuai_config.py CHANGED
@@ -21,7 +21,7 @@ from openai._types import NOT_GIVEN, NotGiven
  from camel.configs.base_config import BaseConfig

  if TYPE_CHECKING:
-     from camel.functions import OpenAIFunction
+     from camel.toolkits import OpenAIFunction


  @dataclass(frozen=True)
camel/models/__init__.py CHANGED
@@ -13,6 +13,7 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from .anthropic_model import AnthropicModel
  from .base_model import BaseModelBackend
+ from .gemini_model import GeminiModel
  from .litellm_model import LiteLLMModel
  from .model_factory import ModelFactory
  from .nemotron_model import NemotronModel
@@ -35,4 +36,5 @@ __all__ = [
      'OpenAIAudioModels',
      'NemotronModel',
      'OllamaModel',
+     'GeminiModel',
  ]
camel/models/base_model.py CHANGED
@@ -109,7 +109,10 @@ class BaseModelBackend(ABC):
          Returns:
              int: The maximum token limit for the given model.
          """
-         return self.model_type.token_limit
+         return (
+             self.model_config_dict.get("max_tokens")
+             or self.model_type.token_limit
+         )

      @property
      def stream(self) -> bool:
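
This change lets a user-supplied "max_tokens" in model_config_dict override the static per-model limit, falling back to model_type.token_limit otherwise. Because `or` is used rather than an explicit None check, a falsy value such as 0 also falls through to the default. A toy stand-in for the resolution logic (plain function, not the actual class):

# Hypothetical stand-in mirroring the new fallback expression.
def token_limit(model_config_dict: dict, model_type_token_limit: int) -> int:
    return model_config_dict.get("max_tokens") or model_type_token_limit

print(token_limit({"max_tokens": 2048}, 4096))  # 2048 (config wins)
print(token_limit({}, 4096))                    # 4096 (fallback)
print(token_limit({"max_tokens": 0}, 4096))     # 4096 (0 is falsy)
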
camel/models/gemini_model.py ADDED
@@ -0,0 +1,203 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+ from camel.configs import Gemini_API_PARAMS
+ from camel.messages import OpenAIMessage
+ from camel.models import BaseModelBackend
+ from camel.types import (
+     ChatCompletion,
+     ChatCompletionMessage,
+     Choice,
+     ModelType,
+ )
+ from camel.utils import (
+     BaseTokenCounter,
+     GeminiTokenCounter,
+     api_keys_required,
+ )
+
+ if TYPE_CHECKING:
+     from google.generativeai.types import ContentsType, GenerateContentResponse
+
+
+ class GeminiModel(BaseModelBackend):
+     r"""Gemini API in a unified BaseModelBackend interface."""
+
+     # NOTE: Currently "stream": True is not supported with Gemini due to the
+     # limitation of the current camel design.
+
+     def __init__(
+         self,
+         model_type: ModelType,
+         model_config_dict: Dict[str, Any],
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+     ) -> None:
+         r"""Constructor for Gemini backend.
+
+         Args:
+             model_type (ModelType): Model for which a backend is created.
+             model_config_dict (Dict[str, Any]): A dictionary that will
+                 be fed into generate_content().
+             api_key (Optional[str]): The API key for authenticating with the
+                 gemini service. (default: :obj:`None`)
+             url (Optional[str]): The url to the gemini service.
+         """
+         import os
+
+         import google.generativeai as genai
+         from google.generativeai.types.generation_types import GenerationConfig
+
+         super().__init__(model_type, model_config_dict, api_key, url)
+         self._api_key = api_key or os.environ.get("GOOGLE_API_KEY")
+         genai.configure(api_key=self._api_key)
+         self._client = genai.GenerativeModel(self.model_type.value)
+         self._token_counter: Optional[BaseTokenCounter] = None
+         keys = list(self.model_config_dict.keys())
+         generation_config_dict = {
+             k: self.model_config_dict.pop(k)
+             for k in keys
+             if hasattr(GenerationConfig, k)
+         }
+         generation_config = genai.types.GenerationConfig(
+             **generation_config_dict
+         )
+         self.model_config_dict["generation_config"] = generation_config
+
+     @property
+     def token_counter(self) -> BaseTokenCounter:
+         if not self._token_counter:
+             self._token_counter = GeminiTokenCounter(self.model_type)
+         return self._token_counter
+
+     @api_keys_required("GOOGLE_API_KEY")
+     def run(
+         self,
+         messages: List[OpenAIMessage],
+     ) -> ChatCompletion:
+         r"""Runs inference of Gemini model.
+         This method can handle multimodal input
+
+         Args:
+             messages: Message list or Message with the chat history
+                 in OpenAi format.
+
+         Returns:
+             response: A ChatCompletion object formatted for the OpenAI API.
+         """
+         response = self._client.generate_content(
+             contents=self.to_gemini_req(messages),
+             **self.model_config_dict,
+         )
+         response.resolve()
+         return self.to_openai_response(response)
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any
+         unexpected arguments to Gemini API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+                 unexpected arguments to OpenAI API.
+         """
+         if self.model_config_dict is not None:
+             for param in self.model_config_dict:
+                 if param not in Gemini_API_PARAMS:
+                     raise ValueError(
+                         f"Unexpected argument `{param}` is "
+                         "input into Gemini model backend."
+                     )
+
+     @property
+     def stream(self) -> bool:
+         r"""Returns whether the model is in stream mode,
+         which sends partial results each time.
+
+         Returns:
+             bool: Whether the model is in stream mode.
+         """
+         return self.model_config_dict.get('stream', False)
+
+     def to_gemini_req(self, messages: List[OpenAIMessage]) -> 'ContentsType':
+         r"""Converts the request from the OpenAI API format to
+         the Gemini API request format.
+
+         Args:
+             messages: The request object from the OpenAI API.
+
+         Returns:
+             converted_messages: A list of messages formatted for Gemini API.
+         """
+         # role reference
+         # https://ai.google.dev/api/python/google/generativeai/protos/Content
+         converted_messages = []
+         for message in messages:
+             role = message.get('role')
+             if role == 'assistant':
+                 role_to_gemini = 'model'
+             else:
+                 role_to_gemini = 'user'
+             converted_message = {
+                 "role": role_to_gemini,
+                 "parts": message.get("content"),
+             }
+             converted_messages.append(converted_message)
+         return converted_messages
+
+     def to_openai_response(
+         self,
+         response: 'GenerateContentResponse',
+     ) -> ChatCompletion:
+         r"""Converts the response from the Gemini API to the OpenAI API
+         response format.
+
+         Args:
+             response: The response object returned by the Gemini API
+
+         Returns:
+             openai_response: A ChatCompletion object formatted for
+                 the OpenAI API.
+         """
+         import time
+         import uuid
+
+         openai_response = ChatCompletion(
+             id=f"chatcmpl-{uuid.uuid4().hex!s}",
+             object="chat.completion",
+             created=int(time.time()),
+             model=self.model_type.value,
+             choices=[],
+         )
+         for i, candidate in enumerate(response.candidates):
+             content = ""
+             if candidate.content and len(candidate.content.parts) > 0:
+                 content = candidate.content.parts[0].text
+             finish_reason = candidate.finish_reason
+             finish_reason_mapping = {
+                 "FinishReason.STOP": "stop",
+                 "FinishReason.SAFETY": "content_filter",
+                 "FinishReason.RECITATION": "content_filter",
+                 "FinishReason.MAX_TOKENS": "length",
+             }
+             finish_reason = finish_reason_mapping.get(finish_reason, "stop")
+             choice = Choice(
+                 index=i,
+                 message=ChatCompletionMessage(
+                     role="assistant", content=content
+                 ),
+                 finish_reason=finish_reason,
+             )
+             openai_response.choices.append(choice)
+         return openai_response
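
A detail worth calling out in to_gemini_req above: OpenAI's role vocabulary is collapsed onto Gemini's two roles, so 'assistant' maps to 'model' and everything else, including 'system', is sent as 'user'. A standalone restatement of that mapping (no Gemini SDK required):

# Standalone copy of the role-mapping logic in GeminiModel.to_gemini_req().
def to_gemini_req(messages):
    converted = []
    for message in messages:
        role = 'model' if message.get('role') == 'assistant' else 'user'
        converted.append({"role": role, "parts": message.get("content")})
    return converted

history = [
    {"role": "system", "content": "You are terse."},  # folded into 'user'
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello."},       # becomes 'model'
]
print(to_gemini_req(history))
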
camel/models/litellm_model.py CHANGED
@@ -138,3 +138,19 @@ class LiteLLMModel:
                      f"Unexpected argument `{param}` is "
                      "input into LiteLLM model backend."
                  )
+
+     @property
+     def token_limit(self) -> int:
+         """Returns the maximum token limit for the given model.
+
+         Returns:
+             int: The maximum token limit for the given model.
+         """
+         max_tokens = self.model_config_dict.get("max_tokens")
+         if isinstance(max_tokens, int):
+             return max_tokens
+         print(
+             "Must set `max_tokens` as an integer in `model_config_dict` when"
+             " setting up the model. Using 4096 as default value."
+         )
+         return 4096
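
Unlike the BaseModelBackend.token_limit change above, LiteLLMModel (and OllamaModel below) has no ModelType table to fall back on, so it requires an integer max_tokens and otherwise warns and returns 4096. The isinstance guard means a string such as "8192" also triggers the fallback. A toy stand-in:

# Hypothetical stand-in for the new LiteLLM/Ollama token_limit logic.
def token_limit(model_config_dict: dict) -> int:
    max_tokens = model_config_dict.get("max_tokens")
    if isinstance(max_tokens, int):
        return max_tokens
    return 4096  # the real method also prints a warning here

print(token_limit({"max_tokens": 8192}))    # 8192
print(token_limit({"max_tokens": "8192"}))  # 4096, strings fail the guard
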
camel/models/model_factory.py CHANGED
@@ -15,6 +15,7 @@ from typing import Any, Dict, Optional, Union

  from camel.models.anthropic_model import AnthropicModel
  from camel.models.base_model import BaseModelBackend
+ from camel.models.gemini_model import GeminiModel
  from camel.models.litellm_model import LiteLLMModel
  from camel.models.ollama_model import OllamaModel
  from camel.models.open_source_model import OpenSourceModel
@@ -59,7 +60,6 @@ class ModelFactory:
              BaseModelBackend: The initialized backend.
          """
          model_class: Any
-
          if isinstance(model_type, ModelType):
              if model_platform.is_open_source and model_type.is_open_source:
                  model_class = OpenSourceModel
@@ -70,6 +70,8 @@ class ModelFactory:
                  model_class = AnthropicModel
              elif model_platform.is_zhipuai and model_type.is_zhipuai:
                  model_class = ZhipuAIModel
+             elif model_platform.is_gemini and model_type.is_gemini:
+                 model_class = GeminiModel
              elif model_type == ModelType.STUB:
                  model_class = StubModel
              else:
@@ -90,5 +92,4 @@
              )
          else:
              raise ValueError(f"Invalid model type `{model_type}` provided.")
-
          return model_class(model_type, model_config_dict, api_key, url)
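
With the new is_gemini branch, a Gemini backend is obtainable through the factory like any other platform. A hedged usage sketch; the enum members ModelPlatformType.GEMINI and ModelType.GEMINI_1_5_FLASH are assumptions based on the camel/types/enums.py additions (+18 lines) listed above, not shown in this excerpt:

from dataclasses import asdict

from camel.configs import GeminiConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType  # members assumed

# Requires GOOGLE_API_KEY in the environment; constructing the backend
# configures the google-generativeai client.
model = ModelFactory.create(
    model_platform=ModelPlatformType.GEMINI,  # assumed enum member
    model_type=ModelType.GEMINI_1_5_FLASH,    # assumed enum member
    model_config_dict=asdict(GeminiConfig(temperature=0.4)),
)
print(model.token_limit)
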
camel/models/ollama_model.py CHANGED
@@ -104,6 +104,22 @@ class OllamaModel:
          )
          return response

+     @property
+     def token_limit(self) -> int:
+         """Returns the maximum token limit for the given model.
+
+         Returns:
+             int: The maximum token limit for the given model.
+         """
+         max_tokens = self.model_config_dict.get("max_tokens")
+         if isinstance(max_tokens, int):
+             return max_tokens
+         print(
+             "Must set `max_tokens` as an integer in `model_config_dict` when"
+             " setting up the model. Using 4096 as default value."
+         )
+         return 4096
+
      @property
      def stream(self) -> bool:
          r"""Returns whether the model is in stream mode, which sends partial
camel/models/zhipuai_model.py CHANGED
@@ -118,7 +118,6 @@ class ZhipuAIModel(BaseModelBackend):
                      f"Unexpected argument `{param}` is "
                      "input into ZhipuAI model backend."
                  )
-         pass

      @property
      def stream(self) -> bool:
camel/toolkits/__init__.py CHANGED
@@ -11,13 +11,49 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # ruff: noqa: I001
+ from .openai_function import (
+     OpenAIFunction,
+     get_openai_function_schema,
+     get_openai_tool_schema,
+ )
+ from .open_api_specs.security_config import openapi_security_config
+
+ from .google_maps_toolkit import MAP_FUNCS, GoogleMapsToolkit
+ from .math_toolkit import MATH_FUNCS, MathToolkit
+ from .open_api_toolkit import OPENAPI_FUNCS, OpenAPIToolkit
+ from .retrieval_toolkit import RETRIEVAL_FUNCS, RetrievalToolkit
+ from .search_toolkit import SEARCH_FUNCS, SearchToolkit
+ from .twitter_toolkit import TWITTER_FUNCS, TwitterToolkit
+ from .weather_toolkit import WEATHER_FUNCS, WeatherToolkit
+ from .slack_toolkit import SLACK_FUNCS, SlackToolkit

  from .base import BaseToolkit
  from .code_execution import CodeExecutionToolkit
  from .github_toolkit import GithubToolkit

  __all__ = [
+     'OpenAIFunction',
+     'get_openai_function_schema',
+     'get_openai_tool_schema',
+     'openapi_security_config',
+     'MATH_FUNCS',
+     'MAP_FUNCS',
+     'OPENAPI_FUNCS',
+     'RETRIEVAL_FUNCS',
+     'SEARCH_FUNCS',
+     'TWITTER_FUNCS',
+     'WEATHER_FUNCS',
+     'SLACK_FUNCS',
      'BaseToolkit',
      'GithubToolkit',
+     'MathToolkit',
+     'GoogleMapsToolkit',
+     'SearchToolkit',
+     'SlackToolkit',
+     'TwitterToolkit',
+     'WeatherToolkit',
+     'RetrievalToolkit',
+     'OpenAPIToolkit',
      'CodeExecutionToolkit',
  ]
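
The net effect of this restructuring, together with the deleted camel/functions modules in the file list above, is a relocation of the public import surface: what previously lived under camel.functions now lives under camel.toolkits, with the old *_FUNCS lists re-exported next to the new Toolkit classes. A hedged before/after sketch for downstream code:

# Before (0.1.5.5):
#     from camel.functions import OpenAIFunction, MATH_FUNCS

# After (0.1.5.7):
from camel.toolkits import MATH_FUNCS, MathToolkit, OpenAIFunction

# MATH_FUNCS remains a list of OpenAIFunction objects; the toolkit class
# is the new entry point (get_tools() is assumed from the BaseToolkit
# pattern, not shown in this diff).
tools = MathToolkit().get_tools()
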
camel/toolkits/base.py CHANGED
@@ -14,7 +14,7 @@

  from typing import List

- from camel.functions import OpenAIFunction
+ from .openai_function import OpenAIFunction


  class BaseToolkit:
camel/toolkits/code_execution.py CHANGED
@@ -13,8 +13,8 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from typing import List, Literal

- from camel.functions import OpenAIFunction
  from camel.interpreters import InternalPythonInterpreter
+ from camel.toolkits import OpenAIFunction

  from .base import BaseToolkit
camel/toolkits/github_toolkit.py CHANGED
@@ -17,10 +17,11 @@ from dataclasses import dataclass
  from datetime import datetime, timedelta
  from typing import List, Optional

- from camel.functions import OpenAIFunction
- from camel.toolkits.base import BaseToolkit
  from camel.utils import dependencies_required

+ from .base import BaseToolkit
+ from .openai_function import OpenAIFunction
+

  @dataclass
  class GithubIssue: