camel-ai 0.1.5.4__py3-none-any.whl → 0.1.5.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (48) hide show
  1. camel/__init__.py +1 -1
  2. camel/agents/knowledge_graph_agent.py +11 -15
  3. camel/agents/task_agent.py +0 -1
  4. camel/configs/__init__.py +12 -0
  5. camel/configs/gemini_config.py +97 -0
  6. camel/configs/litellm_config.py +8 -18
  7. camel/configs/ollama_config.py +85 -0
  8. camel/configs/zhipuai_config.py +78 -0
  9. camel/embeddings/openai_embedding.py +2 -2
  10. camel/functions/search_functions.py +5 -14
  11. camel/functions/slack_functions.py +5 -7
  12. camel/functions/twitter_function.py +3 -8
  13. camel/functions/weather_functions.py +3 -8
  14. camel/interpreters/__init__.py +2 -0
  15. camel/interpreters/docker_interpreter.py +235 -0
  16. camel/loaders/__init__.py +2 -0
  17. camel/loaders/base_io.py +5 -9
  18. camel/loaders/jina_url_reader.py +99 -0
  19. camel/loaders/unstructured_io.py +4 -6
  20. camel/models/__init__.py +2 -0
  21. camel/models/anthropic_model.py +6 -4
  22. camel/models/gemini_model.py +203 -0
  23. camel/models/litellm_model.py +49 -21
  24. camel/models/model_factory.py +4 -2
  25. camel/models/nemotron_model.py +14 -6
  26. camel/models/ollama_model.py +11 -17
  27. camel/models/openai_audio_models.py +10 -2
  28. camel/models/openai_model.py +4 -3
  29. camel/models/zhipuai_model.py +12 -6
  30. camel/retrievers/bm25_retriever.py +3 -8
  31. camel/retrievers/cohere_rerank_retriever.py +3 -5
  32. camel/storages/__init__.py +2 -0
  33. camel/storages/graph_storages/neo4j_graph.py +3 -7
  34. camel/storages/key_value_storages/__init__.py +2 -0
  35. camel/storages/key_value_storages/redis.py +169 -0
  36. camel/storages/vectordb_storages/milvus.py +3 -7
  37. camel/storages/vectordb_storages/qdrant.py +3 -7
  38. camel/toolkits/__init__.py +2 -0
  39. camel/toolkits/code_execution.py +69 -0
  40. camel/toolkits/github_toolkit.py +5 -9
  41. camel/types/enums.py +53 -1
  42. camel/utils/__init__.py +4 -2
  43. camel/utils/async_func.py +42 -0
  44. camel/utils/commons.py +31 -49
  45. camel/utils/token_counting.py +74 -1
  46. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.6.dist-info}/METADATA +12 -3
  47. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.6.dist-info}/RECORD +48 -39
  48. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.6.dist-info}/WHEEL +0 -0
@@ -0,0 +1,235 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+
15
+ import io
16
+ import shlex
17
+ import tarfile
18
+ import uuid
19
+ from pathlib import Path
20
+ from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional
21
+
22
+ from colorama import Fore
23
+
24
+ from camel.interpreters.base import BaseInterpreter
25
+ from camel.interpreters.interpreter_error import InterpreterError
26
+ from camel.utils import is_docker_running
27
+
28
+ if TYPE_CHECKING:
29
+ from docker.models.containers import Container
30
+
31
+
32
+ class DockerInterpreter(BaseInterpreter):
33
+ r"""A class for executing code files or code strings in a docker container.
34
+
35
+ This class handles the execution of code in different scripting languages
36
+ (currently Python and Bash) within a docker container, capturing their
37
+ stdout and stderr streams, and allowing user checking before executing code
38
+ strings.
39
+
40
+ Args:
41
+ require_confirm (bool, optional): If `True`, prompt user before
42
+ running code strings for security. Defaults to `True`.
43
+ print_stdout (bool, optional): If `True`, print the standard
44
+ output of the executed code. Defaults to `False`.
45
+ print_stderr (bool, optional): If `True`, print the standard error
46
+ of the executed code. Defaults to `True`.
47
+ """
48
+
49
+ _CODE_EXECUTE_CMD_MAPPING: ClassVar[Dict[str, str]] = {
50
+ "python": "python {file_name}",
51
+ "bash": "bash {file_name}",
52
+ }
53
+
54
+ _CODE_EXTENSION_MAPPING: ClassVar[Dict[str, str]] = {
55
+ "python": "py",
56
+ "bash": "sh",
57
+ }
58
+
59
+ _CODE_TYPE_MAPPING: ClassVar[Dict[str, str]] = {
60
+ "python": "python",
61
+ "py3": "python",
62
+ "python3": "python",
63
+ "py": "python",
64
+ "shell": "bash",
65
+ "bash": "bash",
66
+ "sh": "bash",
67
+ }
68
+
69
+ def __init__(
70
+ self,
71
+ require_confirm: bool = True,
72
+ print_stdout: bool = False,
73
+ print_stderr: bool = True,
74
+ ) -> None:
75
+ self.require_confirm = require_confirm
76
+ self.print_stdout = print_stdout
77
+ self.print_stderr = print_stderr
78
+
79
+ # lazy initialization of container
80
+ self._container: Optional[Container] = None
81
+
82
+ def __del__(self) -> None:
83
+ if self._container is not None:
84
+ self._container.remove(force=True)
85
+
86
+ def _initialize_if_needed(self) -> None:
87
+ if self._container is not None:
88
+ return
89
+
90
+ if not is_docker_running():
91
+ raise InterpreterError(
92
+ "Docker daemon is not running. Please install/start docker "
93
+ "and try again."
94
+ )
95
+
96
+ import docker
97
+
98
+ client = docker.from_env()
99
+ self._container = client.containers.run(
100
+ "python:3.10",
101
+ detach=True,
102
+ name=f"camel-interpreter-{uuid.uuid4()}",
103
+ command="tail -f /dev/null",
104
+ )
105
+
106
+ def _create_file_in_container(self, content: str) -> Path:
107
+ # get a random name for the file
108
+ filename = str(uuid.uuid4())
109
+ # create a tar in memory
110
+ tar_stream = io.BytesIO()
111
+ with tarfile.open(fileobj=tar_stream, mode='w') as tar:
112
+ tarinfo = tarfile.TarInfo(name=filename)
113
+ tarinfo.size = len(content)
114
+ tar.addfile(tarinfo, io.BytesIO(content.encode('utf-8')))
115
+ tar_stream.seek(0)
116
+
117
+ # copy the tar into the container
118
+ if self._container is None:
119
+ raise InterpreterError(
120
+ "Container is not initialized. Try running the code again."
121
+ )
122
+ self._container.put_archive("/tmp", tar_stream)
123
+ return Path(f"/tmp/{filename}")
124
+
125
+ def _run_file_in_container(
126
+ self,
127
+ file: Path,
128
+ code_type: str,
129
+ ) -> str:
130
+ code_type = self._check_code_type(code_type)
131
+ commands = shlex.split(
132
+ self._CODE_EXECUTE_CMD_MAPPING[code_type].format(
133
+ file_name=str(file)
134
+ )
135
+ )
136
+ if self._container is None:
137
+ raise InterpreterError(
138
+ "Container is not initialized. Try running the code again."
139
+ )
140
+ stdout, stderr = self._container.exec_run(
141
+ commands,
142
+ demux=True,
143
+ ).output
144
+
145
+ if self.print_stdout and stdout:
146
+ print("======stdout======")
147
+ print(Fore.GREEN + stdout.decode() + Fore.RESET)
148
+ print("==================")
149
+ if self.print_stderr and stderr:
150
+ print("======stderr======")
151
+ print(Fore.RED + stderr.decode() + Fore.RESET)
152
+ print("==================")
153
+ exec_result = f"{stdout.decode()}" if stdout else ""
154
+ exec_result += f"(stderr: {stderr.decode()})" if stderr else ""
155
+ return exec_result
156
+
157
+ def run(
158
+ self,
159
+ code: str,
160
+ code_type: str,
161
+ ) -> str:
162
+ r"""Executes the given code in the container attached to the
163
+ interpreter, and captures the stdout and stderr streams.
164
+
165
+ Args:
166
+ code (str): The code string to execute.
167
+ code_type (str): The type of code to execute (e.g., 'python',
168
+ 'bash').
169
+
170
+ Returns:
171
+ str: A string containing the captured stdout and stderr of the
172
+ executed code.
173
+
174
+ Raises:
175
+ InterpreterError: If the user declines to run the code, or the
176
+ code type is unsupported, or there is an error in the docker
177
+ API/container
178
+ """
179
+ import docker.errors
180
+
181
+ code_type = self._check_code_type(code_type)
182
+
183
+ # Print code for security checking
184
+ if self.require_confirm:
185
+ print(f"The following {code_type} code will run in container:")
186
+ print(Fore.CYAN + code + Fore.RESET)
187
+ while True:
188
+ choice = input("Running code? [Y/n]:").lower()
189
+ if choice in ["y", "yes", "ye", ""]:
190
+ break
191
+ elif choice not in ["no", "n"]:
192
+ continue
193
+ raise InterpreterError(
194
+ "Execution halted: User opted not to run the code. "
195
+ "This choice stops the current operation and any "
196
+ "further code execution."
197
+ )
198
+
199
+ self._initialize_if_needed()
200
+
201
+ try:
202
+ temp_file_path = self._create_file_in_container(code)
203
+ result = self._run_file_in_container(temp_file_path, code_type)
204
+ except docker.errors.APIError as e:
205
+ raise InterpreterError(
206
+ f"Execution halted due to docker API error: {e.explanation}. "
207
+ "This choice stops the current operation and any "
208
+ "further code execution."
209
+ ) from e
210
+ except docker.errors.DockerException as e:
211
+ raise InterpreterError(
212
+ f"Execution halted due to docker exception: {e}. "
213
+ "This choice stops the current operation and any "
214
+ "further code execution."
215
+ ) from e
216
+ return result
217
+
218
+ def _check_code_type(self, code_type: str) -> str:
219
+ if code_type not in self._CODE_TYPE_MAPPING:
220
+ raise InterpreterError(
221
+ f"Unsupported code type {code_type}. Currently "
222
+ f"`{self.__class__.__name__}` only supports "
223
+ f"{', '.join(self._CODE_EXTENSION_MAPPING.keys())}."
224
+ )
225
+ return self._CODE_TYPE_MAPPING[code_type]
226
+
227
+ def supported_code_types(self) -> List[str]:
228
+ r"""Provides supported code types by the interpreter."""
229
+ return list(self._CODE_EXTENSION_MAPPING.keys())
230
+
231
+ def update_action_space(self, action_space: Dict[str, Any]) -> None:
232
+ r"""Updates action space for *python* interpreter"""
233
+ raise RuntimeError(
234
+ "DockerInterpreter doesn't support " "`action_space`."
235
+ )
camel/loaders/__init__.py CHANGED
@@ -13,10 +13,12 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
15
  from .base_io import File, read_file
16
+ from .jina_url_reader import JinaURLReader
16
17
  from .unstructured_io import UnstructuredIO
17
18
 
18
19
  __all__ = [
19
20
  'File',
20
21
  'read_file',
21
22
  'UnstructuredIO',
23
+ 'JinaURLReader',
22
24
  ]
camel/loaders/base_io.py CHANGED
@@ -19,6 +19,8 @@ from hashlib import md5
19
19
  from io import BytesIO
20
20
  from typing import Any, Dict, List, Optional
21
21
 
22
+ from camel.utils import dependencies_required
23
+
22
24
 
23
25
  class File(ABC):
24
26
  r"""Represents an uploaded file comprised of Documents"""
@@ -94,6 +96,7 @@ def strip_consecutive_newlines(text: str) -> str:
94
96
 
95
97
  class DocxFile(File):
96
98
  @classmethod
99
+ @dependencies_required('docx2txt')
97
100
  def from_bytes(cls, file: BytesIO) -> "DocxFile":
98
101
  r"""Creates a DocxFile object from a BytesIO object.
99
102
 
@@ -104,15 +107,8 @@ class DocxFile(File):
104
107
  Returns:
105
108
  DocxFile: A DocxFile object.
106
109
  """
107
- # Use docx2txt to extract text from docx files
108
- try:
109
- import docx2txt
110
- except ImportError:
111
- raise ImportError(
112
- "Please install `docx2txt` first. "
113
- "You can install it by running "
114
- "`pip install docx2txt`."
115
- )
110
+ import docx2txt
111
+
116
112
  text = docx2txt.process(file)
117
113
  text = strip_consecutive_newlines(text)
118
114
  # Create a dictionary with the extracted text
@@ -0,0 +1,99 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+
15
+ import os
16
+ from typing import Any, Optional
17
+
18
+ from camel.types.enums import JinaReturnFormat
19
+
20
+ JINA_ENDPOINT = "https://r.jina.ai/"
21
+
22
+
23
+ class JinaURLReader:
24
+ r"""URL Reader provided by Jina AI. The output is cleaner and more
25
+ LLM-friendly than the URL Reader of UnstructuredIO. Can be configured to
26
+ replace the UnstructuredIO URL Reader in the pipeline.
27
+
28
+ Args:
29
+ api_key (Optional[str], optional): The API key for Jina AI. If not
30
+ provided, the reader will have a lower rate limit. Defaults to
31
+ None.
32
+ return_format (ReturnFormat, optional): The level of detail
33
+ of the returned content, which is optimized for LLMs. For
34
+ now screenshots are not supported. Defaults to
35
+ ReturnFormat.DEFAULT.
36
+ json_response (bool, optional): Whether to return the response
37
+ in JSON format. Defaults to False.
38
+ timeout (int, optional): The maximum time in seconds to wait for
39
+ the page to be rendered. Defaults to 30.
40
+ **kwargs (Any): Additional keyword arguments, including proxies,
41
+ cookies, etc. It should align with the HTTP Header field and
42
+ value pairs listed in the reference.
43
+
44
+ References:
45
+ https://jina.ai/reader
46
+ """
47
+
48
+ def __init__(
49
+ self,
50
+ api_key: Optional[str] = None,
51
+ return_format: JinaReturnFormat = JinaReturnFormat.DEFAULT,
52
+ json_response: bool = False,
53
+ timeout: int = 30,
54
+ **kwargs: Any,
55
+ ) -> None:
56
+ api_key = api_key or os.getenv('JINA_API_KEY')
57
+ if api_key is None:
58
+ print(
59
+ "[JinaURLReader] JINA_API_KEY not set. This will result in a "
60
+ "low rate limit of Jina URL Reader. Get API key here: "
61
+ "https://jina.ai/reader."
62
+ )
63
+
64
+ # if the following field not provided, it will be None
65
+ api_field = f"Bearer {api_key}" if api_key else None
66
+ json_field = "application/json" if json_response else None
67
+
68
+ raw_headers = {
69
+ "Authorization": api_field,
70
+ "X-Return-Format": return_format.value,
71
+ "Accept": json_field,
72
+ "X-Timeout": str(timeout),
73
+ **kwargs,
74
+ }
75
+
76
+ # eliminate None values
77
+ self._headers = {k: v for k, v in raw_headers.items() if v}
78
+
79
+ def read_content(self, url: str) -> str:
80
+ r"""Reads the content of a URL and returns it as a string with
81
+ given form.
82
+
83
+ Args:
84
+ url (str): The URL to read.
85
+
86
+ Returns:
87
+ str: The content of the URL.
88
+ """
89
+
90
+ import requests
91
+
92
+ full_url = f"{JINA_ENDPOINT}{url}"
93
+ try:
94
+ resp = requests.get(full_url, headers=self._headers)
95
+ resp.raise_for_status()
96
+ except Exception as e:
97
+ raise Exception(f"Failed to read content from {url}: {e}") from e
98
+
99
+ return resp.text
@@ -16,6 +16,8 @@ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
16
16
 
17
17
  from unstructured.documents.elements import Element
18
18
 
19
+ from camel.utils import dependencies_required
20
+
19
21
 
20
22
  class UnstructuredIO:
21
23
  r"""A class to handle various functionalities provided by the
@@ -37,6 +39,7 @@ class UnstructuredIO:
37
39
  """
38
40
  self._ensure_unstructured_version(self.UNSTRUCTURED_MIN_VERSION)
39
41
 
42
+ @dependencies_required('unstructured')
40
43
  def _ensure_unstructured_version(self, min_version: str) -> None:
41
44
  r"""Validates that the installed 'Unstructured' library version
42
45
  satisfies the specified minimum version requirement. This function is
@@ -58,12 +61,7 @@ class UnstructuredIO:
58
61
  strings.
59
62
  """
60
63
  from packaging import version
61
-
62
- try:
63
- from unstructured.__version__ import __version__
64
-
65
- except ImportError as e:
66
- raise ImportError("Package `unstructured` not installed.") from e
64
+ from unstructured.__version__ import __version__
67
65
 
68
66
  # Use packaging.version to compare versions
69
67
  min_ver = version.parse(min_version)
camel/models/__init__.py CHANGED
@@ -13,6 +13,7 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
  from .anthropic_model import AnthropicModel
15
15
  from .base_model import BaseModelBackend
16
+ from .gemini_model import GeminiModel
16
17
  from .litellm_model import LiteLLMModel
17
18
  from .model_factory import ModelFactory
18
19
  from .nemotron_model import NemotronModel
@@ -35,4 +36,5 @@ __all__ = [
35
36
  'OpenAIAudioModels',
36
37
  'NemotronModel',
37
38
  'OllamaModel',
39
+ 'GeminiModel',
38
40
  ]
@@ -23,7 +23,7 @@ from camel.types import ChatCompletion, ModelType
23
23
  from camel.utils import (
24
24
  AnthropicTokenCounter,
25
25
  BaseTokenCounter,
26
- model_api_key_required,
26
+ api_keys_required,
27
27
  )
28
28
 
29
29
 
@@ -46,11 +46,13 @@ class AnthropicModel(BaseModelBackend):
46
46
  be fed into Anthropic.messages.create().
47
47
  api_key (Optional[str]): The API key for authenticating with the
48
48
  Anthropic service. (default: :obj:`None`)
49
- url (Optional[str]): The url to the model service.
49
+ url (Optional[str]): The url to the Anthropic service. (default:
50
+ :obj:`None`)
50
51
  """
51
52
  super().__init__(model_type, model_config_dict, api_key, url)
52
53
  self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
53
- self.client = Anthropic(api_key=self._api_key, base_url=url)
54
+ self._url = url or os.environ.get("ANTHROPIC_API_BASE_URL")
55
+ self.client = Anthropic(api_key=self._api_key, base_url=self._url)
54
56
  self._token_counter: Optional[BaseTokenCounter] = None
55
57
 
56
58
  def _convert_response_from_anthropic_to_openai(self, response):
@@ -96,7 +98,7 @@ class AnthropicModel(BaseModelBackend):
96
98
  """
97
99
  return self.client.count_tokens(prompt)
98
100
 
99
- @model_api_key_required
101
+ @api_keys_required("ANTHROPIC_API_KEY")
100
102
  def run(
101
103
  self,
102
104
  messages: List[OpenAIMessage],
@@ -0,0 +1,203 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional
15
+
16
+ from camel.configs import Gemini_API_PARAMS
17
+ from camel.messages import OpenAIMessage
18
+ from camel.models import BaseModelBackend
19
+ from camel.types import (
20
+ ChatCompletion,
21
+ ChatCompletionMessage,
22
+ Choice,
23
+ ModelType,
24
+ )
25
+ from camel.utils import (
26
+ BaseTokenCounter,
27
+ GeminiTokenCounter,
28
+ api_keys_required,
29
+ )
30
+
31
+ if TYPE_CHECKING:
32
+ from google.generativeai.types import ContentsType, GenerateContentResponse
33
+
34
+
35
+ class GeminiModel(BaseModelBackend):
36
+ r"""Gemini API in a unified BaseModelBackend interface."""
37
+
38
+ # NOTE: Currently "stream": True is not supported with Gemini due to the
39
+ # limitation of the current camel design.
40
+
41
+ def __init__(
42
+ self,
43
+ model_type: ModelType,
44
+ model_config_dict: Dict[str, Any],
45
+ api_key: Optional[str] = None,
46
+ url: Optional[str] = None,
47
+ ) -> None:
48
+ r"""Constructor for Gemini backend.
49
+
50
+ Args:
51
+ model_type (ModelType): Model for which a backend is created.
52
+ model_config_dict (Dict[str, Any]): A dictionary that will
53
+ be fed into generate_content().
54
+ api_key (Optional[str]): The API key for authenticating with the
55
+ gemini service. (default: :obj:`None`)
56
+ url (Optional[str]): The url to the gemini service.
57
+ """
58
+ import os
59
+
60
+ import google.generativeai as genai
61
+ from google.generativeai.types.generation_types import GenerationConfig
62
+
63
+ super().__init__(model_type, model_config_dict, api_key, url)
64
+ self._api_key = api_key or os.environ.get("GOOGLE_API_KEY")
65
+ genai.configure(api_key=self._api_key)
66
+ self._client = genai.GenerativeModel(self.model_type.value)
67
+ self._token_counter: Optional[BaseTokenCounter] = None
68
+ keys = list(self.model_config_dict.keys())
69
+ generation_config_dict = {
70
+ k: self.model_config_dict.pop(k)
71
+ for k in keys
72
+ if hasattr(GenerationConfig, k)
73
+ }
74
+ generation_config = genai.types.GenerationConfig(
75
+ **generation_config_dict
76
+ )
77
+ self.model_config_dict["generation_config"] = generation_config
78
+
79
+ @property
80
+ def token_counter(self) -> BaseTokenCounter:
81
+ if not self._token_counter:
82
+ self._token_counter = GeminiTokenCounter(self.model_type)
83
+ return self._token_counter
84
+
85
+ @api_keys_required("GOOGLE_API_KEY")
86
+ def run(
87
+ self,
88
+ messages: List[OpenAIMessage],
89
+ ) -> ChatCompletion:
90
+ r"""Runs inference of Gemini model.
91
+ This method can handle multimodal input
92
+
93
+ Args:
94
+ messages: Message list or Message with the chat history
95
+ in OpenAI format.
96
+
97
+ Returns:
98
+ response: A ChatCompletion object formatted for the OpenAI API.
99
+ """
100
+ response = self._client.generate_content(
101
+ contents=self.to_gemini_req(messages),
102
+ **self.model_config_dict,
103
+ )
104
+ response.resolve()
105
+ return self.to_openai_response(response)
106
+
107
+ def check_model_config(self):
108
+ r"""Check whether the model configuration contains any
109
+ unexpected arguments to Gemini API.
110
+
111
+ Raises:
112
+ ValueError: If the model configuration dictionary contains any
113
+ unexpected arguments to Gemini API.
114
+ """
115
+ if self.model_config_dict is not None:
116
+ for param in self.model_config_dict:
117
+ if param not in Gemini_API_PARAMS:
118
+ raise ValueError(
119
+ f"Unexpected argument `{param}` is "
120
+ "input into Gemini model backend."
121
+ )
122
+
123
+ @property
124
+ def stream(self) -> bool:
125
+ r"""Returns whether the model is in stream mode,
126
+ which sends partial results each time.
127
+
128
+ Returns:
129
+ bool: Whether the model is in stream mode.
130
+ """
131
+ return self.model_config_dict.get('stream', False)
132
+
133
+ def to_gemini_req(self, messages: List[OpenAIMessage]) -> 'ContentsType':
134
+ r"""Converts the request from the OpenAI API format to
135
+ the Gemini API request format.
136
+
137
+ Args:
138
+ messages: The request object from the OpenAI API.
139
+
140
+ Returns:
141
+ converted_messages: A list of messages formatted for Gemini API.
142
+ """
143
+ # role reference
144
+ # https://ai.google.dev/api/python/google/generativeai/protos/Content
145
+ converted_messages = []
146
+ for message in messages:
147
+ role = message.get('role')
148
+ if role == 'assistant':
149
+ role_to_gemini = 'model'
150
+ else:
151
+ role_to_gemini = 'user'
152
+ converted_message = {
153
+ "role": role_to_gemini,
154
+ "parts": message.get("content"),
155
+ }
156
+ converted_messages.append(converted_message)
157
+ return converted_messages
158
+
159
+ def to_openai_response(
160
+ self,
161
+ response: 'GenerateContentResponse',
162
+ ) -> ChatCompletion:
163
+ r"""Converts the response from the Gemini API to the OpenAI API
164
+ response format.
165
+
166
+ Args:
167
+ response: The response object returned by the Gemini API
168
+
169
+ Returns:
170
+ openai_response: A ChatCompletion object formatted for
171
+ the OpenAI API.
172
+ """
173
+ import time
174
+ import uuid
175
+
176
+ openai_response = ChatCompletion(
177
+ id=f"chatcmpl-{uuid.uuid4().hex!s}",
178
+ object="chat.completion",
179
+ created=int(time.time()),
180
+ model=self.model_type.value,
181
+ choices=[],
182
+ )
183
+ for i, candidate in enumerate(response.candidates):
184
+ content = ""
185
+ if candidate.content and len(candidate.content.parts) > 0:
186
+ content = candidate.content.parts[0].text
187
+ finish_reason = candidate.finish_reason
188
+ finish_reason_mapping = {
189
+ "FinishReason.STOP": "stop",
190
+ "FinishReason.SAFETY": "content_filter",
191
+ "FinishReason.RECITATION": "content_filter",
192
+ "FinishReason.MAX_TOKENS": "length",
193
+ }
194
+ finish_reason = finish_reason_mapping.get(finish_reason, "stop")
195
+ choice = Choice(
196
+ index=i,
197
+ message=ChatCompletionMessage(
198
+ role="assistant", content=content
199
+ ),
200
+ finish_reason=finish_reason,
201
+ )
202
+ openai_response.choices.append(choice)
203
+ return openai_response