camel-ai 0.1.5.3__py3-none-any.whl → 0.1.5.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. See the registry's advisory page for more details.

Files changed (51) hide show
  1. camel/__init__.py +1 -1
  2. camel/agents/knowledge_graph_agent.py +4 -1
  3. camel/configs/__init__.py +6 -0
  4. camel/configs/litellm_config.py +8 -18
  5. camel/configs/ollama_config.py +85 -0
  6. camel/configs/zhipuai_config.py +78 -0
  7. camel/embeddings/base.py +10 -9
  8. camel/embeddings/openai_embedding.py +27 -14
  9. camel/embeddings/sentence_transformers_embeddings.py +28 -14
  10. camel/functions/search_functions.py +5 -14
  11. camel/functions/slack_functions.py +5 -7
  12. camel/functions/twitter_function.py +3 -8
  13. camel/functions/weather_functions.py +3 -8
  14. camel/interpreters/__init__.py +2 -0
  15. camel/interpreters/docker_interpreter.py +235 -0
  16. camel/loaders/__init__.py +2 -0
  17. camel/loaders/base_io.py +5 -9
  18. camel/loaders/jina_url_reader.py +99 -0
  19. camel/loaders/unstructured_io.py +4 -6
  20. camel/models/anthropic_model.py +6 -4
  21. camel/models/litellm_model.py +49 -21
  22. camel/models/model_factory.py +1 -0
  23. camel/models/nemotron_model.py +14 -6
  24. camel/models/ollama_model.py +11 -17
  25. camel/models/openai_audio_models.py +10 -2
  26. camel/models/openai_model.py +4 -3
  27. camel/models/zhipuai_model.py +12 -6
  28. camel/retrievers/auto_retriever.py +2 -2
  29. camel/retrievers/bm25_retriever.py +3 -8
  30. camel/retrievers/cohere_rerank_retriever.py +3 -5
  31. camel/storages/__init__.py +2 -0
  32. camel/storages/graph_storages/graph_element.py +9 -1
  33. camel/storages/graph_storages/neo4j_graph.py +3 -7
  34. camel/storages/key_value_storages/__init__.py +2 -0
  35. camel/storages/key_value_storages/redis.py +169 -0
  36. camel/storages/vectordb_storages/milvus.py +3 -7
  37. camel/storages/vectordb_storages/qdrant.py +3 -7
  38. camel/toolkits/__init__.py +2 -0
  39. camel/toolkits/code_execution.py +69 -0
  40. camel/toolkits/github_toolkit.py +5 -9
  41. camel/types/enums.py +49 -20
  42. camel/utils/__init__.py +2 -2
  43. camel/utils/async_func.py +42 -0
  44. camel/utils/commons.py +31 -49
  45. camel/utils/token_counting.py +40 -1
  46. {camel_ai-0.1.5.3.dist-info → camel_ai-0.1.5.5.dist-info}/METADATA +16 -8
  47. {camel_ai-0.1.5.3.dist-info → camel_ai-0.1.5.5.dist-info}/RECORD +48 -44
  48. camel/bots/__init__.py +0 -20
  49. camel/bots/discord_bot.py +0 -103
  50. camel/bots/telegram_bot.py +0 -84
  51. {camel_ai-0.1.5.3.dist-info → camel_ai-0.1.5.5.dist-info}/WHEEL +0 -0
@@ -0,0 +1,235 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

import io
import shlex
import tarfile
import uuid
from pathlib import Path
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional

from colorama import Fore

from camel.interpreters.base import BaseInterpreter
from camel.interpreters.interpreter_error import InterpreterError
from camel.utils import is_docker_running

if TYPE_CHECKING:
    from docker.models.containers import Container


class DockerInterpreter(BaseInterpreter):
    r"""A class for executing code files or code strings in a docker
    container.

    This class handles the execution of code in different scripting languages
    (currently Python and Bash) within a docker container, capturing their
    stdout and stderr streams, and allowing user checking before executing
    code strings.

    Args:
        require_confirm (bool, optional): If `True`, prompt user before
            running code strings for security. Defaults to `True`.
        print_stdout (bool, optional): If `True`, print the standard
            output of the executed code. Defaults to `False`.
        print_stderr (bool, optional): If `True`, print the standard error
            of the executed code. Defaults to `True`.
    """

    # Shell command template used to execute a file of each code type.
    _CODE_EXECUTE_CMD_MAPPING: ClassVar[Dict[str, str]] = {
        "python": "python {file_name}",
        "bash": "bash {file_name}",
    }

    # Canonical file extension for each code type.
    _CODE_EXTENSION_MAPPING: ClassVar[Dict[str, str]] = {
        "python": "py",
        "bash": "sh",
    }

    # Aliases accepted from callers, normalized to a canonical code type.
    _CODE_TYPE_MAPPING: ClassVar[Dict[str, str]] = {
        "python": "python",
        "py3": "python",
        "python3": "python",
        "py": "python",
        "shell": "bash",
        "bash": "bash",
        "sh": "bash",
    }

    def __init__(
        self,
        require_confirm: bool = True,
        print_stdout: bool = False,
        print_stderr: bool = True,
    ) -> None:
        self.require_confirm = require_confirm
        self.print_stdout = print_stdout
        self.print_stderr = print_stderr

        # Lazy initialization of the container; created on first `run()`.
        self._container: Optional[Container] = None

    def __del__(self) -> None:
        # Best-effort cleanup: remove the detached container when the
        # interpreter is garbage-collected.
        if self._container is not None:
            self._container.remove(force=True)

    def _initialize_if_needed(self) -> None:
        r"""Starts the backing container if it has not been started yet.

        Raises:
            InterpreterError: If the docker daemon is not running.
        """
        if self._container is not None:
            return

        if not is_docker_running():
            raise InterpreterError(
                "Docker daemon is not running. Please install/start docker "
                "and try again."
            )

        import docker

        client = docker.from_env()
        # `tail -f /dev/null` keeps the container alive so repeated
        # exec_run calls can reuse it.
        self._container = client.containers.run(
            "python:3.10",
            detach=True,
            name=f"camel-interpreter-{uuid.uuid4()}",
            command="tail -f /dev/null",
        )

    def _create_file_in_container(self, content: str) -> Path:
        r"""Copies ``content`` into a uniquely named file under ``/tmp``
        inside the container and returns its in-container path.

        Args:
            content (str): The code string to write into the container.

        Returns:
            Path: The path of the created file inside the container.

        Raises:
            InterpreterError: If the container is not initialized.
        """
        # get a random name for the file
        filename = str(uuid.uuid4())
        # create a tar in memory; put_archive only accepts tar archives
        data = content.encode('utf-8')
        tar_stream = io.BytesIO()
        with tarfile.open(fileobj=tar_stream, mode='w') as tar:
            tarinfo = tarfile.TarInfo(name=filename)
            # Size must be the encoded byte length, not the character
            # count, or non-ASCII content yields a truncated archive.
            tarinfo.size = len(data)
            tar.addfile(tarinfo, io.BytesIO(data))
        tar_stream.seek(0)

        # copy the tar into the container
        if self._container is None:
            raise InterpreterError(
                "Container is not initialized. Try running the code again."
            )
        self._container.put_archive("/tmp", tar_stream)
        return Path(f"/tmp/{filename}")

    def _run_file_in_container(
        self,
        file: Path,
        code_type: str,
    ) -> str:
        r"""Executes ``file`` inside the container and returns the combined
        stdout/stderr output.

        Args:
            file (Path): In-container path of the file to execute.
            code_type (str): The (possibly aliased) code type.

        Returns:
            str: Captured stdout, followed by stderr wrapped in
                ``(stderr: ...)`` when present.

        Raises:
            InterpreterError: If the container is not initialized or the
                code type is unsupported.
        """
        code_type = self._check_code_type(code_type)
        commands = shlex.split(
            self._CODE_EXECUTE_CMD_MAPPING[code_type].format(
                file_name=str(file)
            )
        )
        if self._container is None:
            raise InterpreterError(
                "Container is not initialized. Try running the code again."
            )
        # demux=True splits the exec output into (stdout, stderr) bytes.
        stdout, stderr = self._container.exec_run(
            commands,
            demux=True,
        ).output

        if self.print_stdout and stdout:
            print("======stdout======")
            print(Fore.GREEN + stdout.decode() + Fore.RESET)
            print("==================")
        if self.print_stderr and stderr:
            print("======stderr======")
            print(Fore.RED + stderr.decode() + Fore.RESET)
            print("==================")
        exec_result = f"{stdout.decode()}" if stdout else ""
        exec_result += f"(stderr: {stderr.decode()})" if stderr else ""
        return exec_result

    def run(
        self,
        code: str,
        code_type: str,
    ) -> str:
        r"""Executes the given code in the container attached to the
        interpreter, and captures the stdout and stderr streams.

        Args:
            code (str): The code string to execute.
            code_type (str): The type of code to execute (e.g., 'python',
                'bash').

        Returns:
            str: A string containing the captured stdout and stderr of the
                executed code.

        Raises:
            InterpreterError: If the user declines to run the code, or the
                code type is unsupported, or there is an error in the docker
                API/container
        """
        import docker.errors

        code_type = self._check_code_type(code_type)

        # Print code for security checking
        if self.require_confirm:
            print(f"The following {code_type} code will run in container:")
            print(Fore.CYAN + code + Fore.RESET)
            while True:
                choice = input("Running code? [Y/n]:").lower()
                if choice in ["y", "yes", "ye", ""]:
                    break
                elif choice not in ["no", "n"]:
                    # Unrecognized answer: ask again.
                    continue
                raise InterpreterError(
                    "Execution halted: User opted not to run the code. "
                    "This choice stops the current operation and any "
                    "further code execution."
                )

        self._initialize_if_needed()

        try:
            temp_file_path = self._create_file_in_container(code)
            result = self._run_file_in_container(temp_file_path, code_type)
        except docker.errors.APIError as e:
            raise InterpreterError(
                f"Execution halted due to docker API error: {e.explanation}. "
                "This choice stops the current operation and any "
                "further code execution."
            ) from e
        except docker.errors.DockerException as e:
            raise InterpreterError(
                f"Execution halted due to docker exception: {e}. "
                "This choice stops the current operation and any "
                "further code execution."
            ) from e
        return result

    def _check_code_type(self, code_type: str) -> str:
        r"""Normalizes a caller-supplied code type to its canonical form.

        Raises:
            InterpreterError: If the code type is not supported.
        """
        if code_type not in self._CODE_TYPE_MAPPING:
            raise InterpreterError(
                f"Unsupported code type {code_type}. Currently "
                f"`{self.__class__.__name__}` only supports "
                f"{', '.join(self._CODE_EXTENSION_MAPPING.keys())}."
            )
        return self._CODE_TYPE_MAPPING[code_type]

    def supported_code_types(self) -> List[str]:
        r"""Provides supported code types by the interpreter."""
        return list(self._CODE_EXTENSION_MAPPING.keys())

    def update_action_space(self, action_space: Dict[str, Any]) -> None:
        r"""Updates action space for *python* interpreter"""
        # Report this class, not SubprocessInterpreter (copy-paste fix).
        raise RuntimeError(
            f"{self.__class__.__name__} doesn't support `action_space`."
        )
camel/loaders/__init__.py CHANGED
@@ -13,10 +13,12 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
15
  from .base_io import File, read_file
16
+ from .jina_url_reader import JinaURLReader
16
17
  from .unstructured_io import UnstructuredIO
17
18
 
18
19
  __all__ = [
19
20
  'File',
20
21
  'read_file',
21
22
  'UnstructuredIO',
23
+ 'JinaURLReader',
22
24
  ]
camel/loaders/base_io.py CHANGED
@@ -19,6 +19,8 @@ from hashlib import md5
19
19
  from io import BytesIO
20
20
  from typing import Any, Dict, List, Optional
21
21
 
22
+ from camel.utils import dependencies_required
23
+
22
24
 
23
25
  class File(ABC):
24
26
  r"""Represents an uploaded file comprised of Documents"""
@@ -94,6 +96,7 @@ def strip_consecutive_newlines(text: str) -> str:
94
96
 
95
97
  class DocxFile(File):
96
98
  @classmethod
99
+ @dependencies_required('docx2txt')
97
100
  def from_bytes(cls, file: BytesIO) -> "DocxFile":
98
101
  r"""Creates a DocxFile object from a BytesIO object.
99
102
 
@@ -104,15 +107,8 @@ class DocxFile(File):
104
107
  Returns:
105
108
  DocxFile: A DocxFile object.
106
109
  """
107
- # Use docx2txt to extract text from docx files
108
- try:
109
- import docx2txt
110
- except ImportError:
111
- raise ImportError(
112
- "Please install `docx2txt` first. "
113
- "You can install it by running "
114
- "`pip install docx2txt`."
115
- )
110
+ import docx2txt
111
+
116
112
  text = docx2txt.process(file)
117
113
  text = strip_consecutive_newlines(text)
118
114
  # Create a dictionary with the extracted text
@@ -0,0 +1,99 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

import os
from typing import Any, Optional

from camel.types.enums import JinaReturnFormat

JINA_ENDPOINT = "https://r.jina.ai/"


class JinaURLReader:
    r"""URL Reader provided by Jina AI. The output is cleaner and more
    LLM-friendly than the URL Reader of UnstructuredIO. Can be configured to
    replace the UnstructuredIO URL Reader in the pipeline.

    Args:
        api_key (Optional[str], optional): The API key for Jina AI. If not
            provided, the reader will have a lower rate limit. Defaults to
            None.
        return_format (ReturnFormat, optional): The level of detail
            of the returned content, which is optimized for LLMs. For
            now screenshots are not supported. Defaults to
            ReturnFormat.DEFAULT.
        json_response (bool, optional): Whether to return the response
            in JSON format. Defaults to False.
        timeout (int, optional): The maximum time in seconds to wait for
            the page to be rendered. Defaults to 30.
        **kwargs (Any): Additional keyword arguments, including proxies,
            cookies, etc. It should align with the HTTP Header field and
            value pairs listed in the reference.

    References:
        https://jina.ai/reader
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        return_format: JinaReturnFormat = JinaReturnFormat.DEFAULT,
        json_response: bool = False,
        timeout: int = 30,
        **kwargs: Any,
    ) -> None:
        api_key = api_key or os.getenv('JINA_API_KEY')
        if api_key is None:
            print(
                "[JinaURLReader] JINA_API_KEY not set. This will result in a "
                "low rate limit of Jina URL Reader. Get API key here: "
                "https://jina.ai/reader."
            )

        # if the following field not provided, it will be None
        api_field = f"Bearer {api_key}" if api_key else None
        json_field = "application/json" if json_response else None

        raw_headers = {
            "Authorization": api_field,
            "X-Return-Format": return_format.value,
            "Accept": json_field,
            "X-Timeout": str(timeout),
            **kwargs,
        }

        # eliminate None values
        self._headers = {k: v for k, v in raw_headers.items() if v}
        # X-Timeout only bounds the server-side page render; keep the value
        # so the HTTP request itself can also be bounded client-side.
        self._timeout = timeout

    def read_content(self, url: str) -> str:
        r"""Reads the content of a URL and returns it as a string with
        given form.

        Args:
            url (str): The URL to read.

        Returns:
            str: The content of the URL.

        Raises:
            Exception: If the HTTP request fails, times out, or returns a
                non-2xx status code.
        """

        import requests

        full_url = f"{JINA_ENDPOINT}{url}"
        try:
            # Without a client-side timeout, requests.get can block forever;
            # allow a little slack over the server-side render timeout.
            resp = requests.get(
                full_url,
                headers=self._headers,
                timeout=self._timeout + 5,
            )
            resp.raise_for_status()
        except Exception as e:
            raise Exception(f"Failed to read content from {url}: {e}") from e

        return resp.text
@@ -16,6 +16,8 @@ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
16
16
 
17
17
  from unstructured.documents.elements import Element
18
18
 
19
+ from camel.utils import dependencies_required
20
+
19
21
 
20
22
  class UnstructuredIO:
21
23
  r"""A class to handle various functionalities provided by the
@@ -37,6 +39,7 @@ class UnstructuredIO:
37
39
  """
38
40
  self._ensure_unstructured_version(self.UNSTRUCTURED_MIN_VERSION)
39
41
 
42
+ @dependencies_required('unstructured')
40
43
  def _ensure_unstructured_version(self, min_version: str) -> None:
41
44
  r"""Validates that the installed 'Unstructured' library version
42
45
  satisfies the specified minimum version requirement. This function is
@@ -58,12 +61,7 @@ class UnstructuredIO:
58
61
  strings.
59
62
  """
60
63
  from packaging import version
61
-
62
- try:
63
- from unstructured.__version__ import __version__
64
-
65
- except ImportError as e:
66
- raise ImportError("Package `unstructured` not installed.") from e
64
+ from unstructured.__version__ import __version__
67
65
 
68
66
  # Use packaging.version to compare versions
69
67
  min_ver = version.parse(min_version)
@@ -23,7 +23,7 @@ from camel.types import ChatCompletion, ModelType
23
23
  from camel.utils import (
24
24
  AnthropicTokenCounter,
25
25
  BaseTokenCounter,
26
- model_api_key_required,
26
+ api_keys_required,
27
27
  )
28
28
 
29
29
 
@@ -46,11 +46,13 @@ class AnthropicModel(BaseModelBackend):
46
46
  be fed into Anthropic.messages.create().
47
47
  api_key (Optional[str]): The API key for authenticating with the
48
48
  Anthropic service. (default: :obj:`None`)
49
- url (Optional[str]): The url to the model service.
49
+ url (Optional[str]): The url to the Anthropic service. (default:
50
+ :obj:`None`)
50
51
  """
51
52
  super().__init__(model_type, model_config_dict, api_key, url)
52
53
  self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
53
- self.client = Anthropic(api_key=self._api_key, base_url=url)
54
+ self._url = url or os.environ.get("ANTHROPIC_API_BASE_URL")
55
+ self.client = Anthropic(api_key=self._api_key, base_url=self._url)
54
56
  self._token_counter: Optional[BaseTokenCounter] = None
55
57
 
56
58
  def _convert_response_from_anthropic_to_openai(self, response):
@@ -96,7 +98,7 @@ class AnthropicModel(BaseModelBackend):
96
98
  """
97
99
  return self.client.count_tokens(prompt)
98
100
 
99
- @model_api_key_required
101
+ @api_keys_required("ANTHROPIC_API_KEY")
100
102
  def run(
101
103
  self,
102
104
  messages: List[OpenAIMessage],
@@ -11,24 +11,25 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
14
+ from typing import Any, Dict, List, Optional
15
15
 
16
16
  from camel.configs import LITELLM_API_PARAMS
17
17
  from camel.messages import OpenAIMessage
18
+ from camel.types import ChatCompletion
18
19
  from camel.utils import LiteLLMTokenCounter
19
20
 
20
- if TYPE_CHECKING:
21
- from litellm.utils import CustomStreamWrapper, ModelResponse
22
-
23
21
 
24
22
  class LiteLLMModel:
25
23
  r"""Constructor for LiteLLM backend with OpenAI compatibility."""
26
24
 
27
- # NOTE: Currently "stream": True is not supported with LiteLLM due to the
28
- # limitation of the current camel design.
25
+ # NOTE: Currently stream mode is not supported.
29
26
 
30
27
  def __init__(
31
- self, model_type: str, model_config_dict: Dict[str, Any]
28
+ self,
29
+ model_type: str,
30
+ model_config_dict: Dict[str, Any],
31
+ api_key: Optional[str] = None,
32
+ url: Optional[str] = None,
32
33
  ) -> None:
33
34
  r"""Constructor for LiteLLM backend.
34
35
 
@@ -37,12 +38,48 @@ class LiteLLMModel:
37
38
  such as GPT-3.5-turbo, Claude-2, etc.
38
39
  model_config_dict (Dict[str, Any]): A dictionary of parameters for
39
40
  the model configuration.
41
+ api_key (Optional[str]): The API key for authenticating with the
42
+ model service. (default: :obj:`None`)
43
+ url (Optional[str]): The url to the model service. (default:
44
+ :obj:`None`)
40
45
  """
41
46
  self.model_type = model_type
42
47
  self.model_config_dict = model_config_dict
43
48
  self._client = None
44
49
  self._token_counter: Optional[LiteLLMTokenCounter] = None
45
50
  self.check_model_config()
51
+ self._url = url
52
+ self._api_key = api_key
53
+
54
+ def _convert_response_from_litellm_to_openai(
55
+ self, response
56
+ ) -> ChatCompletion:
57
+ r"""Converts a response from the LiteLLM format to the OpenAI format.
58
+
59
+ Parameters:
60
+ response (LiteLLMResponse): The response object from LiteLLM.
61
+
62
+ Returns:
63
+ ChatCompletion: The response object in OpenAI's format.
64
+ """
65
+ return ChatCompletion.construct(
66
+ id=response.id,
67
+ choices=[
68
+ {
69
+ "index": response.choices[0].index,
70
+ "message": {
71
+ "role": response.choices[0].message.role,
72
+ "content": response.choices[0].message.content,
73
+ },
74
+ "finish_reason": response.choices[0].finish_reason,
75
+ }
76
+ ],
77
+ created=response.created,
78
+ model=response.model,
79
+ object=response.object,
80
+ system_fingerprint=response.system_fingerprint,
81
+ usage=response.usage,
82
+ )
46
83
 
47
84
  @property
48
85
  def client(self):
@@ -67,7 +104,7 @@ class LiteLLMModel:
67
104
  def run(
68
105
  self,
69
106
  messages: List[OpenAIMessage],
70
- ) -> Union['ModelResponse', 'CustomStreamWrapper']:
107
+ ) -> ChatCompletion:
71
108
  r"""Runs inference of LiteLLM chat completion.
72
109
 
73
110
  Args:
@@ -75,15 +112,16 @@ class LiteLLMModel:
75
112
  in OpenAI format.
76
113
 
77
114
  Returns:
78
- Union[ModelResponse, CustomStreamWrapper]:
79
- `ModelResponse` in the non-stream mode, or
80
- `CustomStreamWrapper` in the stream mode.
115
+ ChatCompletion
81
116
  """
82
117
  response = self.client(
118
+ api_key=self._api_key,
119
+ base_url=self._url,
83
120
  model=self.model_type,
84
121
  messages=messages,
85
122
  **self.model_config_dict,
86
123
  )
124
+ response = self._convert_response_from_litellm_to_openai(response)
87
125
  return response
88
126
 
89
127
  def check_model_config(self):
@@ -100,13 +138,3 @@ class LiteLLMModel:
100
138
  f"Unexpected argument `{param}` is "
101
139
  "input into LiteLLM model backend."
102
140
  )
103
-
104
- @property
105
- def stream(self) -> bool:
106
- r"""Returns whether the model is in stream mode, which sends partial
107
- results each time.
108
-
109
- Returns:
110
- bool: Whether the model is in stream mode.
111
- """
112
- return self.model_config_dict.get('stream', False)
@@ -80,6 +80,7 @@ class ModelFactory:
80
80
  elif isinstance(model_type, str):
81
81
  if model_platform.is_ollama:
82
82
  model_class = OllamaModel
83
+ return model_class(model_type, model_config_dict, url)
83
84
  elif model_platform.is_litellm:
84
85
  model_class = LiteLLMModel
85
86
  else:
@@ -20,7 +20,7 @@ from camel.messages import OpenAIMessage
20
20
  from camel.types import ChatCompletion, ModelType
21
21
  from camel.utils import (
22
22
  BaseTokenCounter,
23
- model_api_key_required,
23
+ api_keys_required,
24
24
  )
25
25
 
26
26
 
@@ -33,6 +33,7 @@ class NemotronModel:
33
33
  self,
34
34
  model_type: ModelType,
35
35
  api_key: Optional[str] = None,
36
+ url: Optional[str] = None,
36
37
  ) -> None:
37
38
  r"""Constructor for Nvidia backend.
38
39
 
@@ -40,18 +41,25 @@ class NemotronModel:
40
41
  model_type (ModelType): Model for which a backend is created.
41
42
  api_key (Optional[str]): The API key for authenticating with the
42
43
  Nvidia service. (default: :obj:`None`)
44
+ url (Optional[str]): The url to the Nvidia service. (default:
45
+ :obj:`None`)
43
46
  """
44
47
  self.model_type = model_type
45
- url = os.environ.get('NVIDIA_API_BASE_URL', None)
48
+ self._url = url or os.environ.get("NVIDIA_API_BASE_URL")
46
49
  self._api_key = api_key or os.environ.get("NVIDIA_API_KEY")
47
- if not url or not self._api_key:
48
- raise ValueError("The NVIDIA API base url and key should be set.")
50
+ if not self._url or not self._api_key:
51
+ raise ValueError(
52
+ "NVIDIA_API_BASE_URL and NVIDIA_API_KEY should be set."
53
+ )
49
54
  self._client = OpenAI(
50
- timeout=60, max_retries=3, base_url=url, api_key=self._api_key
55
+ timeout=60,
56
+ max_retries=3,
57
+ base_url=self._url,
58
+ api_key=self._api_key,
51
59
  )
52
60
  self._token_counter: Optional[BaseTokenCounter] = None
53
61
 
54
- @model_api_key_required
62
+ @api_keys_required("NVIDIA_API_KEY")
55
63
  def run(
56
64
  self,
57
65
  messages: List[OpenAIMessage],