camel-ai 0.1.5.4__py3-none-any.whl → 0.1.5.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

Files changed (43)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/litellm_config.py +8 -18
  4. camel/configs/ollama_config.py +85 -0
  5. camel/configs/zhipuai_config.py +78 -0
  6. camel/embeddings/openai_embedding.py +2 -2
  7. camel/functions/search_functions.py +5 -14
  8. camel/functions/slack_functions.py +5 -7
  9. camel/functions/twitter_function.py +3 -8
  10. camel/functions/weather_functions.py +3 -8
  11. camel/interpreters/__init__.py +2 -0
  12. camel/interpreters/docker_interpreter.py +235 -0
  13. camel/loaders/__init__.py +2 -0
  14. camel/loaders/base_io.py +5 -9
  15. camel/loaders/jina_url_reader.py +99 -0
  16. camel/loaders/unstructured_io.py +4 -6
  17. camel/models/anthropic_model.py +6 -4
  18. camel/models/litellm_model.py +49 -21
  19. camel/models/model_factory.py +1 -0
  20. camel/models/nemotron_model.py +14 -6
  21. camel/models/ollama_model.py +11 -17
  22. camel/models/openai_audio_models.py +10 -2
  23. camel/models/openai_model.py +4 -3
  24. camel/models/zhipuai_model.py +12 -6
  25. camel/retrievers/bm25_retriever.py +3 -8
  26. camel/retrievers/cohere_rerank_retriever.py +3 -5
  27. camel/storages/__init__.py +2 -0
  28. camel/storages/graph_storages/neo4j_graph.py +3 -7
  29. camel/storages/key_value_storages/__init__.py +2 -0
  30. camel/storages/key_value_storages/redis.py +169 -0
  31. camel/storages/vectordb_storages/milvus.py +3 -7
  32. camel/storages/vectordb_storages/qdrant.py +3 -7
  33. camel/toolkits/__init__.py +2 -0
  34. camel/toolkits/code_execution.py +69 -0
  35. camel/toolkits/github_toolkit.py +5 -9
  36. camel/types/enums.py +35 -1
  37. camel/utils/__init__.py +2 -2
  38. camel/utils/async_func.py +42 -0
  39. camel/utils/commons.py +31 -49
  40. camel/utils/token_counting.py +40 -1
  41. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.5.dist-info}/METADATA +11 -3
  42. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.5.dist-info}/RECORD +43 -36
  43. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.5.dist-info}/WHEEL +0 -0
camel/loaders/__init__.py CHANGED
@@ -13,10 +13,12 @@
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
14
 
15
15
  from .base_io import File, read_file
16
+ from .jina_url_reader import JinaURLReader
16
17
  from .unstructured_io import UnstructuredIO
17
18
 
18
19
  __all__ = [
19
20
  'File',
20
21
  'read_file',
21
22
  'UnstructuredIO',
23
+ 'JinaURLReader',
22
24
  ]
camel/loaders/base_io.py CHANGED
@@ -19,6 +19,8 @@ from hashlib import md5
19
19
  from io import BytesIO
20
20
  from typing import Any, Dict, List, Optional
21
21
 
22
+ from camel.utils import dependencies_required
23
+
22
24
 
23
25
  class File(ABC):
24
26
  r"""Represents an uploaded file comprised of Documents"""
@@ -94,6 +96,7 @@ def strip_consecutive_newlines(text: str) -> str:
94
96
 
95
97
  class DocxFile(File):
96
98
  @classmethod
99
+ @dependencies_required('docx2txt')
97
100
  def from_bytes(cls, file: BytesIO) -> "DocxFile":
98
101
  r"""Creates a DocxFile object from a BytesIO object.
99
102
 
@@ -104,15 +107,8 @@ class DocxFile(File):
104
107
  Returns:
105
108
  DocxFile: A DocxFile object.
106
109
  """
107
- # Use docx2txt to extract text from docx files
108
- try:
109
- import docx2txt
110
- except ImportError:
111
- raise ImportError(
112
- "Please install `docx2txt` first. "
113
- "You can install it by running "
114
- "`pip install docx2txt`."
115
- )
110
+ import docx2txt
111
+
116
112
  text = docx2txt.process(file)
117
113
  text = strip_consecutive_newlines(text)
118
114
  # Create a dictionary with the extracted text
@@ -0,0 +1,99 @@
1
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
+
15
+ import os
16
+ from typing import Any, Optional
17
+
18
+ from camel.types.enums import JinaReturnFormat
19
+
20
+ JINA_ENDPOINT = "https://r.jina.ai/"
21
+
22
+
23
+ class JinaURLReader:
24
+ r"""URL Reader provided by Jina AI. The output is cleaner and more
25
+ LLM-friendly than the URL Reader of UnstructuredIO. Can be configured to
26
+ replace the UnstructuredIO URL Reader in the pipeline.
27
+
28
+ Args:
29
+ api_key (Optional[str], optional): The API key for Jina AI. If not
30
+ provided, the reader will have a lower rate limit. Defaults to
31
+ None.
32
+ return_format (ReturnFormat, optional): The level of detail
33
+ of the returned content, which is optimized for LLMs. For
34
+ now screenshots are not supported. Defaults to
35
+ ReturnFormat.DEFAULT.
36
+ json_response (bool, optional): Whether to return the response
37
+ in JSON format. Defaults to False.
38
+ timeout (int, optional): The maximum time in seconds to wait for
39
+ the page to be rendered. Defaults to 30.
40
+ **kwargs (Any): Additional keyword arguments, including proxies,
41
+ cookies, etc. It should align with the HTTP Header field and
42
+ value pairs listed in the reference.
43
+
44
+ References:
45
+ https://jina.ai/reader
46
+ """
47
+
48
+ def __init__(
49
+ self,
50
+ api_key: Optional[str] = None,
51
+ return_format: JinaReturnFormat = JinaReturnFormat.DEFAULT,
52
+ json_response: bool = False,
53
+ timeout: int = 30,
54
+ **kwargs: Any,
55
+ ) -> None:
56
+ api_key = api_key or os.getenv('JINA_API_KEY')
57
+ if api_key is None:
58
+ print(
59
+ "[JinaURLReader] JINA_API_KEY not set. This will result in a "
60
+ "low rate limit of Jina URL Reader. Get API key here: "
61
+ "https://jina.ai/reader."
62
+ )
63
+
64
+ # if the following field not provided, it will be None
65
+ api_field = f"Bearer {api_key}" if api_key else None
66
+ json_field = "application/json" if json_response else None
67
+
68
+ raw_headers = {
69
+ "Authorization": api_field,
70
+ "X-Return-Format": return_format.value,
71
+ "Accept": json_field,
72
+ "X-Timeout": str(timeout),
73
+ **kwargs,
74
+ }
75
+
76
+ # eliminate None values
77
+ self._headers = {k: v for k, v in raw_headers.items() if v}
78
+
79
+ def read_content(self, url: str) -> str:
80
+ r"""Reads the content of a URL and returns it as a string with
81
+ given form.
82
+
83
+ Args:
84
+ url (str): The URL to read.
85
+
86
+ Returns:
87
+ str: The content of the URL.
88
+ """
89
+
90
+ import requests
91
+
92
+ full_url = f"{JINA_ENDPOINT}{url}"
93
+ try:
94
+ resp = requests.get(full_url, headers=self._headers)
95
+ resp.raise_for_status()
96
+ except Exception as e:
97
+ raise Exception(f"Failed to read content from {url}: {e}") from e
98
+
99
+ return resp.text
@@ -16,6 +16,8 @@ from typing import Any, Dict, List, Literal, Optional, Tuple, Union
16
16
 
17
17
  from unstructured.documents.elements import Element
18
18
 
19
+ from camel.utils import dependencies_required
20
+
19
21
 
20
22
  class UnstructuredIO:
21
23
  r"""A class to handle various functionalities provided by the
@@ -37,6 +39,7 @@ class UnstructuredIO:
37
39
  """
38
40
  self._ensure_unstructured_version(self.UNSTRUCTURED_MIN_VERSION)
39
41
 
42
+ @dependencies_required('unstructured')
40
43
  def _ensure_unstructured_version(self, min_version: str) -> None:
41
44
  r"""Validates that the installed 'Unstructured' library version
42
45
  satisfies the specified minimum version requirement. This function is
@@ -58,12 +61,7 @@ class UnstructuredIO:
58
61
  strings.
59
62
  """
60
63
  from packaging import version
61
-
62
- try:
63
- from unstructured.__version__ import __version__
64
-
65
- except ImportError as e:
66
- raise ImportError("Package `unstructured` not installed.") from e
64
+ from unstructured.__version__ import __version__
67
65
 
68
66
  # Use packaging.version to compare versions
69
67
  min_ver = version.parse(min_version)
@@ -23,7 +23,7 @@ from camel.types import ChatCompletion, ModelType
23
23
  from camel.utils import (
24
24
  AnthropicTokenCounter,
25
25
  BaseTokenCounter,
26
- model_api_key_required,
26
+ api_keys_required,
27
27
  )
28
28
 
29
29
 
@@ -46,11 +46,13 @@ class AnthropicModel(BaseModelBackend):
46
46
  be fed into Anthropic.messages.create().
47
47
  api_key (Optional[str]): The API key for authenticating with the
48
48
  Anthropic service. (default: :obj:`None`)
49
- url (Optional[str]): The url to the model service.
49
+ url (Optional[str]): The url to the Anthropic service. (default:
50
+ :obj:`None`)
50
51
  """
51
52
  super().__init__(model_type, model_config_dict, api_key, url)
52
53
  self._api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
53
- self.client = Anthropic(api_key=self._api_key, base_url=url)
54
+ self._url = url or os.environ.get("ANTHROPIC_API_BASE_URL")
55
+ self.client = Anthropic(api_key=self._api_key, base_url=self._url)
54
56
  self._token_counter: Optional[BaseTokenCounter] = None
55
57
 
56
58
  def _convert_response_from_anthropic_to_openai(self, response):
@@ -96,7 +98,7 @@ class AnthropicModel(BaseModelBackend):
96
98
  """
97
99
  return self.client.count_tokens(prompt)
98
100
 
99
- @model_api_key_required
101
+ @api_keys_required("ANTHROPIC_API_KEY")
100
102
  def run(
101
103
  self,
102
104
  messages: List[OpenAIMessage],
@@ -11,24 +11,25 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
14
+ from typing import Any, Dict, List, Optional
15
15
 
16
16
  from camel.configs import LITELLM_API_PARAMS
17
17
  from camel.messages import OpenAIMessage
18
+ from camel.types import ChatCompletion
18
19
  from camel.utils import LiteLLMTokenCounter
19
20
 
20
- if TYPE_CHECKING:
21
- from litellm.utils import CustomStreamWrapper, ModelResponse
22
-
23
21
 
24
22
  class LiteLLMModel:
25
23
  r"""Constructor for LiteLLM backend with OpenAI compatibility."""
26
24
 
27
- # NOTE: Currently "stream": True is not supported with LiteLLM due to the
28
- # limitation of the current camel design.
25
+ # NOTE: Currently stream mode is not supported.
29
26
 
30
27
  def __init__(
31
- self, model_type: str, model_config_dict: Dict[str, Any]
28
+ self,
29
+ model_type: str,
30
+ model_config_dict: Dict[str, Any],
31
+ api_key: Optional[str] = None,
32
+ url: Optional[str] = None,
32
33
  ) -> None:
33
34
  r"""Constructor for LiteLLM backend.
34
35
 
@@ -37,12 +38,48 @@ class LiteLLMModel:
37
38
  such as GPT-3.5-turbo, Claude-2, etc.
38
39
  model_config_dict (Dict[str, Any]): A dictionary of parameters for
39
40
  the model configuration.
41
+ api_key (Optional[str]): The API key for authenticating with the
42
+ model service. (default: :obj:`None`)
43
+ url (Optional[str]): The url to the model service. (default:
44
+ :obj:`None`)
40
45
  """
41
46
  self.model_type = model_type
42
47
  self.model_config_dict = model_config_dict
43
48
  self._client = None
44
49
  self._token_counter: Optional[LiteLLMTokenCounter] = None
45
50
  self.check_model_config()
51
+ self._url = url
52
+ self._api_key = api_key
53
+
54
+ def _convert_response_from_litellm_to_openai(
55
+ self, response
56
+ ) -> ChatCompletion:
57
+ r"""Converts a response from the LiteLLM format to the OpenAI format.
58
+
59
+ Parameters:
60
+ response (LiteLLMResponse): The response object from LiteLLM.
61
+
62
+ Returns:
63
+ ChatCompletion: The response object in OpenAI's format.
64
+ """
65
+ return ChatCompletion.construct(
66
+ id=response.id,
67
+ choices=[
68
+ {
69
+ "index": response.choices[0].index,
70
+ "message": {
71
+ "role": response.choices[0].message.role,
72
+ "content": response.choices[0].message.content,
73
+ },
74
+ "finish_reason": response.choices[0].finish_reason,
75
+ }
76
+ ],
77
+ created=response.created,
78
+ model=response.model,
79
+ object=response.object,
80
+ system_fingerprint=response.system_fingerprint,
81
+ usage=response.usage,
82
+ )
46
83
 
47
84
  @property
48
85
  def client(self):
@@ -67,7 +104,7 @@ class LiteLLMModel:
67
104
  def run(
68
105
  self,
69
106
  messages: List[OpenAIMessage],
70
- ) -> Union['ModelResponse', 'CustomStreamWrapper']:
107
+ ) -> ChatCompletion:
71
108
  r"""Runs inference of LiteLLM chat completion.
72
109
 
73
110
  Args:
@@ -75,15 +112,16 @@ class LiteLLMModel:
75
112
  in OpenAI format.
76
113
 
77
114
  Returns:
78
- Union[ModelResponse, CustomStreamWrapper]:
79
- `ModelResponse` in the non-stream mode, or
80
- `CustomStreamWrapper` in the stream mode.
115
+ ChatCompletion
81
116
  """
82
117
  response = self.client(
118
+ api_key=self._api_key,
119
+ base_url=self._url,
83
120
  model=self.model_type,
84
121
  messages=messages,
85
122
  **self.model_config_dict,
86
123
  )
124
+ response = self._convert_response_from_litellm_to_openai(response)
87
125
  return response
88
126
 
89
127
  def check_model_config(self):
@@ -100,13 +138,3 @@ class LiteLLMModel:
100
138
  f"Unexpected argument `{param}` is "
101
139
  "input into LiteLLM model backend."
102
140
  )
103
-
104
- @property
105
- def stream(self) -> bool:
106
- r"""Returns whether the model is in stream mode, which sends partial
107
- results each time.
108
-
109
- Returns:
110
- bool: Whether the model is in stream mode.
111
- """
112
- return self.model_config_dict.get('stream', False)
@@ -80,6 +80,7 @@ class ModelFactory:
80
80
  elif isinstance(model_type, str):
81
81
  if model_platform.is_ollama:
82
82
  model_class = OllamaModel
83
+ return model_class(model_type, model_config_dict, url)
83
84
  elif model_platform.is_litellm:
84
85
  model_class = LiteLLMModel
85
86
  else:
@@ -20,7 +20,7 @@ from camel.messages import OpenAIMessage
20
20
  from camel.types import ChatCompletion, ModelType
21
21
  from camel.utils import (
22
22
  BaseTokenCounter,
23
- model_api_key_required,
23
+ api_keys_required,
24
24
  )
25
25
 
26
26
 
@@ -33,6 +33,7 @@ class NemotronModel:
33
33
  self,
34
34
  model_type: ModelType,
35
35
  api_key: Optional[str] = None,
36
+ url: Optional[str] = None,
36
37
  ) -> None:
37
38
  r"""Constructor for Nvidia backend.
38
39
 
@@ -40,18 +41,25 @@ class NemotronModel:
40
41
  model_type (ModelType): Model for which a backend is created.
41
42
  api_key (Optional[str]): The API key for authenticating with the
42
43
  Nvidia service. (default: :obj:`None`)
44
+ url (Optional[str]): The url to the Nvidia service. (default:
45
+ :obj:`None`)
43
46
  """
44
47
  self.model_type = model_type
45
- url = os.environ.get('NVIDIA_API_BASE_URL', None)
48
+ self._url = url or os.environ.get("NVIDIA_API_BASE_URL")
46
49
  self._api_key = api_key or os.environ.get("NVIDIA_API_KEY")
47
- if not url or not self._api_key:
48
- raise ValueError("The NVIDIA API base url and key should be set.")
50
+ if not self._url or not self._api_key:
51
+ raise ValueError(
52
+ "NVIDIA_API_BASE_URL and NVIDIA_API_KEY should be set."
53
+ )
49
54
  self._client = OpenAI(
50
- timeout=60, max_retries=3, base_url=url, api_key=self._api_key
55
+ timeout=60,
56
+ max_retries=3,
57
+ base_url=self._url,
58
+ api_key=self._api_key,
51
59
  )
52
60
  self._token_counter: Optional[BaseTokenCounter] = None
53
61
 
54
- @model_api_key_required
62
+ @api_keys_required("NVIDIA_API_KEY")
55
63
  def run(
56
64
  self,
57
65
  messages: List[OpenAIMessage],
@@ -11,12 +11,11 @@
11
11
  # See the License for the specific language governing permissions and
12
12
  # limitations under the License.
13
13
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
14
- import os
15
14
  from typing import Any, Dict, List, Optional, Union
16
15
 
17
16
  from openai import OpenAI, Stream
18
17
 
19
- from camel.configs import OPENAI_API_PARAMS
18
+ from camel.configs import OLLAMA_API_PARAMS
20
19
  from camel.messages import OpenAIMessage
21
20
  from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
22
21
  from camel.utils import BaseTokenCounter, OpenAITokenCounter
@@ -25,39 +24,34 @@ from camel.utils import BaseTokenCounter, OpenAITokenCounter
25
24
  class OllamaModel:
26
25
  r"""Ollama service interface."""
27
26
 
28
- # NOTE: Current `ModelType and `TokenCounter` desigen is not suitable,
29
- # stream mode is not supported
30
-
31
27
  def __init__(
32
28
  self,
33
29
  model_type: str,
34
30
  model_config_dict: Dict[str, Any],
35
- api_key: Optional[str] = None,
36
31
  url: Optional[str] = None,
37
32
  ) -> None:
38
33
  r"""Constructor for Ollama backend with OpenAI compatibility.
39
34
 
35
+ # Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
36
+
40
37
  Args:
41
38
  model_type (str): Model for which a backend is created.
42
39
  model_config_dict (Dict[str, Any]): A dictionary that will
43
40
  be fed into openai.ChatCompletion.create().
44
- api_key (Optional[str]): The API key for authenticating with the
45
- model service. (default: :obj:`None`)
46
- url (Optional[str]): The url to the model service.
41
+ url (Optional[str]): The url to the model service. (default:
42
+ :obj:`None`)
47
43
  """
48
44
  self.model_type = model_type
49
45
  self.model_config_dict = model_config_dict
50
- self._url = url or os.environ.get('OPENAI_API_BASE_URL')
51
- self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
52
46
  # Use OpenAI cilent as interface call Ollama
53
- # Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
54
47
  self._client = OpenAI(
55
48
  timeout=60,
56
49
  max_retries=3,
57
- base_url=self._url,
58
- api_key=self._api_key,
50
+ base_url=url,
51
+ api_key="ollama", # required but ignored
59
52
  )
60
53
  self._token_counter: Optional[BaseTokenCounter] = None
54
+ self.check_model_config()
61
55
 
62
56
  @property
63
57
  def token_counter(self) -> BaseTokenCounter:
@@ -74,17 +68,17 @@ class OllamaModel:
74
68
 
75
69
  def check_model_config(self):
76
70
  r"""Check whether the model configuration contains any
77
- unexpected arguments to OpenAI API.
71
+ unexpected arguments to Ollama API.
78
72
 
79
73
  Raises:
80
74
  ValueError: If the model configuration dictionary contains any
81
75
  unexpected arguments to OpenAI API.
82
76
  """
83
77
  for param in self.model_config_dict:
84
- if param not in OPENAI_API_PARAMS:
78
+ if param not in OLLAMA_API_PARAMS:
85
79
  raise ValueError(
86
80
  f"Unexpected argument `{param}` is "
87
- "input into OpenAI model backend."
81
+ "input into Ollama model backend."
88
82
  )
89
83
 
90
84
  def run(
@@ -25,10 +25,18 @@ class OpenAIAudioModels:
25
25
 
26
26
  def __init__(
27
27
  self,
28
+ api_key: Optional[str] = None,
29
+ url: Optional[str] = None,
28
30
  ) -> None:
29
31
  r"""Initialize an instance of OpenAI."""
30
- url = os.environ.get('OPENAI_API_BASE_URL')
31
- self._client = OpenAI(timeout=120, max_retries=3, base_url=url)
32
+ self._url = url or os.environ.get("OPENAI_API_BASE_URL")
33
+ self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
34
+ self._client = OpenAI(
35
+ timeout=120,
36
+ max_retries=3,
37
+ base_url=self._url,
38
+ api_key=self._api_key,
39
+ )
32
40
 
33
41
  def text_to_speech(
34
42
  self,
@@ -23,7 +23,7 @@ from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
23
23
  from camel.utils import (
24
24
  BaseTokenCounter,
25
25
  OpenAITokenCounter,
26
- model_api_key_required,
26
+ api_keys_required,
27
27
  )
28
28
 
29
29
 
@@ -46,7 +46,8 @@ class OpenAIModel(BaseModelBackend):
46
46
  be fed into openai.ChatCompletion.create().
47
47
  api_key (Optional[str]): The API key for authenticating with the
48
48
  OpenAI service. (default: :obj:`None`)
49
- url (Optional[str]): The url to the OpenAI service.
49
+ url (Optional[str]): The url to the OpenAI service. (default:
50
+ :obj:`None`)
50
51
  """
51
52
  super().__init__(model_type, model_config_dict, api_key, url)
52
53
  self._url = url or os.environ.get("OPENAI_API_BASE_URL")
@@ -71,7 +72,7 @@ class OpenAIModel(BaseModelBackend):
71
72
  self._token_counter = OpenAITokenCounter(self.model_type)
72
73
  return self._token_counter
73
74
 
74
- @model_api_key_required
75
+ @api_keys_required("OPENAI_API_KEY")
75
76
  def run(
76
77
  self,
77
78
  messages: List[OpenAIMessage],
@@ -17,14 +17,14 @@ from typing import Any, Dict, List, Optional, Union
17
17
 
18
18
  from openai import OpenAI, Stream
19
19
 
20
- from camel.configs import OPENAI_API_PARAMS
20
+ from camel.configs import ZHIPUAI_API_PARAMS
21
21
  from camel.messages import OpenAIMessage
22
22
  from camel.models import BaseModelBackend
23
23
  from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
24
24
  from camel.utils import (
25
25
  BaseTokenCounter,
26
26
  OpenAITokenCounter,
27
- model_api_key_required,
27
+ api_keys_required,
28
28
  )
29
29
 
30
30
 
@@ -47,10 +47,16 @@ class ZhipuAIModel(BaseModelBackend):
47
47
  be fed into openai.ChatCompletion.create().
48
48
  api_key (Optional[str]): The API key for authenticating with the
49
49
  ZhipuAI service. (default: :obj:`None`)
50
+ url (Optional[str]): The url to the ZhipuAI service. (default:
51
+ :obj:`None`)
50
52
  """
51
53
  super().__init__(model_type, model_config_dict)
52
54
  self._url = url or os.environ.get("ZHIPUAI_API_BASE_URL")
53
55
  self._api_key = api_key or os.environ.get("ZHIPUAI_API_KEY")
56
+ if not self._url or not self._api_key:
57
+ raise ValueError(
58
+ "ZHIPUAI_API_BASE_URL and ZHIPUAI_API_KEY should be set."
59
+ )
54
60
  self._client = OpenAI(
55
61
  timeout=60,
56
62
  max_retries=3,
@@ -59,7 +65,7 @@ class ZhipuAIModel(BaseModelBackend):
59
65
  )
60
66
  self._token_counter: Optional[BaseTokenCounter] = None
61
67
 
62
- @model_api_key_required
68
+ @api_keys_required("ZHIPUAI_API_KEY")
63
69
  def run(
64
70
  self,
65
71
  messages: List[OpenAIMessage],
@@ -104,13 +110,13 @@ class ZhipuAIModel(BaseModelBackend):
104
110
 
105
111
  Raises:
106
112
  ValueError: If the model configuration dictionary contains any
107
- unexpected arguments to OpenAI API.
113
+ unexpected arguments to ZhipuAI API.
108
114
  """
109
115
  for param in self.model_config_dict:
110
- if param not in OPENAI_API_PARAMS:
116
+ if param not in ZHIPUAI_API_PARAMS:
111
117
  raise ValueError(
112
118
  f"Unexpected argument `{param}` is "
113
- "input into OpenAI model backend."
119
+ "input into ZhipuAI model backend."
114
120
  )
115
121
  pass
116
122
 
@@ -17,6 +17,7 @@ import numpy as np
17
17
 
18
18
  from camel.loaders import UnstructuredIO
19
19
  from camel.retrievers import BaseRetriever
20
+ from camel.utils import dependencies_required
20
21
 
21
22
  DEFAULT_TOP_K_RESULTS = 1
22
23
 
@@ -40,16 +41,10 @@ class BM25Retriever(BaseRetriever):
40
41
  https://github.com/dorianbrown/rank_bm25
41
42
  """
42
43
 
44
+ @dependencies_required('rank_bm25')
43
45
  def __init__(self) -> None:
44
46
  r"""Initializes the BM25Retriever."""
45
-
46
- try:
47
- from rank_bm25 import BM25Okapi
48
- except ImportError as e:
49
- raise ImportError(
50
- "Package `rank_bm25` not installed, install by running 'pip "
51
- "install rank_bm25'"
52
- ) from e
47
+ from rank_bm25 import BM25Okapi
53
48
 
54
49
  self.bm25: BM25Okapi = None
55
50
  self.content_input_path: str = ""
@@ -15,6 +15,7 @@ import os
15
15
  from typing import Any, Dict, List, Optional
16
16
 
17
17
  from camel.retrievers import BaseRetriever
18
+ from camel.utils import dependencies_required
18
19
 
19
20
  DEFAULT_TOP_K_RESULTS = 1
20
21
 
@@ -32,6 +33,7 @@ class CohereRerankRetriever(BaseRetriever):
32
33
  https://txt.cohere.com/rerank/
33
34
  """
34
35
 
36
+ @dependencies_required('cohere')
35
37
  def __init__(
36
38
  self,
37
39
  model_name: str = "rerank-multilingual-v2.0",
@@ -56,11 +58,7 @@ class CohereRerankRetriever(BaseRetriever):
56
58
  ValueError: If the API key is neither passed as an argument nor
57
59
  set in the environment variable.
58
60
  """
59
-
60
- try:
61
- import cohere
62
- except ImportError as e:
63
- raise ImportError("Package 'cohere' is not installed") from e
61
+ import cohere
64
62
 
65
63
  try:
66
64
  self.api_key = api_key or os.environ["COHERE_API_KEY"]
@@ -17,6 +17,7 @@ from .graph_storages.neo4j_graph import Neo4jGraph
17
17
  from .key_value_storages.base import BaseKeyValueStorage
18
18
  from .key_value_storages.in_memory import InMemoryKeyValueStorage
19
19
  from .key_value_storages.json import JsonStorage
20
+ from .key_value_storages.redis import RedisStorage
20
21
  from .vectordb_storages.base import (
21
22
  BaseVectorStorage,
22
23
  VectorDBQuery,
@@ -30,6 +31,7 @@ __all__ = [
30
31
  'BaseKeyValueStorage',
31
32
  'InMemoryKeyValueStorage',
32
33
  'JsonStorage',
34
+ 'RedisStorage',
33
35
  'VectorRecord',
34
36
  'BaseVectorStorage',
35
37
  'VectorDBQuery',