camel-ai 0.1.5.4__py3-none-any.whl → 0.1.5.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (48)
  1. camel/__init__.py +1 -1
  2. camel/agents/knowledge_graph_agent.py +11 -15
  3. camel/agents/task_agent.py +0 -1
  4. camel/configs/__init__.py +12 -0
  5. camel/configs/gemini_config.py +97 -0
  6. camel/configs/litellm_config.py +8 -18
  7. camel/configs/ollama_config.py +85 -0
  8. camel/configs/zhipuai_config.py +78 -0
  9. camel/embeddings/openai_embedding.py +2 -2
  10. camel/functions/search_functions.py +5 -14
  11. camel/functions/slack_functions.py +5 -7
  12. camel/functions/twitter_function.py +3 -8
  13. camel/functions/weather_functions.py +3 -8
  14. camel/interpreters/__init__.py +2 -0
  15. camel/interpreters/docker_interpreter.py +235 -0
  16. camel/loaders/__init__.py +2 -0
  17. camel/loaders/base_io.py +5 -9
  18. camel/loaders/jina_url_reader.py +99 -0
  19. camel/loaders/unstructured_io.py +4 -6
  20. camel/models/__init__.py +2 -0
  21. camel/models/anthropic_model.py +6 -4
  22. camel/models/gemini_model.py +203 -0
  23. camel/models/litellm_model.py +49 -21
  24. camel/models/model_factory.py +4 -2
  25. camel/models/nemotron_model.py +14 -6
  26. camel/models/ollama_model.py +11 -17
  27. camel/models/openai_audio_models.py +10 -2
  28. camel/models/openai_model.py +4 -3
  29. camel/models/zhipuai_model.py +12 -6
  30. camel/retrievers/bm25_retriever.py +3 -8
  31. camel/retrievers/cohere_rerank_retriever.py +3 -5
  32. camel/storages/__init__.py +2 -0
  33. camel/storages/graph_storages/neo4j_graph.py +3 -7
  34. camel/storages/key_value_storages/__init__.py +2 -0
  35. camel/storages/key_value_storages/redis.py +169 -0
  36. camel/storages/vectordb_storages/milvus.py +3 -7
  37. camel/storages/vectordb_storages/qdrant.py +3 -7
  38. camel/toolkits/__init__.py +2 -0
  39. camel/toolkits/code_execution.py +69 -0
  40. camel/toolkits/github_toolkit.py +5 -9
  41. camel/types/enums.py +53 -1
  42. camel/utils/__init__.py +4 -2
  43. camel/utils/async_func.py +42 -0
  44. camel/utils/commons.py +31 -49
  45. camel/utils/token_counting.py +74 -1
  46. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.6.dist-info}/METADATA +12 -3
  47. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.6.dist-info}/RECORD +48 -39
  48. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.6.dist-info}/WHEEL +0 -0
camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

- __version__ = '0.1.5'
+ __version__ = '0.1.5.6'

  __all__ = [
      '__version__',
camel/agents/knowledge_graph_agent.py CHANGED
@@ -78,17 +78,16 @@ Expected Output:

  Nodes:

- Node(id='John', type='Person', properties={'agent_generated'})
- Node(id='XYZ Corporation', type='Organization', properties={'agent_generated'})
- Node(id='New York City', type='Location', properties={'agent_generated'})
+ Node(id='John', type='Person')
+ Node(id='XYZ Corporation', type='Organization')
+ Node(id='New York City', type='Location')

  Relationships:

  Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
- Corporation', type='Organization'), type='WorksAt', properties=
- {'agent_generated'})
+ Corporation', type='Organization'), type='WorksAt')
  Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
- type='Location'), type='ResidesIn', properties={'agent_generated'})
+ type='Location'), type='ResidesIn')

  ===== TASK =====
  Please extracts nodes and relationships from given content and structures them
@@ -211,11 +210,10 @@ class KnowledgeGraphAgent(ChatAgent):
          import re

          # Regular expressions to extract nodes and relationships
-         node_pattern = r"Node\(id='(.*?)', type='(.*?)', properties=(.*?)\)"
+         node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
          rel_pattern = (
              r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
-             r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', "
-             r"properties=\{(.*?)\}\)"
+             r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
          )

          nodes = {}
@@ -223,8 +221,8 @@ class KnowledgeGraphAgent(ChatAgent):

          # Extract nodes
          for match in re.finditer(node_pattern, input_string):
-             id, type, properties = match.groups()
-             properties = eval(properties)
+             id, type = match.groups()
+             properties = {'source': 'agent_created'}
              if id not in nodes:
                  node = Node(id, type, properties)
                  if self._validate_node(node):
@@ -232,10 +230,8 @@ class KnowledgeGraphAgent(ChatAgent):

          # Extract relationships
          for match in re.finditer(rel_pattern, input_string):
-             subj_id, subj_type, obj_id, obj_type, rel_type, properties_str = (
-                 match.groups()
-             )
-             properties = eval(properties_str)
+             subj_id, subj_type, obj_id, obj_type, rel_type = match.groups()
+             properties = {'source': 'agent_created'}
              if subj_id in nodes and obj_id in nodes:
                  subj = nodes[subj_id]
                  obj = nodes[obj_id]
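
Note: the updated parser no longer calls eval() on model output. Properties are not read back from the text at all; every parsed element gets a fixed {'source': 'agent_created'} tag. A minimal standalone sketch of the new extraction logic (the Node construction is simplified here):

    import re

    # Pattern copied from the updated _parse_graph_elements above.
    node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"

    text = "Node(id='John', type='Person')\nNode(id='XYZ Corporation', type='Organization')"
    for match in re.finditer(node_pattern, text):
        node_id, node_type = match.groups()
        properties = {'source': 'agent_created'}  # fixed tag, no eval of model output
        print(node_id, node_type, properties)
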
camel/agents/task_agent.py CHANGED
@@ -100,7 +100,6 @@ class TaskSpecifyAgent(ChatAgent):

          if meta_dict is not None:
              task_specify_prompt = task_specify_prompt.format(**meta_dict)
-
          task_msg = BaseMessage.make_user_message(
              role_name="Task Specifier", content=task_specify_prompt
          )
camel/configs/__init__.py CHANGED
@@ -13,12 +13,18 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
  from .base_config import BaseConfig
+ from .gemini_config import (
+     Gemini_API_PARAMS,
+     GeminiConfig,
+ )
  from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
+ from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
  from .openai_config import (
      OPENAI_API_PARAMS,
      ChatGPTConfig,
      OpenSourceConfig,
  )
+ from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig

  __all__ = [
      'BaseConfig',
@@ -29,4 +35,10 @@ __all__ = [
      'OpenSourceConfig',
      'LiteLLMConfig',
      'LITELLM_API_PARAMS',
+     'OllamaConfig',
+     'OLLAMA_API_PARAMS',
+     'ZhipuAIConfig',
+     'ZHIPUAI_API_PARAMS',
+     'GeminiConfig',
+     'Gemini_API_PARAMS',
  ]
camel/configs/gemini_config.py ADDED
@@ -0,0 +1,97 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+
+ from collections.abc import Iterable
+ from dataclasses import asdict, dataclass
+ from typing import Optional
+
+ from camel.configs.base_config import BaseConfig
+
+
+ @dataclass(frozen=True)
+ class GeminiConfig(BaseConfig):
+     r"""A simple dataclass used to configure the generation parameters of
+     `GenerativeModel.generate_content`.
+
+     Args:
+         candidate_count (int, optional): Number of responses to return.
+         stop_sequences (Iterable[str], optional): The set of character
+             sequences (up to 5) that will stop output generation. If specified
+             the API will stop at the first appearance of a stop sequence.
+             The stop sequence will not be included as part of the response.
+         max_output_tokens (int, optional): The maximum number of tokens to
+             include in a candidate. If unset, this will default to
+             output_token_limit specified in the model's specification.
+         temperature (float, optional): Controls the randomness of the output.
+             Note: The default value varies by model, see the
+             `Model.temperature` attribute of the `Model` returned
+             the `genai.get_model` function. Values can range from [0.0,1.0],
+             inclusive. A value closer to 1.0 will produce responses that are
+             more varied and creative, while a value closer to 0.0 will
+             typically result in more straightforward responses from the model.
+         top_p (int, optional): The maximum cumulative probability of tokens to
+             consider when sampling. The model uses combined Top-k and nucleus
+             sampling. Tokens are sorted based on their assigned probabilities
+             so that only the most likely tokens are considered. Top-k sampling
+             directly limits the maximum number of tokens to consider, while
+             Nucleus sampling limits number of tokens
+             based on the cumulative probability. Note: The default value varies
+             by model, see the `Model.top_p` attribute of the `Model` returned
+             the `genai.get_model` function.
+         top_k (int, optional): The maximum number of tokens to consider when
+             sampling. The model uses combined Top-k and nucleus sampling. Top-k
+             sampling considers the set of `top_k` most probable tokens.
+             Defaults to 40. Note: The default value varies by model, see the
+             `Model.top_k` attribute of the `Model` returned the
+             `genai.get_model` function.
+         response_mime_type (str, optional): Output response mimetype of the
+             generated candidate text. Supported mimetype:
+             `text/plain`: (default) Text output.
+             `application/json`: JSON response in the candidates.
+         response_schema (Schema, optional): Specifies the format of the
+             JSON requested if response_mime_type is `application/json`.
+         safety_settings (SafetySettingOptions, optional):
+             Overrides for the model's safety settings.
+         tools (FunctionLibraryType, optional):
+             `protos.Tools` more info coming soon.
+         tool_config (ToolConfigType, optional):
+             more info coming soon.
+         request_options (RequestOptionsType, optional):
+             Options for the request.
+     """
+
+     from google.generativeai.protos import Schema
+     from google.generativeai.types.content_types import (
+         FunctionLibraryType,
+         ToolConfigType,
+     )
+     from google.generativeai.types.helper_types import RequestOptionsType
+     from google.generativeai.types.safety_types import SafetySettingOptions
+
+     candidate_count: Optional[int] = None
+     stop_sequences: Optional[Iterable[str]] = None
+     max_output_tokens: Optional[int] = None
+     temperature: Optional[float] = None
+     top_p: Optional[float] = None
+     top_k: Optional[int] = None
+     response_mime_type: Optional[str] = None
+     response_schema: Optional[Schema] = None
+     safety_settings: Optional[SafetySettingOptions] = None
+     tools: Optional[FunctionLibraryType] = None
+     tool_config: Optional[ToolConfigType] = None
+     request_options: Optional[RequestOptionsType] = None
+
+
+ Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}
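
Note: each of the new config modules ends with the same pattern: the set of accepted parameter names is derived from the frozen dataclass itself via asdict. A standalone sketch of that pattern, using a hypothetical DemoConfig so it runs without the google-generativeai dependency:

    from dataclasses import asdict, dataclass
    from typing import Optional

    @dataclass(frozen=True)
    class DemoConfig:
        temperature: Optional[float] = None
        top_k: Optional[int] = None

    # Mirrors `Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}`.
    DEMO_API_PARAMS = {param for param in asdict(DemoConfig()).keys()}
    print(DEMO_API_PARAMS)  # {'temperature', 'top_k'} (set order may vary)
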
camel/configs/litellm_config.py CHANGED
@@ -13,11 +13,14 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from __future__ import annotations

- from dataclasses import asdict, dataclass, field
- from typing import List, Optional, Union
+ from dataclasses import asdict, dataclass
+ from typing import TYPE_CHECKING, List, Optional, Union

  from camel.configs.base_config import BaseConfig

+ if TYPE_CHECKING:
+     from camel.functions import OpenAIFunction
+

  @dataclass(frozen=True)
  class LiteLLMConfig(BaseConfig):
@@ -25,9 +28,6 @@ class LiteLLMConfig(BaseConfig):
      LiteLLM API.

      Args:
-         model (str): The name of the language model to use for text completion.
-         messages (List): A list of message objects representing the
-             conversation context. (default: [])
          timeout (Optional[Union[float, str]], optional): Request timeout.
              (default: None)
          temperature (Optional[float], optional): Temperature parameter for
@@ -65,12 +65,7 @@ class LiteLLMConfig(BaseConfig):
          deployment_id (Optional[str], optional): Deployment ID. (default: None)
          extra_headers (Optional[dict], optional): Additional headers for the
              request. (default: None)
-         base_url (Optional[str], optional): Base URL for the API. (default:
-             None)
          api_version (Optional[str], optional): API version. (default: None)
-         api_key (Optional[str], optional): API key. (default: None)
-         model_list (Optional[list], optional): List of API base, version,
-             keys. (default: None)
          mock_response (Optional[str], optional): Mock completion response for
              testing or debugging. (default: None)
          custom_llm_provider (Optional[str], optional): Non-OpenAI LLM
@@ -79,8 +74,6 @@ class LiteLLMConfig(BaseConfig):
              (default: None)
      """

-     model: str = "gpt-3.5-turbo"
-     messages: List = field(default_factory=list)
      timeout: Optional[Union[float, str]] = None
      temperature: Optional[float] = None
      top_p: Optional[float] = None
@@ -91,20 +84,17 @@ class LiteLLMConfig(BaseConfig):
      max_tokens: Optional[int] = None
      presence_penalty: Optional[float] = None
      frequency_penalty: Optional[float] = None
-     logit_bias: Optional[dict] = field(default_factory=dict)
+     logit_bias: Optional[dict] = None
      user: Optional[str] = None
      response_format: Optional[dict] = None
      seed: Optional[int] = None
-     tools: Optional[List] = field(default_factory=list)
+     tools: Optional[list[OpenAIFunction]] = None
      tool_choice: Optional[Union[str, dict]] = None
      logprobs: Optional[bool] = None
      top_logprobs: Optional[int] = None
      deployment_id: Optional[str] = None
-     extra_headers: Optional[dict] = field(default_factory=dict)
-     base_url: Optional[str] = None
+     extra_headers: Optional[dict] = None
      api_version: Optional[str] = None
-     api_key: Optional[str] = None
-     model_list: Optional[list] = field(default_factory=list)
      mock_response: Optional[str] = None
      custom_llm_provider: Optional[str] = None
      max_retries: Optional[int] = None
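
Note the direction of the field changes: mutable field(default_factory=...) defaults are replaced by None, so an unset parameter is now distinguishable from an intentionally empty one and can be dropped before a request is built. A minimal sketch of that filtering idea (the helper name is hypothetical, not from this diff):

    from dataclasses import asdict, dataclass
    from typing import Optional

    @dataclass(frozen=True)
    class MiniConfig:
        temperature: Optional[float] = None
        logit_bias: Optional[dict] = None

    def to_request_kwargs(cfg: MiniConfig) -> dict:
        # Keep only explicitly set values; None means "use the provider default".
        return {k: v for k, v in asdict(cfg).items() if v is not None}

    print(to_request_kwargs(MiniConfig(temperature=0.7)))  # {'temperature': 0.7}
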
camel/configs/ollama_config.py ADDED
@@ -0,0 +1,85 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ from __future__ import annotations
+
+ from dataclasses import asdict, dataclass
+ from typing import Sequence
+
+ from openai._types import NOT_GIVEN, NotGiven
+
+ from camel.configs.base_config import BaseConfig
+
+
+ @dataclass(frozen=True)
+ class OllamaConfig(BaseConfig):
+     r"""Defines the parameters for generating chat completions using OpenAI
+     compatibility
+
+     Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
+
+     Args:
+         temperature (float, optional): Sampling temperature to use, between
+             :obj:`0` and :obj:`2`. Higher values make the output more random,
+             while lower values make it more focused and deterministic.
+             (default: :obj:`0.2`)
+         top_p (float, optional): An alternative to sampling with temperature,
+             called nucleus sampling, where the model considers the results of
+             the tokens with top_p probability mass. So :obj:`0.1` means only
+             the tokens comprising the top 10% probability mass are considered.
+             (default: :obj:`1.0`)
+         response_format (object, optional): An object specifying the format
+             that the model must output. Compatible with GPT-4 Turbo and all
+             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+             {"type": "json_object"} enables JSON mode, which guarantees the
+             message the model generates is valid JSON. Important: when using
+             JSON mode, you must also instruct the model to produce JSON
+             yourself via a system or user message. Without this, the model
+             may generate an unending stream of whitespace until the generation
+             reaches the token limit, resulting in a long-running and seemingly
+             "stuck" request. Also note that the message content may be
+             partially cut off if finish_reason="length", which indicates the
+             generation exceeded max_tokens or the conversation exceeded the
+             max context length.
+         stream (bool, optional): If True, partial message deltas will be sent
+             as data-only server-sent events as they become available.
+             (default: :obj:`False`)
+         stop (str or list, optional): Up to :obj:`4` sequences where the API
+             will stop generating further tokens. (default: :obj:`None`)
+         max_tokens (int, optional): The maximum number of tokens to generate
+             in the chat completion. The total length of input tokens and
+             generated tokens is limited by the model's context length.
+             (default: :obj:`None`)
+         presence_penalty (float, optional): Number between :obj:`-2.0` and
+             :obj:`2.0`. Positive values penalize new tokens based on whether
+             they appear in the text so far, increasing the model's likelihood
+             to talk about new topics. See more information about frequency and
+             presence penalties. (default: :obj:`0.0`)
+         frequency_penalty (float, optional): Number between :obj:`-2.0` and
+             :obj:`2.0`. Positive values penalize new tokens based on their
+             existing frequency in the text so far, decreasing the model's
+             likelihood to repeat the same line verbatim. See more information
+             about frequency and presence penalties. (default: :obj:`0.0`)
+     """
+
+     temperature: float = 0.2
+     top_p: float = 1.0
+     stream: bool = False
+     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+     max_tokens: int | NotGiven = NOT_GIVEN
+     presence_penalty: float = 0.0
+     response_format: dict | NotGiven = NOT_GIVEN
+     frequency_penalty: float = 0.0
+
+
+ OLLAMA_API_PARAMS = {param for param in asdict(OllamaConfig()).keys()}
camel/configs/zhipuai_config.py ADDED
@@ -0,0 +1,78 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ from __future__ import annotations
+
+ from dataclasses import asdict, dataclass
+ from typing import TYPE_CHECKING, Optional, Sequence
+
+ from openai._types import NOT_GIVEN, NotGiven
+
+ from camel.configs.base_config import BaseConfig
+
+ if TYPE_CHECKING:
+     from camel.functions import OpenAIFunction
+
+
+ @dataclass(frozen=True)
+ class ZhipuAIConfig(BaseConfig):
+     r"""Defines the parameters for generating chat completions using OpenAI
+     compatibility
+
+     Reference: https://open.bigmodel.cn/dev/api#glm-4v
+
+     Args:
+         temperature (float, optional): Sampling temperature to use, between
+             :obj:`0` and :obj:`2`. Higher values make the output more random,
+             while lower values make it more focused and deterministic.
+             (default: :obj:`0.2`)
+         top_p (float, optional): An alternative to sampling with temperature,
+             called nucleus sampling, where the model considers the results of
+             the tokens with top_p probability mass. So :obj:`0.1` means only
+             the tokens comprising the top 10% probability mass are considered.
+             (default: :obj:`0.6`)
+         stream (bool, optional): If True, partial message deltas will be sent
+             as data-only server-sent events as they become available.
+             (default: :obj:`False`)
+         stop (str or list, optional): Up to :obj:`4` sequences where the API
+             will stop generating further tokens. (default: :obj:`None`)
+         max_tokens (int, optional): The maximum number of tokens to generate
+             in the chat completion. The total length of input tokens and
+             generated tokens is limited by the model's context length.
+             (default: :obj:`None`)
+         tools (list[OpenAIFunction], optional): A list of tools the model may
+             call. Currently, only functions are supported as a tool. Use this
+             to provide a list of functions the model may generate JSON inputs
+             for. A max of 128 functions are supported.
+         tool_choice (Union[dict[str, str], str], optional): Controls which (if
+             any) tool is called by the model. :obj:`"none"` means the model
+             will not call any tool and instead generates a message.
+             :obj:`"auto"` means the model can pick between generating a
+             message or calling one or more tools. :obj:`"required"` means the
+             model must call one or more tools. Specifying a particular tool
+             via {"type": "function", "function": {"name": "my_function"}}
+             forces the model to call that tool. :obj:`"none"` is the default
+             when no tools are present. :obj:`"auto"` is the default if tools
+             are present.
+     """
+
+     temperature: float = 0.2
+     top_p: float = 0.6
+     stream: bool = False
+     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+     max_tokens: int | NotGiven = NOT_GIVEN
+     tools: Optional[list[OpenAIFunction]] = None
+     tool_choice: Optional[dict[str, str] | str] = None
+
+
+ ZHIPUAI_API_PARAMS = {param for param in asdict(ZhipuAIConfig()).keys()}
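
These exported sets let callers check a config dict against what a backend accepts; the model classes presumably perform a similar validation, though that code is not expanded in this view. A hedged sketch of such a check (the function name is hypothetical):

    from camel.configs import OLLAMA_API_PARAMS, ZHIPUAI_API_PARAMS

    def check_config(config: dict, allowed: set, backend: str) -> None:
        # Reject keys the target backend does not accept.
        for key in config:
            if key not in allowed:
                raise ValueError(f"Unexpected argument `{key}` for {backend}.")

    check_config({'temperature': 0.2, 'top_p': 0.6}, ZHIPUAI_API_PARAMS, "ZhipuAI")
    check_config({'temperature': 0.2, 'stream': False}, OLLAMA_API_PARAMS, "Ollama")
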
camel/embeddings/openai_embedding.py CHANGED
@@ -20,7 +20,7 @@ from openai import NOT_GIVEN, NotGiven, OpenAI

  from camel.embeddings.base import BaseEmbedding
  from camel.types import EmbeddingModelType
- from camel.utils import model_api_key_required
+ from camel.utils import api_keys_required


  class OpenAIEmbedding(BaseEmbedding[str]):
@@ -58,7 +58,7 @@ class OpenAIEmbedding(BaseEmbedding[str]):
          self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
          self.client = OpenAI(timeout=60, max_retries=3, api_key=self._api_key)

-     @model_api_key_required
+     @api_keys_required("OPENAI_API_KEY")
      def embed_list(
          self,
          objs: list[str],
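
Here model_api_key_required is replaced by a parameterized api_keys_required decorator that names the environment variable it guards. Its implementation is not part of this diff; a plausible sketch of the shape such a decorator could take (hypothetical, the real one in camel.utils may differ, e.g. by also honoring a key passed to the constructor):

    import os
    from functools import wraps

    def api_keys_required(*env_vars: str):
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                # Fail fast if any named API key is absent from the environment.
                missing = [v for v in env_vars if not os.environ.get(v)]
                if missing:
                    raise ValueError(f"Missing API key(s): {', '.join(missing)}")
                return func(*args, **kwargs)
            return wrapper
        return decorator
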
camel/functions/search_functions.py CHANGED
@@ -15,8 +15,10 @@ import os
  from typing import Any, Dict, List

  from camel.functions.openai_function import OpenAIFunction
+ from camel.utils import dependencies_required


+ @dependencies_required('wikipedia')
  def search_wiki(entity: str) -> str:
      r"""Search the entity in WikiPedia and return the summary of the required
      page, containing factual information about the given entity.
@@ -28,13 +30,7 @@ def search_wiki(entity: str) -> str:
          str: The search result. If the page corresponding to the entity
              exists, return the summary of this entity in a string.
      """
-     try:
-         import wikipedia
-     except ImportError:
-         raise ImportError(
-             "Please install `wikipedia` first. You can install it by running "
-             "`pip install wikipedia`."
-         )
+     import wikipedia

      result: str

@@ -241,6 +237,7 @@ def search_google(
      return responses


+ @dependencies_required('wolframalpha')
  def query_wolfram_alpha(query: str, is_detailed: bool) -> str:
      r"""Queries Wolfram|Alpha and returns the result. Wolfram|Alpha is an
      answer engine developed by Wolfram Research. It is offered as an online
@@ -255,13 +252,7 @@ def query_wolfram_alpha(query: str, is_detailed: bool) -> str:
      Returns:
          str: The result from Wolfram Alpha, formatted as a string.
      """
-     try:
-         import wolframalpha
-     except ImportError:
-         raise ImportError(
-             "Please install `wolframalpha` first. You can install it by"
-             " running `pip install wolframalpha`."
-         )
+     import wolframalpha

      WOLFRAMALPHA_APP_ID = os.environ.get('WOLFRAMALPHA_APP_ID')
      if not WOLFRAMALPHA_APP_ID:
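
Across this and the following tool modules, the inline try/except ImportError blocks are replaced by a shared dependencies_required decorator. Its implementation is not shown in this diff; a hedged sketch of what such a decorator might look like:

    import importlib.util
    from functools import wraps

    def dependencies_required(*packages: str):
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                # Check that each required module is importable before running.
                missing = [p for p in packages if importlib.util.find_spec(p) is None]
                if missing:
                    raise ImportError(
                        f"Missing required packages: {', '.join(missing)}. "
                        f"You can install them with `pip install {' '.join(missing)}`."
                    )
                return func(*args, **kwargs)
            return wrapper
        return decorator
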
camel/functions/slack_functions.py CHANGED
@@ -19,6 +19,8 @@ import logging
  import os
  from typing import TYPE_CHECKING, List, Optional

+ from camel.utils import dependencies_required
+
  if TYPE_CHECKING:
      from ssl import SSLContext

@@ -29,6 +31,7 @@ from camel.functions import OpenAIFunction
  logger = logging.getLogger(__name__)


+ @dependencies_required('slack_sdk')
  def _login_slack(
      slack_token: Optional[str] = None,
      ssl: Optional[SSLContext] = None,
@@ -50,13 +53,8 @@ def _login_slack(
          KeyError: If SLACK_BOT_TOKEN or SLACK_USER_TOKEN environment variables
              are not set.
      """
-     try:
-         from slack_sdk import WebClient
-     except ImportError as e:
-         raise ImportError(
-             "Cannot import slack_sdk. Please install the package with \
-             `pip install slack_sdk`."
-         ) from e
+     from slack_sdk import WebClient
+
      if not slack_token:
          slack_token = os.environ.get("SLACK_BOT_TOKEN") or os.environ.get(
              "SLACK_USER_TOKEN"
camel/functions/twitter_function.py CHANGED
@@ -20,6 +20,7 @@ from typing import List, Optional, Tuple, Union
  import requests

  from camel.functions import OpenAIFunction
+ from camel.utils import dependencies_required

  TWEET_TEXT_LIMIT = 280

@@ -55,6 +56,7 @@ def get_twitter_api_key() -> Tuple[str, str]:
      return TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET


+ @dependencies_required('requests_oauthlib')
  def get_oauth_session() -> requests.Session:
      r'''Initiates an OAuth1Session with Twitter's API and returns it.

@@ -75,14 +77,7 @@ def get_oauth_session() -> requests.Session:
      https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/Manage-Tweets/create_tweet.py
      https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/User-Lookup/get_users_me_user_context.py
      '''
-     try:
-         from requests_oauthlib import OAuth1Session
-     except ImportError:
-         raise ImportError(
-             "Please install `requests_oauthlib` first. You can "
-             "install it by running `pip install "
-             "requests_oauthlib`."
-         )
+     from requests_oauthlib import OAuth1Session

      consumer_key, consumer_secret = get_twitter_api_key()

camel/functions/weather_functions.py CHANGED
@@ -15,6 +15,7 @@ import os
  from typing import List, Literal

  from camel.functions.openai_function import OpenAIFunction
+ from camel.utils import dependencies_required


  def get_openweathermap_api_key() -> str:
@@ -37,6 +38,7 @@ def get_openweathermap_api_key() -> str:
      return OPENWEATHERMAP_API_KEY


+ @dependencies_required('pyowm')
  def get_weather_data(
      city: str,
      temp_units: Literal['kelvin', 'celsius', 'fahrenheit'] = 'kelvin',
@@ -85,14 +87,7 @@ def get_weather_data(
      """
      # NOTE: This tool may not work as expected since the input arguments like
      # `time_units` should be enum types which are not supported yet.
-
-     try:
-         import pyowm
-     except ImportError:
-         raise ImportError(
-             "Please install `pyowm` first. You can install it by running "
-             "`pip install pyowm`."
-         )
+     import pyowm

      OPENWEATHERMAP_API_KEY = get_openweathermap_api_key()
      owm = pyowm.OWM(OPENWEATHERMAP_API_KEY)
camel/interpreters/__init__.py CHANGED
@@ -13,6 +13,7 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

  from .base import BaseInterpreter
+ from .docker_interpreter import DockerInterpreter
  from .internal_python_interpreter import InternalPythonInterpreter
  from .interpreter_error import InterpreterError
  from .subprocess_interpreter import SubprocessInterpreter
@@ -22,4 +23,5 @@ __all__ = [
      'InterpreterError',
      'InternalPythonInterpreter',
      'SubprocessInterpreter',
+     'DockerInterpreter',
  ]
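
With the new export, the Docker-backed interpreter is importable from the package root alongside the existing interpreters; its constructor and methods live in the new docker_interpreter.py, which this view does not expand:

    # The import path follows from the __init__.py change above.
    from camel.interpreters import DockerInterpreter, SubprocessInterpreter
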