camel-ai 0.1.5.4__py3-none-any.whl → 0.1.5.5__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (43)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/litellm_config.py +8 -18
  4. camel/configs/ollama_config.py +85 -0
  5. camel/configs/zhipuai_config.py +78 -0
  6. camel/embeddings/openai_embedding.py +2 -2
  7. camel/functions/search_functions.py +5 -14
  8. camel/functions/slack_functions.py +5 -7
  9. camel/functions/twitter_function.py +3 -8
  10. camel/functions/weather_functions.py +3 -8
  11. camel/interpreters/__init__.py +2 -0
  12. camel/interpreters/docker_interpreter.py +235 -0
  13. camel/loaders/__init__.py +2 -0
  14. camel/loaders/base_io.py +5 -9
  15. camel/loaders/jina_url_reader.py +99 -0
  16. camel/loaders/unstructured_io.py +4 -6
  17. camel/models/anthropic_model.py +6 -4
  18. camel/models/litellm_model.py +49 -21
  19. camel/models/model_factory.py +1 -0
  20. camel/models/nemotron_model.py +14 -6
  21. camel/models/ollama_model.py +11 -17
  22. camel/models/openai_audio_models.py +10 -2
  23. camel/models/openai_model.py +4 -3
  24. camel/models/zhipuai_model.py +12 -6
  25. camel/retrievers/bm25_retriever.py +3 -8
  26. camel/retrievers/cohere_rerank_retriever.py +3 -5
  27. camel/storages/__init__.py +2 -0
  28. camel/storages/graph_storages/neo4j_graph.py +3 -7
  29. camel/storages/key_value_storages/__init__.py +2 -0
  30. camel/storages/key_value_storages/redis.py +169 -0
  31. camel/storages/vectordb_storages/milvus.py +3 -7
  32. camel/storages/vectordb_storages/qdrant.py +3 -7
  33. camel/toolkits/__init__.py +2 -0
  34. camel/toolkits/code_execution.py +69 -0
  35. camel/toolkits/github_toolkit.py +5 -9
  36. camel/types/enums.py +35 -1
  37. camel/utils/__init__.py +2 -2
  38. camel/utils/async_func.py +42 -0
  39. camel/utils/commons.py +31 -49
  40. camel/utils/token_counting.py +40 -1
  41. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.5.dist-info}/METADATA +11 -3
  42. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.5.dist-info}/RECORD +43 -36
  43. {camel_ai-0.1.5.4.dist-info → camel_ai-0.1.5.5.dist-info}/WHEEL +0 -0
camel/__init__.py CHANGED
@@ -12,7 +12,7 @@
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  
- __version__ = '0.1.5'
+ __version__ = '0.1.5.5'
  
  __all__ = [
      '__version__',
camel/configs/__init__.py CHANGED
@@ -14,11 +14,13 @@
  from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
  from .base_config import BaseConfig
  from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
+ from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
  from .openai_config import (
      OPENAI_API_PARAMS,
      ChatGPTConfig,
      OpenSourceConfig,
  )
+ from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
  
  __all__ = [
      'BaseConfig',
@@ -29,4 +31,8 @@ __all__ = [
      'OpenSourceConfig',
      'LiteLLMConfig',
      'LITELLM_API_PARAMS',
+     'OllamaConfig',
+     'OLLAMA_API_PARAMS',
+     'ZhipuAIConfig',
+     'ZHIPUAI_API_PARAMS',
  ]
camel/configs/litellm_config.py CHANGED
@@ -13,11 +13,14 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  from __future__ import annotations
  
- from dataclasses import asdict, dataclass, field
- from typing import List, Optional, Union
+ from dataclasses import asdict, dataclass
+ from typing import TYPE_CHECKING, List, Optional, Union
  
  from camel.configs.base_config import BaseConfig
  
+ if TYPE_CHECKING:
+     from camel.functions import OpenAIFunction
+ 
  
  @dataclass(frozen=True)
  class LiteLLMConfig(BaseConfig):
@@ -25,9 +28,6 @@ class LiteLLMConfig(BaseConfig):
      LiteLLM API.
  
      Args:
-         model (str): The name of the language model to use for text completion.
-         messages (List): A list of message objects representing the
-             conversation context. (default: [])
          timeout (Optional[Union[float, str]], optional): Request timeout.
              (default: None)
          temperature (Optional[float], optional): Temperature parameter for
@@ -65,12 +65,7 @@ class LiteLLMConfig(BaseConfig):
          deployment_id (Optional[str], optional): Deployment ID. (default: None)
          extra_headers (Optional[dict], optional): Additional headers for the
              request. (default: None)
-         base_url (Optional[str], optional): Base URL for the API. (default:
-             None)
          api_version (Optional[str], optional): API version. (default: None)
-         api_key (Optional[str], optional): API key. (default: None)
-         model_list (Optional[list], optional): List of API base, version,
-             keys. (default: None)
          mock_response (Optional[str], optional): Mock completion response for
              testing or debugging. (default: None)
          custom_llm_provider (Optional[str], optional): Non-OpenAI LLM
@@ -79,8 +74,6 @@ class LiteLLMConfig(BaseConfig):
              (default: None)
      """
  
-     model: str = "gpt-3.5-turbo"
-     messages: List = field(default_factory=list)
      timeout: Optional[Union[float, str]] = None
      temperature: Optional[float] = None
      top_p: Optional[float] = None
@@ -91,20 +84,17 @@ class LiteLLMConfig(BaseConfig):
      max_tokens: Optional[int] = None
      presence_penalty: Optional[float] = None
      frequency_penalty: Optional[float] = None
-     logit_bias: Optional[dict] = field(default_factory=dict)
+     logit_bias: Optional[dict] = None
      user: Optional[str] = None
      response_format: Optional[dict] = None
      seed: Optional[int] = None
-     tools: Optional[List] = field(default_factory=list)
+     tools: Optional[list[OpenAIFunction]] = None
      tool_choice: Optional[Union[str, dict]] = None
      logprobs: Optional[bool] = None
      top_logprobs: Optional[int] = None
      deployment_id: Optional[str] = None
-     extra_headers: Optional[dict] = field(default_factory=dict)
-     base_url: Optional[str] = None
+     extra_headers: Optional[dict] = None
      api_version: Optional[str] = None
-     api_key: Optional[str] = None
-     model_list: Optional[list] = field(default_factory=list)
      mock_response: Optional[str] = None
      custom_llm_provider: Optional[str] = None
      max_retries: Optional[int] = None
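
Taken together, the LiteLLMConfig changes strip routing concerns (model, messages, api_key, base_url, model_list) out of the config and replace every mutable field(default_factory=...) default with None. A minimal sketch of constructing and serializing the slimmed-down config (the parameter values are illustrative, not from the diff):

    from dataclasses import asdict

    from camel.configs import LiteLLMConfig

    # Only sampling/behaviour knobs remain on the config; the model name
    # and message list are now supplied at call time by the model backend.
    config = LiteLLMConfig(temperature=0.3, max_tokens=512)

    # Drop unset (None) fields so they are not sent to the API at all.
    kwargs = {k: v for k, v in asdict(config).items() if v is not None}
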
camel/configs/ollama_config.py ADDED
@@ -0,0 +1,85 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ from __future__ import annotations
+ 
+ from dataclasses import asdict, dataclass
+ from typing import Sequence
+ 
+ from openai._types import NOT_GIVEN, NotGiven
+ 
+ from camel.configs.base_config import BaseConfig
+ 
+ 
+ @dataclass(frozen=True)
+ class OllamaConfig(BaseConfig):
+     r"""Defines the parameters for generating chat completions using the
+     OpenAI compatibility API.
+ 
+     Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
+ 
+     Args:
+         temperature (float, optional): Sampling temperature to use, between
+             :obj:`0` and :obj:`2`. Higher values make the output more random,
+             while lower values make it more focused and deterministic.
+             (default: :obj:`0.2`)
+         top_p (float, optional): An alternative to sampling with temperature,
+             called nucleus sampling, where the model considers the results of
+             the tokens with top_p probability mass. So :obj:`0.1` means only
+             the tokens comprising the top 10% probability mass are considered.
+             (default: :obj:`1.0`)
+         response_format (object, optional): An object specifying the format
+             that the model must output. Compatible with GPT-4 Turbo and all
+             GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+             {"type": "json_object"} enables JSON mode, which guarantees the
+             message the model generates is valid JSON. Important: when using
+             JSON mode, you must also instruct the model to produce JSON
+             yourself via a system or user message. Without this, the model
+             may generate an unending stream of whitespace until the generation
+             reaches the token limit, resulting in a long-running and seemingly
+             "stuck" request. Also note that the message content may be
+             partially cut off if finish_reason="length", which indicates the
+             generation exceeded max_tokens or the conversation exceeded the
+             max context length.
+         stream (bool, optional): If True, partial message deltas will be sent
+             as data-only server-sent events as they become available.
+             (default: :obj:`False`)
+         stop (str or list, optional): Up to :obj:`4` sequences where the API
+             will stop generating further tokens. (default: :obj:`None`)
+         max_tokens (int, optional): The maximum number of tokens to generate
+             in the chat completion. The total length of input tokens and
+             generated tokens is limited by the model's context length.
+             (default: :obj:`None`)
+         presence_penalty (float, optional): Number between :obj:`-2.0` and
+             :obj:`2.0`. Positive values penalize new tokens based on whether
+             they appear in the text so far, increasing the model's likelihood
+             to talk about new topics. See more information about frequency and
+             presence penalties. (default: :obj:`0.0`)
+         frequency_penalty (float, optional): Number between :obj:`-2.0` and
+             :obj:`2.0`. Positive values penalize new tokens based on their
+             existing frequency in the text so far, decreasing the model's
+             likelihood to repeat the same line verbatim. See more information
+             about frequency and presence penalties. (default: :obj:`0.0`)
+     """
+ 
+     temperature: float = 0.2
+     top_p: float = 1.0
+     stream: bool = False
+     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+     max_tokens: int | NotGiven = NOT_GIVEN
+     presence_penalty: float = 0.0
+     response_format: dict | NotGiven = NOT_GIVEN
+     frequency_penalty: float = 0.0
+ 
+ 
+ OLLAMA_API_PARAMS = {param for param in asdict(OllamaConfig()).keys()}
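
A quick usage sketch for the new config (the values are illustrative; OllamaConfig carries only request parameters, while the model name and server URL are configured on the model backend):

    from dataclasses import asdict

    from camel.configs import OllamaConfig

    # NOT_GIVEN sentinels mark options the caller never set, so they can
    # be filtered out instead of being serialized as null in the request.
    config = OllamaConfig(temperature=0.4, stop=["\n\n"])
    params = asdict(config)
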
camel/configs/zhipuai_config.py ADDED
@@ -0,0 +1,78 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ from __future__ import annotations
+ 
+ from dataclasses import asdict, dataclass
+ from typing import TYPE_CHECKING, Optional, Sequence
+ 
+ from openai._types import NOT_GIVEN, NotGiven
+ 
+ from camel.configs.base_config import BaseConfig
+ 
+ if TYPE_CHECKING:
+     from camel.functions import OpenAIFunction
+ 
+ 
+ @dataclass(frozen=True)
+ class ZhipuAIConfig(BaseConfig):
+     r"""Defines the parameters for generating chat completions using the
+     OpenAI compatibility API.
+ 
+     Reference: https://open.bigmodel.cn/dev/api#glm-4v
+ 
+     Args:
+         temperature (float, optional): Sampling temperature to use, between
+             :obj:`0` and :obj:`2`. Higher values make the output more random,
+             while lower values make it more focused and deterministic.
+             (default: :obj:`0.2`)
+         top_p (float, optional): An alternative to sampling with temperature,
+             called nucleus sampling, where the model considers the results of
+             the tokens with top_p probability mass. So :obj:`0.1` means only
+             the tokens comprising the top 10% probability mass are considered.
+             (default: :obj:`0.6`)
+         stream (bool, optional): If True, partial message deltas will be sent
+             as data-only server-sent events as they become available.
+             (default: :obj:`False`)
+         stop (str or list, optional): Up to :obj:`4` sequences where the API
+             will stop generating further tokens. (default: :obj:`None`)
+         max_tokens (int, optional): The maximum number of tokens to generate
+             in the chat completion. The total length of input tokens and
+             generated tokens is limited by the model's context length.
+             (default: :obj:`None`)
+         tools (list[OpenAIFunction], optional): A list of tools the model may
+             call. Currently, only functions are supported as a tool. Use this
+             to provide a list of functions the model may generate JSON inputs
+             for. A max of 128 functions are supported.
+         tool_choice (Union[dict[str, str], str], optional): Controls which (if
+             any) tool is called by the model. :obj:`"none"` means the model
+             will not call any tool and instead generates a message.
+             :obj:`"auto"` means the model can pick between generating a
+             message or calling one or more tools. :obj:`"required"` means the
+             model must call one or more tools. Specifying a particular tool
+             via {"type": "function", "function": {"name": "my_function"}}
+             forces the model to call that tool. :obj:`"none"` is the default
+             when no tools are present. :obj:`"auto"` is the default if tools
+             are present.
+     """
+ 
+     temperature: float = 0.2
+     top_p: float = 0.6
+     stream: bool = False
+     stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+     max_tokens: int | NotGiven = NOT_GIVEN
+     tools: Optional[list[OpenAIFunction]] = None
+     tool_choice: Optional[dict[str, str] | str] = None
+ 
+ 
+ ZHIPUAI_API_PARAMS = {param for param in asdict(ZhipuAIConfig()).keys()}
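
The tools and tool_choice fields follow the OpenAI function-calling shape documented in the docstring above. A minimal sketch of forcing a specific tool (my_function is a placeholder name, not something defined in this release):

    from camel.configs import ZhipuAIConfig

    # Force the model to call one particular (hypothetical) function.
    config = ZhipuAIConfig(
        temperature=0.2,
        tool_choice={"type": "function", "function": {"name": "my_function"}},
    )
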
camel/embeddings/openai_embedding.py CHANGED
@@ -20,7 +20,7 @@ from openai import NOT_GIVEN, NotGiven, OpenAI
  
  from camel.embeddings.base import BaseEmbedding
  from camel.types import EmbeddingModelType
- from camel.utils import model_api_key_required
+ from camel.utils import api_keys_required
  
  
  class OpenAIEmbedding(BaseEmbedding[str]):
@@ -58,7 +58,7 @@ class OpenAIEmbedding(BaseEmbedding[str]):
          self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
          self.client = OpenAI(timeout=60, max_retries=3, api_key=self._api_key)
  
-     @model_api_key_required
+     @api_keys_required("OPENAI_API_KEY")
      def embed_list(
          self,
          objs: list[str],
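
The new api_keys_required decorator names the required environment variables explicitly, rather than inferring them from the model type as model_api_key_required did. Its implementation is not part of this diff; a sketch of the general shape such a decorator tends to take (not the library's actual code):

    import os
    from functools import wraps


    def api_keys_required(*env_vars: str):
        # Fail fast with a clear message if any named key is missing.
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                missing = [v for v in env_vars if not os.environ.get(v)]
                if missing:
                    raise ValueError(
                        f"Missing API keys: {', '.join(missing)}"
                    )
                return func(*args, **kwargs)
            return wrapper
        return decorator
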
camel/functions/search_functions.py CHANGED
@@ -15,8 +15,10 @@ import os
  from typing import Any, Dict, List
  
  from camel.functions.openai_function import OpenAIFunction
+ from camel.utils import dependencies_required
  
  
+ @dependencies_required('wikipedia')
  def search_wiki(entity: str) -> str:
      r"""Search the entity in WikiPedia and return the summary of the required
      page, containing factual information about the given entity.
@@ -28,13 +30,7 @@ def search_wiki(entity: str) -> str:
          str: The search result. If the page corresponding to the entity
              exists, return the summary of this entity in a string.
      """
-     try:
-         import wikipedia
-     except ImportError:
-         raise ImportError(
-             "Please install `wikipedia` first. You can install it by running "
-             "`pip install wikipedia`."
-         )
+     import wikipedia
  
      result: str
  
@@ -241,6 +237,7 @@ def search_google(
      return responses
  
  
+ @dependencies_required('wolframalpha')
  def query_wolfram_alpha(query: str, is_detailed: bool) -> str:
      r"""Queries Wolfram|Alpha and returns the result. Wolfram|Alpha is an
      answer engine developed by Wolfram Research. It is offered as an online
@@ -255,13 +252,7 @@ def query_wolfram_alpha(query: str, is_detailed: bool) -> str:
      Returns:
          str: The result from Wolfram Alpha, formatted as a string.
      """
-     try:
-         import wolframalpha
-     except ImportError:
-         raise ImportError(
-             "Please install `wolframalpha` first. You can install it by"
-             " running `pip install wolframalpha`."
-         )
+     import wolframalpha
  
      WOLFRAMALPHA_APP_ID = os.environ.get('WOLFRAMALPHA_APP_ID')
      if not WOLFRAMALPHA_APP_ID:
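
The same refactor repeats in the slack, twitter, and weather modules below: inline try/except ImportError blocks give way to a bare import plus a dependencies_required decorator. The decorator's body is not shown in this diff; the pattern it encapsulates is roughly:

    import importlib.util
    from functools import wraps


    def dependencies_required(*packages: str):
        # Raise one uniform ImportError before the function body runs.
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                missing = [
                    p for p in packages
                    if importlib.util.find_spec(p) is None
                ]
                if missing:
                    raise ImportError(
                        f"Missing required packages: {', '.join(missing)}. "
                        f"Install them with `pip install {' '.join(missing)}`."
                    )
                return func(*args, **kwargs)
            return wrapper
        return decorator
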
camel/functions/slack_functions.py CHANGED
@@ -19,6 +19,8 @@ import logging
  import os
  from typing import TYPE_CHECKING, List, Optional
  
+ from camel.utils import dependencies_required
+ 
  if TYPE_CHECKING:
      from ssl import SSLContext
  
@@ -29,6 +31,7 @@ from camel.functions import OpenAIFunction
  logger = logging.getLogger(__name__)
  
  
+ @dependencies_required('slack_sdk')
  def _login_slack(
      slack_token: Optional[str] = None,
      ssl: Optional[SSLContext] = None,
@@ -50,13 +53,8 @@ def _login_slack(
          KeyError: If SLACK_BOT_TOKEN or SLACK_USER_TOKEN environment variables
              are not set.
      """
-     try:
-         from slack_sdk import WebClient
-     except ImportError as e:
-         raise ImportError(
-             "Cannot import slack_sdk. Please install the package with "
-             "`pip install slack_sdk`."
-         ) from e
+     from slack_sdk import WebClient
+ 
      if not slack_token:
          slack_token = os.environ.get("SLACK_BOT_TOKEN") or os.environ.get(
              "SLACK_USER_TOKEN"
camel/functions/twitter_function.py CHANGED
@@ -20,6 +20,7 @@ from typing import List, Optional, Tuple, Union
  import requests
  
  from camel.functions import OpenAIFunction
+ from camel.utils import dependencies_required
  
  TWEET_TEXT_LIMIT = 280
  
@@ -55,6 +56,7 @@ def get_twitter_api_key() -> Tuple[str, str]:
      return TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET
  
  
+ @dependencies_required('requests_oauthlib')
  def get_oauth_session() -> requests.Session:
      r'''Initiates an OAuth1Session with Twitter's API and returns it.
  
@@ -75,14 +77,7 @@ def get_oauth_session() -> requests.Session:
      https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/Manage-Tweets/create_tweet.py
      https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/User-Lookup/get_users_me_user_context.py
      '''
-     try:
-         from requests_oauthlib import OAuth1Session
-     except ImportError:
-         raise ImportError(
-             "Please install `requests_oauthlib` first. You can "
-             "install it by running `pip install "
-             "requests_oauthlib`."
-         )
+     from requests_oauthlib import OAuth1Session
  
      consumer_key, consumer_secret = get_twitter_api_key()
  
camel/functions/weather_functions.py CHANGED
@@ -15,6 +15,7 @@ import os
  from typing import List, Literal
  
  from camel.functions.openai_function import OpenAIFunction
+ from camel.utils import dependencies_required
  
  
  def get_openweathermap_api_key() -> str:
@@ -37,6 +38,7 @@ def get_openweathermap_api_key() -> str:
      return OPENWEATHERMAP_API_KEY
  
  
+ @dependencies_required('pyowm')
  def get_weather_data(
      city: str,
      temp_units: Literal['kelvin', 'celsius', 'fahrenheit'] = 'kelvin',
@@ -85,14 +87,7 @@ def get_weather_data(
      """
      # NOTE: This tool may not work as expected since the input arguments like
      # `time_units` should be enum types which are not supported yet.
- 
-     try:
-         import pyowm
-     except ImportError:
-         raise ImportError(
-             "Please install `pyowm` first. You can install it by running "
-             "`pip install pyowm`."
-         )
+     import pyowm
  
      OPENWEATHERMAP_API_KEY = get_openweathermap_api_key()
      owm = pyowm.OWM(OPENWEATHERMAP_API_KEY)
camel/interpreters/__init__.py CHANGED
@@ -13,6 +13,7 @@
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  
  from .base import BaseInterpreter
+ from .docker_interpreter import DockerInterpreter
  from .internal_python_interpreter import InternalPythonInterpreter
  from .interpreter_error import InterpreterError
  from .subprocess_interpreter import SubprocessInterpreter
@@ -22,4 +23,5 @@ __all__ = [
      'InterpreterError',
      'InternalPythonInterpreter',
      'SubprocessInterpreter',
+     'DockerInterpreter',
  ]
camel/interpreters/docker_interpreter.py ADDED
@@ -0,0 +1,235 @@
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+ 
+ import io
+ import shlex
+ import tarfile
+ import uuid
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional
+ 
+ from colorama import Fore
+ 
+ from camel.interpreters.base import BaseInterpreter
+ from camel.interpreters.interpreter_error import InterpreterError
+ from camel.utils import is_docker_running
+ 
+ if TYPE_CHECKING:
+     from docker.models.containers import Container
+ 
+ 
+ class DockerInterpreter(BaseInterpreter):
+     r"""A class for executing code files or code strings in a docker container.
+ 
+     This class handles the execution of code in different scripting languages
+     (currently Python and Bash) within a docker container, capturing their
+     stdout and stderr streams, and allowing user checking before executing code
+     strings.
+ 
+     Args:
+         require_confirm (bool, optional): If `True`, prompt user before
+             running code strings for security. Defaults to `True`.
+         print_stdout (bool, optional): If `True`, print the standard
+             output of the executed code. Defaults to `False`.
+         print_stderr (bool, optional): If `True`, print the standard error
+             of the executed code. Defaults to `True`.
+     """
+ 
+     _CODE_EXECUTE_CMD_MAPPING: ClassVar[Dict[str, str]] = {
+         "python": "python {file_name}",
+         "bash": "bash {file_name}",
+     }
+ 
+     _CODE_EXTENSION_MAPPING: ClassVar[Dict[str, str]] = {
+         "python": "py",
+         "bash": "sh",
+     }
+ 
+     _CODE_TYPE_MAPPING: ClassVar[Dict[str, str]] = {
+         "python": "python",
+         "py3": "python",
+         "python3": "python",
+         "py": "python",
+         "shell": "bash",
+         "bash": "bash",
+         "sh": "bash",
+     }
+ 
+     def __init__(
+         self,
+         require_confirm: bool = True,
+         print_stdout: bool = False,
+         print_stderr: bool = True,
+     ) -> None:
+         self.require_confirm = require_confirm
+         self.print_stdout = print_stdout
+         self.print_stderr = print_stderr
+ 
+         # lazy initialization of container
+         self._container: Optional[Container] = None
+ 
+     def __del__(self) -> None:
+         if self._container is not None:
+             self._container.remove(force=True)
+ 
+     def _initialize_if_needed(self) -> None:
+         if self._container is not None:
+             return
+ 
+         if not is_docker_running():
+             raise InterpreterError(
+                 "Docker daemon is not running. Please install/start docker "
+                 "and try again."
+             )
+ 
+         import docker
+ 
+         client = docker.from_env()
+         self._container = client.containers.run(
+             "python:3.10",
+             detach=True,
+             name=f"camel-interpreter-{uuid.uuid4()}",
+             command="tail -f /dev/null",
+         )
+ 
+     def _create_file_in_container(self, content: str) -> Path:
+         # get a random name for the file
+         filename = str(uuid.uuid4())
+         # create a tar in memory
+         tar_stream = io.BytesIO()
+         with tarfile.open(fileobj=tar_stream, mode='w') as tar:
+             tarinfo = tarfile.TarInfo(name=filename)
+             tarinfo.size = len(content)
+             tar.addfile(tarinfo, io.BytesIO(content.encode('utf-8')))
+         tar_stream.seek(0)
+ 
+         # copy the tar into the container
+         if self._container is None:
+             raise InterpreterError(
+                 "Container is not initialized. Try running the code again."
+             )
+         self._container.put_archive("/tmp", tar_stream)
+         return Path(f"/tmp/{filename}")
+ 
+     def _run_file_in_container(
+         self,
+         file: Path,
+         code_type: str,
+     ) -> str:
+         code_type = self._check_code_type(code_type)
+         commands = shlex.split(
+             self._CODE_EXECUTE_CMD_MAPPING[code_type].format(
+                 file_name=str(file)
+             )
+         )
+         if self._container is None:
+             raise InterpreterError(
+                 "Container is not initialized. Try running the code again."
+             )
+         stdout, stderr = self._container.exec_run(
+             commands,
+             demux=True,
+         ).output
+ 
+         if self.print_stdout and stdout:
+             print("======stdout======")
+             print(Fore.GREEN + stdout.decode() + Fore.RESET)
+             print("==================")
+         if self.print_stderr and stderr:
+             print("======stderr======")
+             print(Fore.RED + stderr.decode() + Fore.RESET)
+             print("==================")
+         exec_result = f"{stdout.decode()}" if stdout else ""
+         exec_result += f"(stderr: {stderr.decode()})" if stderr else ""
+         return exec_result
+ 
+     def run(
+         self,
+         code: str,
+         code_type: str,
+     ) -> str:
+         r"""Executes the given code in the container attached to the
+         interpreter, and captures the stdout and stderr streams.
+ 
+         Args:
+             code (str): The code string to execute.
+             code_type (str): The type of code to execute (e.g., 'python',
+                 'bash').
+ 
+         Returns:
+             str: A string containing the captured stdout and stderr of the
+                 executed code.
+ 
+         Raises:
+             InterpreterError: If the user declines to run the code, the
+                 code type is unsupported, or there is an error in the docker
+                 API/container.
+         """
+         import docker.errors
+ 
+         code_type = self._check_code_type(code_type)
+ 
+         # Print code for security checking
+         if self.require_confirm:
+             print(f"The following {code_type} code will run in container:")
+             print(Fore.CYAN + code + Fore.RESET)
+             while True:
+                 choice = input("Running code? [Y/n]:").lower()
+                 if choice in ["y", "yes", "ye", ""]:
+                     break
+                 elif choice not in ["no", "n"]:
+                     continue
+                 raise InterpreterError(
+                     "Execution halted: User opted not to run the code. "
+                     "This choice stops the current operation and any "
+                     "further code execution."
+                 )
+ 
+         self._initialize_if_needed()
+ 
+         try:
+             temp_file_path = self._create_file_in_container(code)
+             result = self._run_file_in_container(temp_file_path, code_type)
+         except docker.errors.APIError as e:
+             raise InterpreterError(
+                 f"Execution halted due to docker API error: {e.explanation}. "
+                 "This choice stops the current operation and any "
+                 "further code execution."
+             ) from e
+         except docker.errors.DockerException as e:
+             raise InterpreterError(
+                 f"Execution halted due to docker exception: {e}. "
+                 "This choice stops the current operation and any "
+                 "further code execution."
+             ) from e
+         return result
+ 
+     def _check_code_type(self, code_type: str) -> str:
+         if code_type not in self._CODE_TYPE_MAPPING:
+             raise InterpreterError(
+                 f"Unsupported code type {code_type}. Currently "
+                 f"`{self.__class__.__name__}` only supports "
+                 f"{', '.join(self._CODE_EXTENSION_MAPPING.keys())}."
+             )
+         return self._CODE_TYPE_MAPPING[code_type]
+ 
+     def supported_code_types(self) -> List[str]:
+         r"""Provides supported code types by the interpreter."""
+         return list(self._CODE_EXTENSION_MAPPING.keys())
+ 
+     def update_action_space(self, action_space: Dict[str, Any]) -> None:
+         r"""Updates action space for *python* interpreter."""
+         raise RuntimeError(
+             "DockerInterpreter doesn't support `action_space`."
+         )
+ )