camel-ai 0.2.11__py3-none-any.whl → 0.2.13__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Note: this version of camel-ai has been flagged as a potentially problematic release.

Files changed (81):
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +14 -2
  3. camel/benchmarks/__init__.py +18 -0
  4. camel/benchmarks/base.py +152 -0
  5. camel/benchmarks/gaia.py +478 -0
  6. camel/configs/__init__.py +3 -0
  7. camel/configs/ollama_config.py +4 -2
  8. camel/configs/sglang_config.py +71 -0
  9. camel/data_collector/__init__.py +19 -0
  10. camel/data_collector/alpaca_collector.py +127 -0
  11. camel/data_collector/base.py +211 -0
  12. camel/data_collector/sharegpt_collector.py +205 -0
  13. camel/datahubs/__init__.py +23 -0
  14. camel/datahubs/base.py +136 -0
  15. camel/datahubs/huggingface.py +433 -0
  16. camel/datahubs/models.py +22 -0
  17. camel/embeddings/openai_compatible_embedding.py +1 -1
  18. camel/embeddings/openai_embedding.py +1 -1
  19. camel/interpreters/__init__.py +2 -0
  20. camel/interpreters/e2b_interpreter.py +136 -0
  21. camel/loaders/__init__.py +3 -1
  22. camel/loaders/base_io.py +41 -41
  23. camel/messages/__init__.py +2 -0
  24. camel/messages/base.py +5 -5
  25. camel/models/__init__.py +4 -0
  26. camel/models/anthropic_model.py +15 -5
  27. camel/models/azure_openai_model.py +1 -1
  28. camel/models/base_model.py +28 -0
  29. camel/models/deepseek_model.py +1 -1
  30. camel/models/fish_audio_model.py +146 -0
  31. camel/models/gemini_model.py +1 -1
  32. camel/models/groq_model.py +2 -2
  33. camel/models/model_factory.py +3 -0
  34. camel/models/nemotron_model.py +1 -1
  35. camel/models/nvidia_model.py +1 -1
  36. camel/models/ollama_model.py +13 -1
  37. camel/models/openai_compatible_model.py +1 -1
  38. camel/models/openai_model.py +1 -27
  39. camel/models/qwen_model.py +1 -1
  40. camel/models/reward/__init__.py +22 -0
  41. camel/models/reward/base_reward_model.py +58 -0
  42. camel/models/reward/evaluator.py +63 -0
  43. camel/models/reward/nemotron_model.py +112 -0
  44. camel/models/samba_model.py +1 -1
  45. camel/models/sglang_model.py +225 -0
  46. camel/models/togetherai_model.py +1 -1
  47. camel/models/vllm_model.py +2 -2
  48. camel/models/yi_model.py +1 -1
  49. camel/models/zhipuai_model.py +1 -1
  50. camel/personas/persona_hub.py +2 -2
  51. camel/runtime/configs.py +12 -12
  52. camel/runtime/docker_runtime.py +7 -7
  53. camel/runtime/llm_guard_runtime.py +3 -3
  54. camel/runtime/remote_http_runtime.py +5 -5
  55. camel/runtime/utils/function_risk_toolkit.py +1 -1
  56. camel/runtime/utils/ignore_risk_toolkit.py +2 -2
  57. camel/schemas/openai_converter.py +2 -2
  58. camel/societies/workforce/role_playing_worker.py +2 -2
  59. camel/societies/workforce/single_agent_worker.py +2 -2
  60. camel/societies/workforce/workforce.py +3 -3
  61. camel/storages/object_storages/amazon_s3.py +2 -2
  62. camel/storages/object_storages/azure_blob.py +2 -2
  63. camel/storages/object_storages/google_cloud.py +2 -2
  64. camel/toolkits/__init__.py +2 -0
  65. camel/toolkits/arxiv_toolkit.py +6 -6
  66. camel/toolkits/ask_news_toolkit.py +2 -2
  67. camel/toolkits/code_execution.py +5 -1
  68. camel/toolkits/function_tool.py +41 -0
  69. camel/toolkits/github_toolkit.py +3 -3
  70. camel/toolkits/google_scholar_toolkit.py +16 -2
  71. camel/toolkits/math_toolkit.py +47 -16
  72. camel/toolkits/meshy_toolkit.py +2 -2
  73. camel/toolkits/search_toolkit.py +155 -3
  74. camel/toolkits/stripe_toolkit.py +273 -0
  75. camel/types/__init__.py +2 -0
  76. camel/types/enums.py +27 -2
  77. camel/utils/token_counting.py +31 -12
  78. {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/METADATA +24 -14
  79. {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/RECORD +81 -61
  80. {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/LICENSE +0 -0
  81. {camel_ai-0.2.11.dist-info → camel_ai-0.2.13.dist-info}/WHEEL +0 -0
camel/models/reward/evaluator.py ADDED
@@ -0,0 +1,63 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ from typing import Dict, List
+
+ from camel.models.reward import BaseRewardModel
+
+
+ class Evaluator:
+     r"""Evaluator class to evaluate messages using a reward model and filter
+     data based on the scores.
+
+     Args:
+         reward_model (BaseRewardModel): A reward model to evaluate messages.
+     """
+
+     def __init__(self, reward_model: BaseRewardModel):
+         self.reward_model = reward_model
+
+     def evaluate(self, messages: List[Dict[str, str]]) -> Dict[str, float]:
+         r"""Evaluate the messages using the reward model.
+
+         Args:
+             messages (List[Dict[str, str]]): A list of messages where each
+                 message is a dictionary with 'role' and 'content'.
+
+         Returns:
+             Dict[str, float]: A dictionary mapping score types to their values.
+         """
+         scores = self.reward_model.evaluate(messages)
+         return scores
+
+     def filter_data(
+         self, messages: List[Dict[str, str]], thresholds: Dict[str, float]
+     ) -> bool:
+         r"""Filter messages based on the scores.
+
+         Args:
+             messages (List[Dict[str, str]]): A list of messages where each
+                 message is a dictionary with 'role' and 'content'.
+             thresholds (Dict[str, float]): A dictionary mapping score types to
+                 their values.
+
+         Returns:
+             bool: A boolean indicating whether the messages pass the filter.
+         """
+         scores = self.evaluate(messages)
+         for score_type, threshold in thresholds.items():
+             if score_type not in scores:
+                 raise ValueError(f"Score type {score_type} not found.")
+             if scores.get(score_type, 0) < threshold:
+                 return False
+         return True
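For orientation, a minimal usage sketch of the new Evaluator API. The toy ConstantRewardModel below is illustrative only; it assumes BaseRewardModel's constructor accepts a model_type (mirroring the super().__init__ call in NemotronRewardModel in the next hunk):

from typing import Dict, List

from camel.models.reward import BaseRewardModel
from camel.models.reward.evaluator import Evaluator


class ConstantRewardModel(BaseRewardModel):
    """Toy reward model returning fixed scores (not part of this release)."""

    def __init__(self) -> None:
        super().__init__(model_type="constant-reward")  # assumed signature

    def evaluate(self, messages: List[Dict[str, str]]) -> Dict[str, float]:
        return {"helpfulness": 3.0, "correctness": 4.0}

    def get_scores_types(self) -> List[str]:
        return ["helpfulness", "correctness"]


evaluator = Evaluator(reward_model=ConstantRewardModel())
messages = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "4."},
]

print(evaluator.evaluate(messages))  # {'helpfulness': 3.0, 'correctness': 4.0}
print(evaluator.filter_data(messages, {"correctness": 3.5}))  # True: 4.0 >= 3.5
print(evaluator.filter_data(messages, {"helpfulness": 3.5}))  # False: 3.0 < 3.5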
camel/models/reward/nemotron_model.py ADDED
@@ -0,0 +1,112 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ import os
+ from typing import Dict, List, Optional, Union
+
+ from openai import OpenAI
+
+ from camel.models.reward import BaseRewardModel
+ from camel.types import ChatCompletion, ModelType
+ from camel.utils import api_keys_required
+
+
+ class NemotronRewardModel(BaseRewardModel):
+     r"""Reward model based on the Nemotron model with OpenAI compatibility.
+
+     Args:
+         model_type (Union[ModelType, str]): Model for which a backend is
+             created.
+         api_key (Optional[str], optional): The API key for authenticating
+             with the model service. (default: :obj:`None`)
+         url (Optional[str], optional): The url to the model service.
+
+     Note:
+         The Nemotron model does not support model config.
+     """
+
+     def __init__(
+         self,
+         model_type: Union[ModelType, str],
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+     ) -> None:
+         url = url or os.environ.get(
+             "NVIDIA_API_BASE_URL", "https://integrate.api.nvidia.com/v1"
+         )
+         api_key = api_key or os.environ.get("NVIDIA_API_KEY")
+         super().__init__(model_type, api_key, url)
+         self._client = OpenAI(
+             timeout=180,
+             max_retries=3,
+             base_url=self.url,
+             api_key=self.api_key,
+         )
+
+     @api_keys_required("NVIDIA_API_KEY")
+     def evaluate(self, messages: List[Dict[str, str]]) -> Dict[str, float]:
+         r"""Evaluate the messages using the Nemotron model.
+
+         Args:
+             messages (List[Dict[str, str]]): A list of messages where each
+                 message is a dictionary format.
+
+         Returns:
+             Dict[str, float]: A dictionary mapping score types to their
+                 values.
+         """
+         response = self._client.chat.completions.create(
+             messages=messages,  # type: ignore[arg-type]
+             model=self.model_type,
+         )
+         scores = self._parse_scores(response)
+         return scores
+
+     def get_scores_types(self) -> List[str]:
+         r"""Get the list of score types that the reward model can return.
+
+         Returns:
+             List[str]: A list of score types that the reward model can return.
+         """
+         return [
+             "helpfulness",
+             "correctness",
+             "coherence",
+             "complexity",
+             "verbosity",
+         ]
+
+     def _parse_scores(self, response: ChatCompletion) -> Dict[str, float]:
+         r"""Parse the scores from the response.
+
+         Args:
+             response (ChatCompletion): A ChatCompletion object with the scores.
+
+         Returns:
+             Dict[str, float]: A dictionary mapping score types to their values.
+         """
+         try:
+             choices = response.choices
+             logprobs = (
+                 choices[0].logprobs.content
+                 if choices and choices[0].logprobs
+                 else None
+             )
+             scores = (
+                 {entry.token: entry.logprob for entry in logprobs if entry}
+                 if logprobs
+                 else {}
+             )
+             return scores
+         except Exception as e:
+             raise ValueError(f"Failed to parse scores: {e}")
camel/models/samba_model.py CHANGED
@@ -95,7 +95,7 @@ class SambaModel(BaseModelBackend):
 
          if self._url == "https://api.sambanova.ai/v1":
              self._client = OpenAI(
-                 timeout=60,
+                 timeout=180,
                  max_retries=3,
                  base_url=self._url,
                  api_key=self._api_key,
camel/models/sglang_model.py ADDED
@@ -0,0 +1,225 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ import logging
+ import threading
+ import time
+ from typing import Any, Dict, List, Optional, Union
+
+ from openai import OpenAI, Stream
+
+ from camel.configs import SGLANG_API_PARAMS, SGLangConfig
+ from camel.messages import OpenAIMessage
+ from camel.models import BaseModelBackend
+ from camel.types import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     ModelType,
+ )
+ from camel.utils import BaseTokenCounter, OpenAITokenCounter
+
+
+ class SGLangModel(BaseModelBackend):
+     r"""SGLang service interface.
+
+     Args:
+         model_type (Union[ModelType, str]): Model for which a backend is
+             created.
+         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+             that will be fed into:obj:`openai.ChatCompletion.create()`. If
+             :obj:`None`, :obj:`SGLangConfig().as_dict()` will be used.
+             (default: :obj:`None`)
+         api_key (Optional[str], optional): The API key for authenticating with
+             the model service. SGLang doesn't need API key, it would be ignored
+             if set. (default: :obj:`None`)
+         url (Optional[str], optional): The url to the model service. If not
+             provided, :obj:`"http://127.0.0.1:30000/v1"` will be used.
+             (default: :obj:`None`)
+         token_counter (Optional[BaseTokenCounter], optional): Token counter to
+             use for the model. If not provided, :obj:`OpenAITokenCounter(
+             ModelType.GPT_4O_MINI)` will be used.
+             (default: :obj:`None`)
+
+     Reference: https://sgl-project.github.io/backend/openai_api_completions.html
+     """
+
+     def __init__(
+         self,
+         model_type: Union[ModelType, str],
+         model_config_dict: Optional[Dict[str, Any]] = None,
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+     ) -> None:
+         if model_config_dict is None:
+             model_config_dict = SGLangConfig().as_dict()
+
+         self.server_process = None
+         self.last_run_time: Optional[float] = (
+             None  # Will be set when the server starts
+         )
+         self._lock = threading.Lock()
+         self._inactivity_thread: Optional[threading.Thread] = None
+
+         super().__init__(
+             model_type, model_config_dict, api_key, url, token_counter
+         )
+
+         self._client = None
+
+         if self._url:
+             # Initialize the client if an existing URL is provided
+             self._client = OpenAI(
+                 timeout=180,
+                 max_retries=3,
+                 api_key="Set-but-ignored",  # required but ignored
+                 base_url=self._url,
+             )
+
+     def _start_server(self) -> None:
+         from sglang.utils import (  # type: ignore[import-untyped]
+             execute_shell_command,
+             wait_for_server,
+         )
+
+         try:
+             if not self._url:
+                 cmd = (
+                     f"python -m sglang.launch_server "
+                     f"--model-path {self.model_type} "
+                     f"--port 30000 "
+                     f"--host 0.0.0.0"
+                 )
+
+                 server_process = execute_shell_command(cmd)
+                 wait_for_server("http://localhost:30000")
+                 self._url = "http://127.0.0.1:30000/v1"
+                 self.server_process = server_process
+             # Start the inactivity monitor in a background thread
+             self._inactivity_thread = threading.Thread(
+                 target=self._monitor_inactivity, daemon=True
+             )
+             self._inactivity_thread.start()
+             self.last_run_time = time.time()
+             # Initialize the client after the server starts
+             self._client = OpenAI(
+                 timeout=180,
+                 max_retries=3,
+                 api_key="Set-but-ignored",  # required but ignored
+                 base_url=self._url,
+             )
+         except Exception as e:
+             raise RuntimeError(f"Failed to start SGLang server: {e}") from e
+
+     def _ensure_server_running(self) -> None:
+         r"""Ensures that the server is running. If not, starts the server."""
+         with self._lock:
+             if self.server_process is None:
+                 self._start_server()
+
+     def _monitor_inactivity(self):
+         r"""Monitor whether the server process has been inactive for over 10
+         minutes.
+         """
+         from sglang.utils import terminate_process
+
+         while True:
+             # Check every 10 seconds
+             time.sleep(10)
+             # Over 10 minutes
+             with self._lock:
+                 # Over 10 minutes
+                 if self.last_run_time and (
+                     time.time() - self.last_run_time > 600
+                 ):
+                     if self.server_process:
+                         terminate_process(self.server_process)
+                         self.server_process = None
+                         self._client = None  # Invalidate the client
+                         logging.info(
+                             "Server process terminated due to inactivity."
+                         )
+                     break
+
+     @property
+     def token_counter(self) -> BaseTokenCounter:
+         r"""Initialize the token counter for the model backend.
+
+         Returns:
+             BaseTokenCounter: The token counter following the model's
+                 tokenization style.
+         """
+         if not self._token_counter:
+             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+         return self._token_counter
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any
+         unexpected arguments to SGLang API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+                 unexpected arguments to OpenAI API.
+         """
+         for param in self.model_config_dict:
+             if param not in SGLANG_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into SGLang model backend."
+                 )
+
+     def run(
+         self,
+         messages: List[OpenAIMessage],
+     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+         r"""Runs inference of OpenAI chat completion.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+
+         Returns:
+             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `Stream[ChatCompletionChunk]` in the stream mode.
+         """
+
+         # Ensure server is running
+         self._ensure_server_running()
+
+         with self._lock:
+             # Update last run time
+             self.last_run_time = time.time()
+
+         if self._client is None:
+             raise RuntimeError(
+                 "Client is not initialized. Ensure the server is running."
+             )
+
+         response = self._client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+             **self.model_config_dict,
+         )
+
+         return response
+
+     @property
+     def stream(self) -> bool:
+         r"""Returns whether the model is in stream mode, which sends partial
+         results each time.
+
+         Returns:
+             bool: Whether the model is in stream mode.
+         """
+         return self.model_config_dict.get('stream', False)
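A hypothetical end-to-end sketch of the new backend: point it at an already-running SGLang server, or omit url to let it launch one on port 30000 (and shut it down after 10 minutes of inactivity). The model path is an assumption, not part of this diff:

from camel.configs import SGLangConfig
from camel.models.sglang_model import SGLangModel

model = SGLangModel(
    model_type="meta-llama/Llama-3.1-8B-Instruct",  # any servable model path
    model_config_dict=SGLangConfig().as_dict(),
    url="http://127.0.0.1:30000/v1",  # reuse a running server; None auto-launches
)

# Non-stream config, so run() returns a ChatCompletion
response = model.run(
    [{"role": "user", "content": "Say hello in one short sentence."}]
)
print(response.choices[0].message.content)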
camel/models/togetherai_model.py CHANGED
@@ -72,7 +72,7 @@ class TogetherAIModel(BaseModelBackend):
          )
 
          self._client = OpenAI(
-             timeout=60,
+             timeout=180,
              max_retries=3,
              api_key=self._api_key,
              base_url=self._url,
camel/models/vllm_model.py CHANGED
@@ -72,9 +72,9 @@ class VLLMModel(BaseModelBackend):
              self._start_server()
          # Use OpenAI cilent as interface call vLLM
          self._client = OpenAI(
-             timeout=60,
+             timeout=180,
              max_retries=3,
-             api_key="Set-but-ignored",  # required but ignored
+             api_key="EMPTY",  # required but ignored
              base_url=self._url,
          )
 
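The "EMPTY" placeholder matches the convention used in vLLM's own OpenAI-compatible-server examples; the server ignores the value unless it was launched with an explicit --api-key. Talking to such a server directly looks like this (the URL and model name are assumptions):

from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8000/v1",  # default vLLM server address
    api_key="EMPTY",  # required by the client, ignored by the server
)
response = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",  # whatever the server is serving
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)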
camel/models/yi_model.py CHANGED
@@ -70,7 +70,7 @@ class YiModel(BaseModelBackend):
              model_type, model_config_dict, api_key, url, token_counter
          )
          self._client = OpenAI(
-             timeout=60,
+             timeout=180,
              max_retries=3,
              api_key=self._api_key,
              base_url=self._url,
camel/models/zhipuai_model.py CHANGED
@@ -70,7 +70,7 @@ class ZhipuAIModel(BaseModelBackend):
              model_type, model_config_dict, api_key, url, token_counter
          )
          self._client = OpenAI(
-             timeout=60,
+             timeout=180,
              max_retries=3,
              api_key=self._api_key,
              base_url=self._url,
camel/personas/persona_hub.py CHANGED
@@ -11,7 +11,7 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
- import ast
+ import json
  import re
  import uuid
  from functools import lru_cache
@@ -130,7 +130,7 @@ class PersonaHub:
                  text_to_persona_prompt_instruction,
                  response_format=PersonaResponse,  # type: ignore[arg-type]
              )
-             parsed_content = ast.literal_eval(response.msg.content)
+             parsed_content = json.loads(response.msg.content)
              persona.name = parsed_content["persona_name"]
              persona.description = parsed_content["persona_description"]
          except Exception as e:
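The ast.literal_eval → json.loads switch here (and in role_playing_worker.py further below) matters because structured-output responses are JSON, and JSON literals such as true, false, and null are not valid Python literals:

import ast
import json

payload = '{"persona_name": "Dr. Ada", "is_expert": true}'

print(json.loads(payload))  # {'persona_name': 'Dr. Ada', 'is_expert': True}

try:
    ast.literal_eval(payload)
except (ValueError, SyntaxError) as exc:
    # 'true' parses as a Python name, not a literal, so literal_eval rejects it
    print(f"ast.literal_eval failed: {exc}")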
camel/runtime/configs.py CHANGED
@@ -21,23 +21,23 @@ class TaskConfig(BaseModel):
 
      Arttributes:
          cmd (str or list): Command to be executed
-         stdout (bool): Attach to stdout. (default::obj: `True`)
-         stderr (bool): Attach to stderr. (default::obj: `True`)
-         stdin (bool): Attach to stdin. (default::obj: `False`)
-         tty (bool): Allocate a pseudo-TTY. (default::obj: `False`)
-         privileged (bool): Run as privileged. (default::obj: `False`)
-         user (str): User to execute command as. (default::obj: `""`)
+         stdout (bool): Attach to stdout. (default: :obj: `True`)
+         stderr (bool): Attach to stderr. (default: :obj: `True`)
+         stdin (bool): Attach to stdin. (default: :obj: `False`)
+         tty (bool): Allocate a pseudo-TTY. (default: :obj: `False`)
+         privileged (bool): Run as privileged. (default: :obj: `False`)
+         user (str): User to execute command as. (default: :obj: `""`)
          detach (bool): If true, detach from the exec command.
-             (default::obj: `False`)
-         stream (bool): Stream response data. (default::obj: `False`)
+             (default: :obj: `False`)
+         stream (bool): Stream response data. (default: :obj: `False`)
          socket (bool): Return the connection socket to allow custom
-             read/write operations. (default::obj: `False`)
+             read/write operations. (default: :obj: `False`)
          environment (dict or list): A dictionary or a list of strings in
              the following format ``["PASSWORD=xxx"]`` or
-             ``{"PASSWORD": "xxx"}``. (default::obj: `None`)
+             ``{"PASSWORD": "xxx"}``. (default: :obj: `None`)
          workdir (str): Path to working directory for this exec session.
-             (default::obj: `None`)
-         demux (bool): Return stdout and stderr separately. (default::obj:
+             (default: :obj: `None`)
+         demux (bool): Return stdout and stderr separately. (default: :obj:
              `False`)
      """
 
camel/runtime/docker_runtime.py CHANGED
@@ -42,10 +42,10 @@ class DockerRuntime(BaseRuntime):
 
      Args:
          image (str): The name of the Docker image to use for the runtime.
-         port (int): The port number to use for the runtime API. (default::obj:
+         port (int): The port number to use for the runtime API. (default: :obj:
              `8000`)
          remove (bool): Whether to remove the container after stopping it. '
-             (default::obj: `True`)
+             (default: :obj: `True`)
          kwargs (dict): Additional keyword arguments to pass to the
              Docker client.
      """
@@ -170,7 +170,7 @@ class DockerRuntime(BaseRuntime):
 
      Args:
          time_out (int): The number of seconds to wait for the container to
-             start. (default::obj: `15`)
+             start. (default: :obj: `15`)
 
      Returns:
          DockerRuntime: The DockerRuntime instance.
@@ -259,9 +259,9 @@ class DockerRuntime(BaseRuntime):
              list of functions to add.
          entrypoint (str): The entrypoint for the function.
          redirect_stdout (bool): Whether to return the stdout of
-             the function. (default::obj: `False`)
+             the function. (default: :obj: `False`)
          arguments (Optional[Dict[str, Any]]): The arguments for the
-             function. (default::obj: `None`)
+             function. (default: :obj: `None`)
 
      Returns:
          DockerRuntime: The DockerRuntime instance.
@@ -330,7 +330,7 @@ class DockerRuntime(BaseRuntime):
 
      Args:
          remove (Optional[bool]): Whether to remove the container
-             after stopping it. (default::obj: `None`)
+             after stopping it. (default: :obj: `None`)
 
      Returns:
          DockerRuntime: The DockerRuntime instance.
@@ -366,7 +366,7 @@ class DockerRuntime(BaseRuntime):
      r"""Wait for the API Server to be ready.
 
      Args:
-         timeout (int): The number of seconds to wait. (default::obj: `10`)
+         timeout (int): The number of seconds to wait. (default: :obj: `10`)
 
      Returns:
          bool: Whether the API Server is ready.
camel/runtime/llm_guard_runtime.py CHANGED
@@ -68,9 +68,9 @@ class LLMGuardRuntime(BaseRuntime):
      Arguments:
          prompt (str): The prompt to use for the language model. (default:
              :obj:`GUARDPROMPT`)
-         model (BaseModelBackend): The language model to use. (default::obj:
+         model (BaseModelBackend): The language model to use. (default: :obj:
              `None`)
-         verbose (bool): Whether to print verbose output. (default::obj:
+         verbose (bool): Whether to print verbose output. (default: :obj:
              `False`)
      """
 
@@ -114,7 +114,7 @@ class LLMGuardRuntime(BaseRuntime):
          funcs (FunctionTool or List[FunctionTool]): The function or
              list of functions to add.
          threshold (int): The risk threshold for functions.
-             (default::obj:`2`)
+             (default: :obj:`2`)
 
      Returns:
          LLMGuardRuntime: The current runtime.
camel/runtime/remote_http_runtime.py CHANGED
@@ -36,9 +36,9 @@ class RemoteHttpRuntime(BaseRuntime):
 
      Args:
          host (str): The host of the remote server.
-         port (int): The port of the remote server. (default::obj: `8000`)
+         port (int): The port of the remote server. (default: :obj: `8000`)
          python_exec (str): The python executable to run the API server.
-             (default::obj: `python3`)
+             (default: :obj: `python3`)
      """
 
      def __init__(
@@ -90,9 +90,9 @@ class RemoteHttpRuntime(BaseRuntime):
              list of functions to add.
          entrypoint (str): The entrypoint for the function.
          redirect_stdout (bool): Whether to return the stdout of
-             the function. (default::obj: `False`)
+             the function. (default: :obj: `False`)
          arguments (Optional[Dict[str, Any]]): The arguments for the
-             function. (default::obj: `None`)
+             function. (default: :obj: `None`)
 
      Returns:
          RemoteHttpRuntime: The current runtime.
@@ -162,7 +162,7 @@ class RemoteHttpRuntime(BaseRuntime):
      r"""Wait for the API Server to be ready.
 
      Args:
-         timeout (int): The number of seconds to wait. (default::obj: `10`)
+         timeout (int): The number of seconds to wait. (default: :obj: `10`)
 
      Returns:
          bool: Whether the API Server is ready.
camel/runtime/utils/function_risk_toolkit.py CHANGED
@@ -22,7 +22,7 @@ class FunctionRiskToolkit(BaseToolkit):
 
      Args:
          verbose (Optional[bool]): Whether to print verbose output.
-             (default::obj:`False`)
+             (default: :obj:`False`)
      """
 
      def __init__(self, verbose: Optional[bool] = False):
camel/runtime/utils/ignore_risk_toolkit.py CHANGED
@@ -22,9 +22,9 @@ class IgnoreRiskToolkit(BaseToolkit):
 
      Args:
          function_names (Optional[List[str]]): A list of function names to
-             ignore risks for. (default::obj:`None`)
+             ignore risks for. (default: :obj:`None`)
          verbose (Optional[bool]): Whether to print verbose output.
-             (default::obj:`False`)
+             (default: :obj:`False`)
      """
 
      def __init__(
camel/schemas/openai_converter.py CHANGED
@@ -28,8 +28,8 @@ from camel.utils import (
  from .base import BaseConverter
 
  DEFAULT_CONVERTER_PROMPTS = """
-     Extract key entities and attributes from the provided text,
-     and convert them into a structured JSON format.
+     Extract key entities and attributes from the user
+     provided text, and convert them into a structured JSON format.
  """
 
 
camel/societies/workforce/role_playing_worker.py CHANGED
@@ -13,7 +13,7 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  from __future__ import annotations
 
- import ast
+ import json
  from typing import Dict, List, Optional
 
  from colorama import Fore
@@ -173,7 +173,7 @@ class RolePlayingWorker(Worker):
              content=prompt,
          )
          response = self.summarize_agent.step(req, response_format=TaskResult)
-         result_dict = ast.literal_eval(response.msg.content)
+         result_dict = json.loads(response.msg.content)
          task_result = TaskResult(**result_dict)
          task.result = task_result.content