camel-ai 0.2.23a0__py3-none-any.whl → 0.2.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +16 -2
- camel/configs/anthropic_config.py +45 -11
- camel/configs/sglang_config.py +7 -5
- camel/datagen/self_improving_cot.py +2 -2
- camel/datagen/self_instruct/self_instruct.py +46 -2
- camel/interpreters/subprocess_interpreter.py +187 -46
- camel/models/__init__.py +2 -0
- camel/models/anthropic_model.py +5 -1
- camel/models/base_audio_model.py +92 -0
- camel/models/fish_audio_model.py +18 -8
- camel/models/model_manager.py +9 -0
- camel/models/openai_audio_models.py +80 -1
- camel/models/sglang_model.py +35 -5
- camel/societies/role_playing.py +119 -0
- camel/toolkits/__init__.py +17 -1
- camel/toolkits/audio_analysis_toolkit.py +238 -0
- camel/toolkits/excel_toolkit.py +172 -0
- camel/toolkits/file_write_toolkit.py +371 -0
- camel/toolkits/image_analysis_toolkit.py +202 -0
- camel/toolkits/mcp_toolkit.py +251 -0
- camel/toolkits/page_script.js +376 -0
- camel/toolkits/terminal_toolkit.py +421 -0
- camel/toolkits/video_analysis_toolkit.py +407 -0
- camel/toolkits/{video_toolkit.py → video_download_toolkit.py} +19 -25
- camel/toolkits/web_toolkit.py +1306 -0
- camel/types/enums.py +3 -0
- {camel_ai-0.2.23a0.dist-info → camel_ai-0.2.25.dist-info}/METADATA +241 -106
- {camel_ai-0.2.23a0.dist-info → camel_ai-0.2.25.dist-info}/RECORD +60 -50
- {camel_ai-0.2.23a0.dist-info → camel_ai-0.2.25.dist-info}/WHEEL +1 -1
- {camel_ai-0.2.23a0.dist-info → camel_ai-0.2.25.dist-info/licenses}/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -694,11 +694,18 @@ class ChatAgent(BaseAgent):
                 f"index: {self.model_backend.current_model_index}",
                 exc_info=exc,
             )
-
+            error_info = str(exc)
+
+        if not response and self.model_backend.num_models > 1:
             raise ModelProcessingError(
                 "Unable to process messages: none of the provided models "
                 "run succesfully."
             )
+        elif not response:
+            raise ModelProcessingError(
+                f"Unable to process messages: the only provided model "
+                f"did not run succesfully. Error: {error_info}"
+            )
 
         logger.info(
             f"Model {self.model_backend.model_type}, "
@@ -732,11 +739,18 @@ class ChatAgent(BaseAgent):
                 f"index: {self.model_backend.current_model_index}",
                 exc_info=exc,
             )
-
+            error_info = str(exc)
+
+        if not response and self.model_backend.num_models > 1:
             raise ModelProcessingError(
                 "Unable to process messages: none of the provided models "
                 "run succesfully."
             )
+        elif not response:
+            raise ModelProcessingError(
+                f"Unable to process messages: the only provided model "
+                f"did not run succesfully. Error: {error_info}"
+            )
 
         logger.info(
             f"Model {self.model_backend.model_type}, "
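Both hunks apply the same fix to the synchronous and asynchronous paths: the handler now captures the last exception as error_info, and when only one model is configured the raised ModelProcessingError carries that underlying error text instead of the generic multi-model message. A minimal sketch of the added control flow, with run_model and num_models as stand-ins for the model-backend calls (not camel APIs):

    # Stand-alone sketch of the failure path added above.
    def run_with_fallback(run_model, num_models: int):
        error_info = ""
        response = None
        try:
            response = run_model()
        except Exception as exc:
            error_info = str(exc)

        if not response and num_models > 1:
            raise RuntimeError("none of the provided models ran successfully")
        elif not response:
            raise RuntimeError(
                f"the only provided model failed. Error: {error_info}"
            )
        return response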
camel/configs/anthropic_config.py
CHANGED

@@ -23,23 +23,24 @@ class AnthropicConfig(BaseConfig):
     r"""Defines the parameters for generating chat completions using the
     Anthropic API.
 
-    See: https://docs.anthropic.com/
+    See: https://docs.anthropic.com/en/api/messages
     Args:
         max_tokens (int, optional): The maximum number of tokens to
             generate before stopping. Note that Anthropic models may stop
             before reaching this maximum. This parameter only specifies the
             absolute maximum number of tokens to generate.
             (default: :obj:`8192`)
-        stop_sequences (List[str], optional):
-            model to stop generating
-
-
-
-
+        stop_sequences (List[str], optional): Custom text sequences that will
+            cause the model to stop generating. The models will normally stop
+            when they have naturally completed their turn. If the model
+            encounters one of these custom sequences, the response will be
+            terminated and the stop_reason will be "stop_sequence".
+            (default: :obj:`[]`)
         temperature (float, optional): Amount of randomness injected into the
             response. Defaults to 1. Ranges from 0 to 1. Use temp closer to 0
             for analytical / multiple choice, and closer to 1 for creative
-            and generative tasks.
+            and generative tasks. Note that even with temperature of 0.0, the
+            results will not be fully deterministic. (default: :obj:`1`)
         top_p (float, optional): Use nucleus sampling. In nucleus sampling, we
             compute the cumulative distribution over all the options for each
             subsequent token in decreasing probability order and cut it off

@@ -49,9 +50,20 @@ class AnthropicConfig(BaseConfig):
         top_k (int, optional): Only sample from the top K options for each
             subsequent token. Used to remove "long tail" low probability
             responses. (default: :obj:`5`)
-        metadata: An object describing metadata about the request.
         stream (bool, optional): Whether to incrementally stream the response
             using server-sent events. (default: :obj:`False`)
+        metadata (Union[dict, NotGiven], optional): An object describing
+            metadata about the request. Can include user_id as an external
+            identifier for the user associated with the request.
+            (default: :obj:`NotGiven()`)
+        thinking (Union[dict, NotGiven], optional): Configuration for enabling
+            Claude's extended thinking. When enabled, responses include
+            thinking content blocks showing Claude's thinking process.
+            (default: :obj:`NotGiven()`)
+        tool_choice (Union[dict, NotGiven], optional): How the model should
+            use the provided tools. The model can use a specific tool, any
+            available tool, decide by itself, or not use tools at all.
+            (default: :obj:`NotGiven()`)
     """
 
     max_tokens: int = 8192

@@ -60,11 +72,33 @@ class AnthropicConfig(BaseConfig):
     top_p: Union[float, NotGiven] = 0.7
     top_k: Union[int, NotGiven] = 5
     stream: bool = False
+    metadata: Union[dict, NotGiven] = NotGiven()
+    thinking: Union[dict, NotGiven] = NotGiven()
+    tool_choice: Union[dict, NotGiven] = NotGiven()
 
     def as_dict(self) -> dict[str, Any]:
         config_dict = super().as_dict()
-
-
+        # Create a list of keys to remove to avoid modifying dict
+        keys_to_remove = [
+            key
+            for key, value in config_dict.items()
+            if isinstance(value, NotGiven)
+        ]
+
+        for key in keys_to_remove:
+            del config_dict[key]
+
+        # remove some keys if thinking is enabled
+        thinking_enabled = (
+            not isinstance(self.thinking, NotGiven)
+            and self.thinking["type"] == "enabled"
+        )
+        if thinking_enabled:
+            # `top_p`, `top_k`, `temperature` must be unset when thinking is
+            # enabled.
+            config_dict.pop("top_k", None)
+            config_dict.pop("top_p", None)
+            config_dict.pop("temperature", None)
         return config_dict
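A short usage sketch for the new fields, assuming the payload shape Anthropic's Messages API uses for extended thinking ({"type": "enabled", "budget_tokens": ...}); note how as_dict() strips every NotGiven field and, with thinking enabled, also drops the sampling parameters the API forbids:

    from camel.configs import AnthropicConfig

    config = AnthropicConfig(
        temperature=0.7,
        thinking={"type": "enabled", "budget_tokens": 2048},
    )

    # metadata and tool_choice are NotGiven, so they are stripped; enabling
    # thinking also removes temperature, top_p, and top_k from the payload.
    print(config.as_dict())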
camel/configs/sglang_config.py
CHANGED
@@ -13,7 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import Sequence, Union
+from typing import Any, Dict, List, Optional, Sequence, Union
 
 from camel.configs.base_config import BaseConfig
 from camel.types import NOT_GIVEN, NotGiven

@@ -56,10 +56,11 @@ class SGLangConfig(BaseConfig):
             in the chat completion. The total length of input tokens and
             generated tokens is limited by the model's context length.
             (default: :obj:`None`)
-        tools (list[
-
-
-
+        tools (list[Dict[str, Any]], optional): A list of tool definitions
+            that the model can dynamically invoke. Each tool should be
+            defined as a dictionary following OpenAI's function calling
+            specification format. For more details, refer to the OpenAI
+            documentation.
     """
 
     stop: Union[str, Sequence[str], NotGiven] = NOT_GIVEN

@@ -70,6 +71,7 @@ class SGLangConfig(BaseConfig):
     presence_penalty: float = 0.0
     stream: bool = False
     max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    tools: Optional[Union[List[Dict[str, Any]]]] = None
 
 
 SGLANG_API_PARAMS = {param for param in SGLangConfig.model_fields.keys()}
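A sketch of the new tools field, using an illustrative tool definition in OpenAI's function-calling format (the get_weather schema below is made up):

    from camel.configs import SGLangConfig

    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }

    config = SGLangConfig(tools=[weather_tool])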
camel/datagen/self_improving_cot.py
CHANGED

@@ -161,13 +161,13 @@ class SelfImprovingCoTPipeline:
         # Initialize output file with empty results if path is specified
         if self.output_path:
             with open(self.output_path, 'w') as f:
-                json.dump({'traces': []}, f, indent=2)
+                json.dump({'traces': []}, f, indent=2, ensure_ascii=False)
         self.lock = threading.Lock()
 
     def safe_write_json(self, file_path, data):
         temp_path = file_path + ".tmp"
         with open(temp_path, "w") as f:
-            json.dump(data, f, indent=2)
+            json.dump(data, f, indent=2, ensure_ascii=False)
         os.replace(temp_path, file_path)
 
     def clean_json(self, data):
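ensure_ascii=False makes json.dump write non-ASCII characters literally instead of as \uXXXX escapes, which keeps non-English reasoning traces readable on disk. Combined with the write-to-temp-then-os.replace step already in safe_write_json, readers never observe a half-written file. The same pattern in isolation:

    import json
    import os

    data = {"traces": [{"answer": "推理步骤"}]}  # non-ASCII survives as-is

    tmp_path = "traces.json.tmp"
    with open(tmp_path, "w", encoding="utf-8") as f:
        json.dump(data, f, indent=2, ensure_ascii=False)
    os.replace(tmp_path, "traces.json")  # atomic rename on POSIX and Windows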
camel/datagen/self_instruct/self_instruct.py
CHANGED

@@ -15,16 +15,20 @@
 import json
 import os
 import random
+import time
 from typing import Any, Dict, List, Optional
 
 from pydantic import BaseModel, Field
 
 from camel.agents import ChatAgent
+from camel.logger import get_logger
 
 from .filter import RougeSimilarityFilter
 from .filter.instruction_filter import InstructionFilter
 from .templates import SelfInstructTemplates
 
+logger = get_logger(__name__)
+
 
 class SelfInstructPipeline:
     r"""A pipeline to generate and manage machine-generated instructions for

@@ -210,18 +214,28 @@ class SelfInstructPipeline:
             )
             return structured_response.answer
         except ValueError as e:
-
+            logger.error(f"Error parsing agent response: {e}")
             return False
 
     def generate_machine_instances(self):
         r"""Generate instances for each machine task based on its
         classification status.
         """
+        logger.info(
+            f"Starting output generation: target {len(self.machine_tasks)} "
+            f"instructions"
+        )
+        attempt_count = 0
         for instruction in self.machine_tasks:
             instance = self.generate_machine_instance(
                 instruction['instruction'], instruction['is_classification']
             )
             instruction['instances'] = instance
+            attempt_count += 1
+            logger.info(
+                f"Attempt[Output]: Progress {attempt_count}/"
+                f"{len(self.machine_tasks)} instructions"
+            )
 
     def generate_machine_instance(
         self, instruction: str, classification: bool

@@ -368,11 +382,30 @@ class SelfInstructPipeline:
         with open(self.data_output_path, 'w') as f:
             json.dump(self.machine_tasks, f, indent=4, ensure_ascii=False)
 
-    def generate(self):
+    def generate(self, timeout_minutes=600):
         r"""Execute the entire pipeline to generate machine instructions
         and instances.
+
+        Args:
+            timeout_minutes (int): Maximum time in minutes to run the
+                generation process before timing out. (default: :obj:`600`)
         """
+        start_time = time.time()
+        timeout_seconds = timeout_minutes * 60
+        logger.info(
+            f"Starting instruction generation: target "
+            f"{self.num_machine_instructions} instructions"
+        )
         while len(self.machine_tasks) < self.num_machine_instructions:
+            # Check for timeout
+            elapsed = time.time() - start_time
+            if elapsed > timeout_seconds:
+                logger.info(
+                    f"Generation timed out after {elapsed / 60:.1f} minutes. "
+                    f"Generated {len(self.machine_tasks)}/"
+                    f"{self.num_machine_instructions} instructions."
+                )
+                break
             prompt, instruction = self.generate_machine_instruction()
             existing_instructions = [
                 t["instruction"] for t in self.human_tasks

@@ -389,6 +422,17 @@ class SelfInstructPipeline:
                 ),
             }
             self.machine_tasks.append(instruction_dict)
+            logger.info(
+                f"Attempt[Instruction]: Progress "
+                f"{len(self.machine_tasks)}/"
+                f"{self.num_machine_instructions} "
+                f"instructions"
+            )
+        else:
+            logger.warning(
+                f"Instruction failed filters. Skipping instruction: "
+                f"{instruction}"
+            )
         self.generate_machine_instances()
         self.construct_data()
 
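A usage sketch for the new timeout; the constructor arguments below are illustrative, not the full signature:

    from camel.agents import ChatAgent
    from camel.datagen.self_instruct import SelfInstructPipeline

    agent = ChatAgent("You are a helpful assistant.")
    pipeline = SelfInstructPipeline(
        agent=agent,
        seed="seed_tasks.jsonl",  # path to human-written seed tasks
        num_machine_instructions=50,
    )
    # Stops early, keeping partial results, once the wall-clock budget is spent.
    pipeline.generate(timeout_minutes=30)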
camel/interpreters/subprocess_interpreter.py
CHANGED

@@ -12,8 +12,9 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
-import
+import os
 import subprocess
+import sys
 import tempfile
 from pathlib import Path
 from typing import Any, ClassVar, Dict, List

@@ -43,12 +44,14 @@ class SubprocessInterpreter(BaseInterpreter):
         the executed code. (default: :obj:`False`)
         print_stderr (bool, optional): If True, print the standard error of the
             executed code. (default: :obj:`True`)
+        execution_timeout (int, optional): Maximum time in seconds to wait for
+            code execution to complete. (default: :obj:`60`)
     """
 
-    _CODE_EXECUTE_CMD_MAPPING: ClassVar[Dict[str, str]] = {
-        "python": "python {file_name}",
-        "bash": "bash {file_name}",
-        "r": "Rscript {file_name}",
+    _CODE_EXECUTE_CMD_MAPPING: ClassVar[Dict[str, Dict[str, str]]] = {
+        "python": {"posix": "python {file_name}", "nt": "python {file_name}"},
+        "bash": {"posix": "bash {file_name}", "nt": "bash {file_name}"},
+        "r": {"posix": "Rscript {file_name}", "nt": "Rscript {file_name}"},
     }
 
     _CODE_EXTENSION_MAPPING: ClassVar[Dict[str, str]] = {

@@ -74,10 +77,12 @@ class SubprocessInterpreter(BaseInterpreter):
         require_confirm: bool = True,
         print_stdout: bool = False,
         print_stderr: bool = True,
+        execution_timeout: int = 60,
     ) -> None:
         self.require_confirm = require_confirm
         self.print_stdout = print_stdout
         self.print_stderr = print_stderr
+        self.execution_timeout = execution_timeout
 
     def run_file(
         self,

@@ -94,13 +99,9 @@ class SubprocessInterpreter(BaseInterpreter):
         Returns:
             str: A string containing the captured stdout and stderr of the
                 executed code.
-
-        Raises:
-            RuntimeError: If the provided file path does not point to a file.
-            InterpreterError: If the code type provided is not supported.
         """
         if not file.is_file():
-
+            return f"{file} is not a file."
         code_type = self._check_code_type(code_type)
         if self._CODE_TYPE_MAPPING[code_type] == "python":
             # For Python code, use ast to analyze and modify the code

@@ -108,7 +109,7 @@ class SubprocessInterpreter(BaseInterpreter):
 
             import astor
 
-            with open(file, 'r') as f:
+            with open(file, 'r', encoding='utf-8') as f:
                 source = f.read()
 
             # Parse the source code

@@ -158,34 +159,88 @@ class SubprocessInterpreter(BaseInterpreter):
                 modified_source = astor.to_source(tree)
                 # Create a temporary file with the modified source
                 temp_file = self._create_temp_file(modified_source, "py")
-                cmd =
-            except SyntaxError:
-
-
-
-
+                cmd = ["python", str(temp_file)]
+            except (SyntaxError, TypeError, ValueError) as e:
+                logger.warning(f"Failed to parse Python code with AST: {e}")
+                platform_type = 'posix' if os.name != 'nt' else 'nt'
+                cmd_template = self._CODE_EXECUTE_CMD_MAPPING[code_type][
+                    platform_type
+                ]
+                base_cmd = cmd_template.split()[0]
+
+                # Check if command is available
+                if not self._is_command_available(base_cmd):
+                    raise InterpreterError(
+                        f"Command '{base_cmd}' not found. Please ensure it "
+                        f"is installed and available in your PATH."
                     )
-
+
+                cmd = [base_cmd, str(file)]
         else:
             # For non-Python code, use standard execution
-
-
-
+            platform_type = 'posix' if os.name != 'nt' else 'nt'
+            cmd_template = self._CODE_EXECUTE_CMD_MAPPING[code_type][
+                platform_type
+            ]
+            base_cmd = cmd_template.split()[0]  # Get 'python', 'bash', etc.
+
+            # Check if command is available
+            if not self._is_command_available(base_cmd):
+                raise InterpreterError(
+                    f"Command '{base_cmd}' not found. Please ensure it "
+                    f"is installed and available in your PATH."
                 )
-                )
 
-
-
-
-
-
+            cmd = [base_cmd, str(file)]
+
+        # Get current Python executable's environment
+        env = os.environ.copy()
+
+        # On Windows, ensure we use the correct Python executable path
+        if os.name == 'nt':
+            python_path = os.path.dirname(sys.executable)
+            if 'PATH' in env:
+                env['PATH'] = python_path + os.pathsep + env['PATH']
+            else:
+                env['PATH'] = python_path
+
+        try:
+            proc = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+                env=env,
+                shell=False,  # Never use shell=True for security
+            )
+            # Add timeout to prevent hanging processes
+            stdout, stderr = proc.communicate(timeout=self.execution_timeout)
+            return_code = proc.returncode
+        except subprocess.TimeoutExpired:
+            proc.kill()
+            stdout, stderr = proc.communicate()
+            return_code = proc.returncode
+            timeout_msg = (
+                f"Process timed out after {self.execution_timeout} seconds "
+                f"and was terminated."
+            )
+            stderr = f"{stderr}\n{timeout_msg}"
 
         # Clean up temporary file if it was created
-
-
-
-
-
+        temp_file_to_clean = locals().get('temp_file')
+        if temp_file_to_clean is not None:
+            try:
+                if temp_file_to_clean.exists():
+                    try:
+                        temp_file_to_clean.unlink()
+                    except PermissionError:
+                        # On Windows, files might be locked
+                        logger.warning(
+                            f"Could not delete temp file "
+                            f"{temp_file_to_clean} (may be locked)"
+                        )
+            except Exception as e:
+                logger.warning(f"Failed to cleanup temporary file: {e}")
 
         if self.print_stdout and stdout:
             print("======stdout======")
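The core of the new execution path is the standard Popen/communicate timeout pattern from the subprocess module; a self-contained sketch:

    import subprocess

    proc = subprocess.Popen(
        ["python", "-c", "import time; time.sleep(10)"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True,
    )
    try:
        stdout, stderr = proc.communicate(timeout=2)
    except subprocess.TimeoutExpired:
        proc.kill()
        # A second communicate() reaps the dead process and drains its pipes.
        stdout, stderr = proc.communicate()
        stderr += "\nProcess timed out after 2 seconds and was terminated."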
@@ -240,7 +295,7 @@ class SubprocessInterpreter(BaseInterpreter):
             "computer: {code}"
         )
         while True:
-            choice = input("Running code? [Y/n]:").lower()
+            choice = input("Running code? [Y/n]:").lower().strip()
             if choice in ["y", "yes", "ye", ""]:
                 break
             elif choice in ["no", "n"]:

@@ -249,22 +304,72 @@ class SubprocessInterpreter(BaseInterpreter):
                     "This choice stops the current operation and any "
                     "further code execution."
                 )
-
-
-
+            else:
+                print("Please enter 'y' or 'n'.")
+
+        temp_file_path = None
+        temp_dir = None
+        try:
+            temp_file_path = self._create_temp_file(
+                code=code, extension=self._CODE_EXTENSION_MAPPING[code_type]
+            )
+            temp_dir = temp_file_path.parent
+            return self.run_file(temp_file_path, code_type)
+        finally:
+            # Clean up temp file and directory
+            try:
+                if temp_file_path and temp_file_path.exists():
+                    try:
+                        temp_file_path.unlink()
+                    except PermissionError:
+                        # On Windows, files might be locked
+                        logger.warning(
+                            f"Could not delete temp file {temp_file_path}"
+                        )
+
+                if temp_dir and temp_dir.exists():
+                    try:
+                        import shutil
+
+                        shutil.rmtree(temp_dir, ignore_errors=True)
+                    except Exception as e:
+                        logger.warning(f"Could not delete temp directory: {e}")
+            except Exception as e:
+                logger.warning(f"Error during cleanup: {e}")
 
-
+    def _create_temp_file(self, code: str, extension: str) -> Path:
+        r"""Creates a temporary file with the given code and extension.
 
-
-
+        Args:
+            code (str): The code to write to the temporary file.
+            extension (str): The file extension to use.
 
-
-
-
-
-
-
-
+        Returns:
+            Path: The path to the created temporary file.
+        """
+        try:
+            # Create a temporary directory first to ensure we have write
+            # permissions
+            temp_dir = tempfile.mkdtemp()
+            # Create file path with appropriate extension
+            file_path = Path(temp_dir) / f"temp_code.{extension}"
+
+            # Write code to file with appropriate encoding
+            with open(file_path, 'w', encoding='utf-8') as f:
+                f.write(code)
+
+            return file_path
+        except Exception as e:
+            # Clean up temp directory if creation failed
+            if 'temp_dir' in locals():
+                try:
+                    import shutil
+
+                    shutil.rmtree(temp_dir, ignore_errors=True)
+                except Exception:
+                    pass
+            logger.error(f"Failed to create temporary file: {e}")
+            raise
 
     def _check_code_type(self, code_type: str) -> str:
         if code_type not in self._CODE_TYPE_MAPPING:
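One likely motivation for the mkdtemp-based _create_temp_file: on Windows an open NamedTemporaryFile cannot be reopened by a child process, whereas a plain file inside a private directory can, and cleanup becomes a single rmtree. The pattern in isolation:

    import shutil
    import tempfile
    from pathlib import Path

    temp_dir = tempfile.mkdtemp()  # private, writable directory
    try:
        file_path = Path(temp_dir) / "temp_code.py"
        file_path.write_text("print('hello')", encoding="utf-8")
        # ... hand file_path to a subprocess here ...
    finally:
        shutil.rmtree(temp_dir, ignore_errors=True)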
@@ -284,3 +389,39 @@ class SubprocessInterpreter(BaseInterpreter):
         raise RuntimeError(
             "SubprocessInterpreter doesn't support " "`action_space`."
         )
+
+    def _is_command_available(self, command: str) -> bool:
+        r"""Check if a command is available in the system PATH.
+
+        Args:
+            command (str): The command to check.
+
+        Returns:
+            bool: True if the command is available, False otherwise.
+        """
+        if os.name == 'nt':  # Windows
+            # On Windows, use where.exe to find the command
+            try:
+                with open(os.devnull, 'w') as devnull:
+                    subprocess.check_call(
+                        ['where', command],
+                        stdout=devnull,
+                        stderr=devnull,
+                        shell=False,
+                    )
+                return True
+            except subprocess.CalledProcessError:
+                return False
+        else:  # Unix-like systems
+            # On Unix-like systems, use which to find the command
+            try:
+                with open(os.devnull, 'w') as devnull:
+                    subprocess.check_call(
+                        ['which', command],
+                        stdout=devnull,
+                        stderr=devnull,
+                        shell=False,
+                    )
+                return True
+            except subprocess.CalledProcessError:
+                return False
camel/models/__init__.py
CHANGED
@@ -14,6 +14,7 @@
 from .aiml_model import AIMLModel
 from .anthropic_model import AnthropicModel
 from .azure_openai_model import AzureOpenAIModel
+from .base_audio_model import BaseAudioModel
 from .base_model import BaseModelBackend
 from .cohere_model import CohereModel
 from .deepseek_model import DeepSeekModel

@@ -74,4 +75,5 @@ __all__ = [
     'InternLMModel',
     'MoonshotModel',
     'AIMLModel',
+    'BaseAudioModel',
 ]
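The practical effect of the re-export, as a sketch:

    from camel.models import BaseAudioModel

    # Previously this required the private module path:
    # from camel.models.base_audio_model import BaseAudioModel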
camel/models/anthropic_model.py
CHANGED
@@ -84,7 +84,11 @@ class AnthropicModel(BaseModelBackend):
             index=0,
             message={
                 "role": "assistant",
-                "content":
+                "content": next(
+                    content.text
+                    for content in response.content
+                    if content.type == "text"
+                ),
             },
             finish_reason=response.stop_reason,
         )