camel-ai 0.2.73a4__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +2217 -519
- camel/agents/mcp_agent.py +30 -27
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datasets/base_generator.py +39 -10
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +3 -12
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/chunkr_reader.py +9 -0
- camel/memories/agent_memories.py +48 -4
- camel/memories/base.py +26 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/context_creators/score_based.py +25 -384
- camel/memories/records.py +88 -8
- camel/messages/base.py +153 -34
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +6 -19
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +114 -89
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +32 -14
- camel/models/cohere_model.py +1 -16
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +1 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +36 -18
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +1 -16
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +105 -24
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +62 -41
- camel/models/openai_model.py +62 -57
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/__init__.py +2 -0
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +146 -66
- camel/societies/workforce/role_playing_worker.py +15 -11
- camel/societies/workforce/single_agent_worker.py +302 -65
- camel/societies/workforce/structured_output_handler.py +30 -18
- camel/societies/workforce/task_channel.py +163 -27
- camel/societies/workforce/utils.py +107 -13
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +1949 -579
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +168 -145
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +4 -3
- camel/toolkits/__init__.py +20 -7
- camel/toolkits/aci_toolkit.py +45 -0
- camel/toolkits/base.py +6 -4
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/dappier_toolkit.py +5 -1
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +11 -31
- camel/toolkits/excel_toolkit.py +1 -1
- camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +430 -36
- camel/toolkits/function_tool.py +13 -3
- camel/toolkits/github_toolkit.py +104 -17
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +12 -31
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +15 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +77 -8
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +884 -88
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +5 -612
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +0 -1
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +959 -89
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +9 -2
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +281 -213
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +23 -3
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +72 -7
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +582 -132
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +158 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +55 -8
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +43 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +321 -8
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +10 -4
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +45 -4
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +151 -53
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +27 -1
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +366 -71
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_integration.py +18 -13
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +19 -10
- camel/toolkits/notion_mcp_toolkit.py +16 -26
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +8 -49
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/search_toolkit.py +264 -91
- camel/toolkits/slack_toolkit.py +64 -10
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +17 -11
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/enums.py +274 -7
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +15 -0
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/token_counting.py +43 -20
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +223 -83
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +170 -141
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1550
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
@@ -12,14 +12,12 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
-from typing import
-
-from camel.toolkits import BaseToolkit, FunctionTool
+from typing import Optional
 
 from .mcp_toolkit import MCPToolkit
 
 
-class EdgeOnePagesMCPToolkit(BaseToolkit):
+class EdgeOnePagesMCPToolkit(MCPToolkit):
     r"""EdgeOnePagesMCPToolkit provides an interface for interacting with
     EdgeOne pages using the EdgeOne Pages MCP server.
 
@@ -38,32 +36,14 @@ class EdgeOnePagesMCPToolkit(BaseToolkit):
             timeout (Optional[float]): Connection timeout in seconds.
                 (default: :obj:`None`)
         """
-
-
-
-
-
-                "edgeone-pages-mcp-server": {
-                    "command": "npx",
-                    "args": ["edgeone-pages-mcp"],
-                }
+        config_dict = {
+            "mcpServers": {
+                "edgeone-pages-mcp-server": {
+                    "command": "npx",
+                    "args": ["edgeone-pages-mcp"],
                 }
-            }
-
-        )
-
-    async def connect(self):
-        r"""Explicitly connect to the EdgeOne Pages MCP server."""
-        await self._mcp_toolkit.connect()
+            }
+        }
 
-
-
-        await self._mcp_toolkit.disconnect()
-
-    def get_tools(self) -> List[FunctionTool]:
-        r"""Returns a list of tools provided by the EdgeOnePagesMCPToolkit.
-
-        Returns:
-            List[FunctionTool]: List of available tools.
-        """
-        return self._mcp_toolkit.get_tools()
+        # Initialize parent MCPToolkit with EdgeOne Pages configuration
+        super().__init__(config_dict=config_dict, timeout=timeout)
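With this refactor EdgeOnePagesMCPToolkit is itself an MCPToolkit, so connection handling and get_tools() are inherited from the parent class rather than forwarded to an internal _mcp_toolkit instance. A minimal usage sketch, assuming MCPToolkit's async connect()/disconnect() API shown in the removed code (the sketch is illustrative, not part of the package):

import asyncio

from camel.toolkits.edgeone_pages_mcp_toolkit import EdgeOnePagesMCPToolkit


async def main() -> None:
    # The built-in config launches the "edgeone-pages-mcp" server via npx.
    toolkit = EdgeOnePagesMCPToolkit(timeout=30.0)
    await toolkit.connect()  # inherited from MCPToolkit
    try:
        tools = toolkit.get_tools()  # inherited from MCPToolkit
        print(f"EdgeOne Pages exposes {len(tools)} tools")
    finally:
        await toolkit.disconnect()  # inherited from MCPToolkit


asyncio.run(main())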
camel/toolkits/excel_toolkit.py
CHANGED

@@ -872,7 +872,7 @@ class ExcelToolkit(BaseToolkit):
         import csv
 
         with open(
-            resolved_csv_path, 'w', newline='', encoding='utf-8'
+            resolved_csv_path, 'w', newline='', encoding='utf-8-sig'
         ) as csvfile:
             writer = csv.writer(csvfile)
             writer.writerows(data)
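The only change here is the CSV output encoding, from utf-8 to utf-8-sig. The "-sig" variant prepends a UTF-8 byte-order mark, which is what Excel keys on to decode non-ASCII characters correctly when opening a CSV. A small standalone illustration (not taken from the package):

# "utf-8-sig" writes the UTF-8 BOM (b"\xef\xbb\xbf") before the data, so
# Excel detects the encoding and accented characters render correctly.
with open("demo.csv", "w", newline="", encoding="utf-8-sig") as f:
    f.write("name,city\nZoé,München\n")

with open("demo.csv", "rb") as f:
    assert f.read(3) == b"\xef\xbb\xbf"  # BOM present on disk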
camel/toolkits/{file_write_toolkit.py → file_toolkit.py}
RENAMED

@@ -15,7 +15,7 @@ import os
 import re
 from datetime import datetime
 from pathlib import Path
-from typing import List, Optional, Tuple, Union
+from typing import Dict, List, Optional, Tuple, Union
 
 from camel.logger import get_logger
 from camel.toolkits.base import BaseToolkit
@@ -26,14 +26,17 @@ logger = get_logger(__name__)
 
 
 @MCPServer()
-class FileWriteToolkit(BaseToolkit):
-    r"""A toolkit for
-
-
-
-
-
-
+class FileToolkit(BaseToolkit):
+    r"""A comprehensive toolkit for file operations including reading,
+    writing, and editing files.
+
+    This class provides cross-platform (macOS, Linux, Windows) support for:
+    - Reading various file formats (text, JSON, YAML, PDF, DOCX)
+    - Writing to multiple formats (Markdown, DOCX, PDF, plaintext, JSON,
+      YAML, CSV, HTML)
+    - Editing and modifying existing files with content replacement
+    - Automatic backup creation before modifications
+    - Custom encoding and enhanced formatting options
     """
 
     def __init__(
@@ -126,36 +129,32 @@ class FileWriteToolkit(BaseToolkit):
         with file_path.open("w", encoding=encoding) as f:
             f.write(content)
 
-    def
-        r"""
+    def _create_backup(self, file_path: Path) -> Optional[Path]:
+        r"""Create a backup of the file if it exists and backup is enabled.
 
         Args:
-            file_path (Path): The
+            file_path (Path): The file path to backup.
 
         Returns:
-            Path:
+            Optional[Path]: Path to the backup file if created, None otherwise.
         """
-        if not file_path.exists():
-            return
+        if not self.backup_enabled or not file_path.exists():
+            return None
 
-        # Generate
+        # Generate backup filename with .bak extension and timestamp
         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-
-
-
-
-
-
-
-
-
-
-
-
-            new_path = parent / f"{stem}_{timestamp}_{counter}{suffix}"
-            if not new_path.exists():
-                return new_path
-            counter += 1
+        backup_path = file_path.parent / f"{file_path.name}.{timestamp}.bak"
+
+        # Copy the file to backup location
+        import shutil
+
+        try:
+            shutil.copy2(file_path, backup_path)
+            logger.info(f"Created backup: {backup_path}")
+            return backup_path
+        except Exception as e:
+            logger.warning(f"Failed to create backup: {e}")
+            return None
 
     def _write_docx_file(self, file_path: Path, content: str) -> None:
         r"""Write text content to a DOCX file with default formatting.
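The old unique-filename loop is replaced by _create_backup, which copies the target to a sibling file named <name>.<timestamp>.bak and now also respects self.backup_enabled. A short illustration of the naming scheme only (hypothetical path, not toolkit code):

from datetime import datetime
from pathlib import Path

# The backup sits next to the original as "<name>.<YYYYmmdd_HHMMSS>.bak".
original = Path("/tmp/notes/report.md")  # hypothetical file
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
backup = original.parent / f"{original.name}.{timestamp}.bak"
print(backup)  # e.g. /tmp/notes/report.md.20250101_120000.bak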
@@ -840,11 +839,74 @@ class FileWriteToolkit(BaseToolkit):
 
         return text
 
+    def _ensure_html_utf8_meta(self, content: str) -> str:
+        r"""Ensure HTML content has UTF-8 meta tag.
+
+        Args:
+            content (str): The HTML content.
+
+        Returns:
+            str: HTML content with UTF-8 meta tag.
+        """
+        # Check if content already has a charset meta tag
+        has_charset = re.search(
+            r'<meta[^>]*charset[^>]*>', content, re.IGNORECASE
+        )
+
+        # UTF-8 meta tag
+        utf8_meta = '<meta charset="utf-8">'
+
+        if has_charset:
+            # Replace existing charset with UTF-8
+            content = re.sub(
+                r'<meta[^>]*charset[^>]*>',
+                utf8_meta,
+                content,
+                flags=re.IGNORECASE,
+            )
+        else:
+            # Add UTF-8 meta tag
+            # Try to find <head> tag
+            head_match = re.search(r'<head[^>]*>', content, re.IGNORECASE)
+            if head_match:
+                # Insert after <head> tag
+                insert_pos = head_match.end()
+                content = (
+                    content[:insert_pos]
+                    + '\n '
+                    + utf8_meta
+                    + content[insert_pos:]
+                )
+            else:
+                # No <head> tag found, check if there's <html> tag
+                html_match = re.search(r'<html[^>]*>', content, re.IGNORECASE)
+                if html_match:
+                    # Insert <head> with meta tag after <html>
+                    insert_pos = html_match.end()
+                    content = (
+                        content[:insert_pos]
+                        + '\n<head>\n '
+                        + utf8_meta
+                        + '\n</head>'
+                        + content[insert_pos:]
+                    )
+                else:
+                    # No proper HTML structure, wrap content
+                    content = (
+                        '<!DOCTYPE html>\n<html>\n<head>\n '
+                        + utf8_meta
+                        + '\n</head>\n<body>\n'
+                        + content
+                        + '\n</body>\n</html>'
+                    )
+
+        return content
+
     def _write_csv_file(
         self,
         file_path: Path,
         content: Union[str, List[List]],
-        encoding: str = "utf-8",
+        encoding: str = "utf-8-sig",
     ) -> None:
         r"""Write CSV content to a file.
 
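The new _ensure_html_utf8_meta helper guarantees that HTML output declares UTF-8: an existing charset meta tag is rewritten, a missing one is inserted after <head> (or a <head> is added after <html>), and bare fragments are wrapped in a full document. A hedged sketch of the expected behaviour, assuming FileToolkit can be constructed with its default arguments:

from camel.toolkits.file_toolkit import FileToolkit

toolkit = FileToolkit()  # assumes the default constructor arguments suffice

# A bare fragment gets wrapped into a full document with a UTF-8 meta tag.
print(toolkit._ensure_html_utf8_meta("<p>héllo</p>"))

# An existing charset declaration is rewritten to UTF-8.
page = '<html><head><meta charset="iso-8859-1"></head><body>hi</body></html>'
print(toolkit._ensure_html_utf8_meta(page))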
@@ -852,7 +914,8 @@ class FileWriteToolkit(BaseToolkit):
             file_path (Path): The target file path.
             content (Union[str, List[List]]): The CSV content as a string or
                 list of lists.
-            encoding (str): Character encoding to use.
+            encoding (str): Character encoding to use.
+                (default: :obj:`utf-8-sig`)
         """
         import csv
 
@@ -901,6 +964,10 @@ class FileWriteToolkit(BaseToolkit):
             content (str): The content to write.
             encoding (str): Character encoding to use. (default: :obj:`utf-8`)
         """
+        # For HTML files, ensure UTF-8 meta tag is present
+        if file_path.suffix.lower() in ['.html', '.htm']:
+            content = self._ensure_html_utf8_meta(content)
+
         with file_path.open("w", encoding=encoding) as f:
             f.write(content)
 
@@ -939,8 +1006,9 @@ class FileWriteToolkit(BaseToolkit):
         file_path = self._resolve_filepath(filename)
         file_path.parent.mkdir(parents=True, exist_ok=True)
 
-        #
-        file_path
+        # Create backup of existing file if backup is enabled
+        if file_path.exists() and self.backup_enabled:
+            self._create_backup(file_path)
 
         extension = file_path.suffix.lower()
 
@@ -995,6 +1063,309 @@ class FileWriteToolkit(BaseToolkit):
             logger.error(error_msg)
             return error_msg
 
+    # ----------------------------------------------
+    # Read File Functions
+    # ----------------------------------------------
+    def read_file(
+        self, file_paths: Union[str, List[str]]
+    ) -> Union[str, Dict[str, str]]:
+        r"""Read and return content of one or more files using MarkItDown
+        for better format support.
+
+        This method uses MarkItDownLoader to convert various file formats
+        to Markdown. It supports a wide range of formats including:
+        - PDF (.pdf)
+        - Microsoft Office: Word (.doc, .docx), Excel (.xls, .xlsx),
+          PowerPoint (.ppt, .pptx)
+        - EPUB (.epub)
+        - HTML (.html, .htm)
+        - Images (.jpg, .jpeg, .png) for OCR
+        - Audio (.mp3, .wav) for transcription
+        - Text-based formats (.csv, .json, .xml, .txt, .md)
+        - ZIP archives (.zip)
+
+        Args:
+            file_paths (Union[str, List[str]]): A single file path or a list
+                of file paths to read. Paths can be relative or absolute.
+                If relative, they will be resolved relative to the working
+                directory.
+
+        Returns:
+            Union[str, Dict[str, str]]:
+                - If a single file path is provided: Returns the content as
+                  a string.
+                - If multiple file paths are provided: Returns a dictionary
+                  where keys are file paths and values are the corresponding
+                  content in Markdown format.
+                If conversion fails, returns an error message.
+        """
+        from camel.loaders.markitdown import MarkItDownLoader
+
+        try:
+            # Handle single file path for backward compatibility
+            if isinstance(file_paths, str):
+                resolved_path = self._resolve_filepath(file_paths)
+
+                # Use MarkItDownLoader to convert the file
+                result = MarkItDownLoader().convert_files(
+                    file_paths=[str(resolved_path)], parallel=False
+                )
+
+                # Return the converted content or error message
+                return result.get(
+                    str(resolved_path), f"Failed to read file: {resolved_path}"
+                )
+
+            # Handle multiple file paths
+            else:
+                resolved_paths = [
+                    str(self._resolve_filepath(fp)) for fp in file_paths
+                ]
+
+                # Use MarkItDownLoader to convert files in parallel
+                result = MarkItDownLoader().convert_files(
+                    file_paths=resolved_paths, parallel=True
+                )
+
+                # Map back to original paths if needed
+                return_dict = {}
+                for original, resolved in zip(file_paths, resolved_paths):
+                    return_dict[original] = result.get(
+                        resolved, f"Failed to read file: {resolved}"
+                    )
+
+                return return_dict
+
+        except Exception as e:
+            return f"Error reading file(s): {e}"
+
+    # ----------------------------------------------
+    # Edit File Functions
+    # ----------------------------------------------
+    def edit_file(
+        self, file_path: str, old_content: str, new_content: str
+    ) -> str:
+        r"""Edit a file by replacing specified content.
+
+        This method performs simple text replacement in files. It reads
+        the file, replaces all occurrences of old_content with new_content,
+        and writes the result back.
+
+        Args:
+            file_path (str): The path to the file to edit. Can be
+                relative or absolute. If relative, it will be resolved
+                relative to the working directory.
+            old_content (str): The exact text to find and replace.
+            new_content (str): The text to replace old_content with.
+
+        Returns:
+            str: A success message if the edit was successful, or an
+                error message if the content wasn't found or an error occurred.
+        """
+        try:
+            working_path = self._resolve_filepath(file_path)
+
+            if not working_path.exists():
+                return f"Error: File {working_path} does not exist"
+
+            # Create backup before editing if enabled
+            self._create_backup(working_path)
+
+            # Read the file content
+            try:
+                file_text = working_path.read_text(
+                    encoding=self.default_encoding
+                )
+            except Exception as e:
+                return f"Error reading file: {e}"
+
+            # Check if the old_content exists in the file
+            if old_content not in file_text:
+                return (
+                    f"No replacement performed: '{old_content}' not found in "
+                    f"{working_path}."
+                )
+
+            # Replace the content
+            new_file_text = file_text.replace(old_content, new_content)
+
+            # Write back to file
+            try:
+                working_path.write_text(
+                    new_file_text, encoding=self.default_encoding
+                )
+                return f"Successfully edited {working_path}"
+            except Exception as e:
+                return f"Error writing file: {e}"
+
+        except Exception as e:
+            return f"Error editing file: {e}"
+
+    def search_files(
+        self,
+        pattern: str,
+        file_types: Optional[List[str]] = None,
+        file_pattern: Optional[str] = None,
+        path: Optional[str] = None,
+    ) -> str:
+        r"""Search for a text pattern in files with specified extensions or
+        file patterns.
+
+        This method searches for a text pattern (case-insensitive substring
+        match) in files matching either the specified file types or a file
+        pattern. It returns structured results showing which files contain
+        the pattern, along with line numbers and matching content.
+
+        Args:
+            pattern (str): The text pattern to search for (case-insensitive
+                string match).
+            file_types (Optional[List[str]]): List of file extensions to
+                search (e.g., ["md", "txt", "py"]). Do not include the dot.
+                If not provided and file_pattern is also not provided,
+                defaults to ["md"] (markdown files). Ignored if file_pattern
+                is provided. (default: :obj:`None`)
+            file_pattern (Optional[str]): Glob pattern for matching files
+                (e.g., "*_workflow.md", "test_*.py"). If provided, this
+                overrides file_types. (default: :obj:`None`)
+            path (Optional[str]): Directory to search in. If not provided,
+                uses the working_directory. Can be relative or absolute.
+                (default: :obj:`None`)
+
+        Returns:
+            str: JSON-formatted string containing search results with the
+                structure:
+                {
+                    "pattern": "search_pattern",
+                    "searched_path": "/absolute/path",
+                    "file_types": ["md", "txt"],
+                    "file_pattern": "*_workflow.md",
+                    "matches": [
+                        {
+                            "file": "relative/path/to/file.md",
+                            "line": 42,
+                            "content": "matching line content"
+                        },
+                        ...
+                    ],
+                    "total_matches": 10,
+                    "files_searched": 5
+                }
+                If an error occurs, returns a JSON string with an "error" key.
+        """
+        import json
+
+        try:
+            # resolve search path
+            if path:
+                path_obj = Path(path)
+                if not path_obj.is_absolute():
+                    search_path = (self.working_directory / path_obj).resolve()
+                else:
+                    search_path = path_obj.resolve()
+            else:
+                search_path = self.working_directory
+
+            # validate that search path exists
+            if not search_path.exists():
+                return json.dumps(
+                    {"error": f"Search path does not exist: {search_path}"}
+                )
+
+            if not search_path.is_dir():
+                return json.dumps(
+                    {"error": f"Search path is not a directory: {search_path}"}
+                )
+
+            # collect all matching files
+            matching_files: List[Path] = []
+
+            if file_pattern:
+                # use file_pattern if provided (overrides file_types)
+                pattern_glob = f"**/{file_pattern}"
+                matching_files.extend(search_path.rglob(pattern_glob))
+            else:
+                # use file_types if file_pattern not provided
+                if file_types is None:
+                    file_types = ["md"]
+
+                # normalize and deduplicate file types
+                normalized_types = set()
+                for file_type in file_types:
+                    file_type = file_type.lstrip('.')
+                    if file_type:  # skip empty strings
+                        normalized_types.add(file_type)
+
+                for file_type in normalized_types:
+                    # use rglob for recursive search
+                    pattern_glob = f"**/*.{file_type}"
+                    matching_files.extend(search_path.rglob(pattern_glob))
+
+            # search through files (case-insensitive)
+            matches = []
+            files_searched = 0
+            pattern_lower = pattern.lower()
+
+            for file_path in matching_files:
+                files_searched += 1
+                try:
+                    # read file content
+                    content = file_path.read_text(
+                        encoding=self.default_encoding
+                    )
+                    lines = content.splitlines()
+
+                    # search each line for pattern (case-insensitive)
+                    for line_num, line in enumerate(lines, start=1):
+                        if pattern_lower in line.lower():
+                            # get relative path for cleaner output
+                            try:
+                                relative_path = file_path.relative_to(
+                                    search_path
+                                )
+                            except ValueError:
+                                relative_path = file_path
+
+                            matches.append(
+                                {
+                                    "file": str(relative_path),
+                                    "line": line_num,
+                                    "content": line.strip(),
+                                }
+                            )
+
+                except (UnicodeDecodeError, PermissionError) as e:
+                    # skip files that can't be read
+                    logger.debug(f"Skipping file {file_path}: {e}")
+                    continue
+
+            # build result
+            result = {
+                "pattern": pattern,
+                "searched_path": str(search_path),
+                "matches": matches,
+                "total_matches": len(matches),
+                "files_searched": files_searched,
+            }
+
+            # include file_pattern or file_types in result
+            if file_pattern:
+                result["file_pattern"] = file_pattern
+            else:
+                result["file_types"] = (
+                    sorted(normalized_types) if normalized_types else ["md"]
+                )
+
+            logger.info(
+                f"Search completed: found {len(matches)} matches "
+                f"in {files_searched} files"
+            )
+            return json.dumps(result, indent=2)
+
+        except Exception as e:
+            error_msg = f"Error during file search: {e}"
+            logger.error(error_msg)
+            return json.dumps({"error": error_msg})
+
     def get_tools(self) -> List[FunctionTool]:
         r"""Return a list of FunctionTool objects representing the functions
         in the toolkit.
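Taken together, these additions turn the former write-only toolkit into a read/write/edit/search toolkit. A hedged usage sketch with hypothetical file paths, assuming a default-constructed FileToolkit and a populated working directory:

from camel.toolkits.file_toolkit import FileToolkit

toolkit = FileToolkit()  # assumes the default constructor arguments suffice

# read_file: a single path returns a string, a list returns a dict keyed by path.
report = toolkit.read_file("docs/report.pdf")
many = toolkit.read_file(["notes.md", "data.csv"])

# edit_file: plain text replacement, with an automatic backup when enabled.
print(toolkit.edit_file("notes.md", old_content="TODO", new_content="DONE"))

# search_files: case-insensitive substring search, returns a JSON string.
print(toolkit.search_files("workflow", file_types=["md", "txt"]))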
@@ -1005,4 +1376,27 @@ class FileWriteToolkit(BaseToolkit):
         """
         return [
             FunctionTool(self.write_to_file),
+            FunctionTool(self.read_file),
+            FunctionTool(self.edit_file),
+            FunctionTool(self.search_files),
         ]
+
+
+# Backward compatibility: FileWriteToolkit as deprecated alias
+class FileWriteToolkit(FileToolkit):
+    r"""Deprecated: Use FileToolkit instead.
+
+    This class is maintained for backward compatibility only.
+    Please use FileToolkit for new code.
+    """
+
+    def __init__(self, *args, **kwargs):
+        import warnings
+
+        warnings.warn(
+            "FileWriteToolkit is deprecated and will be removed in a "
+            "future version. Please use FileToolkit instead.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        super().__init__(*args, **kwargs)
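Existing imports keep working through the alias, but constructing FileWriteToolkit now emits a DeprecationWarning. A small sketch of what callers can expect (again assuming default constructor arguments):

import warnings

from camel.toolkits.file_toolkit import FileWriteToolkit

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    toolkit = FileWriteToolkit()  # still works, but warns
assert any(issubclass(w.category, DeprecationWarning) for w in caught)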
camel/toolkits/function_tool.py
CHANGED
@@ -156,7 +156,12 @@ def get_openai_tool_schema(func: Callable) -> Dict[str, Any]:
         if (name := param.arg_name) in parameters_dict["properties"] and (
             description := param.description
         ):
-
+            # OpenAI does not allow descriptions on properties that use $ref.
+            # To avoid schema errors, we only add the description if "$ref" is
+            # not present.
+            prop = parameters_dict["properties"][name]
+            if "$ref" not in prop:
+                prop["description"] = description
 
     short_description = docstring.short_description or ""
     long_description = docstring.long_description or ""
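The guard matters because, as the new comment notes, OpenAI's function-calling schema validation rejects a description placed on a property that is a $ref; only inline properties may carry one. An illustrative pair of property shapes (hand-written, with a made-up $defs name, not produced by get_openai_tool_schema):

# Accepted: an inline property may carry a description.
ok_property = {"type": "string", "description": "City name"}

# Rejected: a "$ref" property with a sibling "description" triggers a schema
# error, which is why the description is now skipped for such properties.
bad_property = {"$ref": "#/$defs/Location", "description": "Where to search"}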
@@ -477,10 +482,15 @@
             result = self.func(*args, **kwargs)
             return result
         except Exception as e:
+            parts = []
+            if args:
+                parts.append(f"args={args}")
+            if kwargs:
+                parts.append(f"kwargs={kwargs}")
+            args_str = ", ".join(parts) if parts else "no arguments"
             raise ValueError(
                 f"Execution of function {self.func.__name__} failed with "
-                f"
-                f"Error: {e}"
+                f"{args_str}. Error: {e}"
             )
 
     async def async_call(self, *args: Any, **kwargs: Any) -> Any: