camel-ai 0.2.75a6__py3-none-any.whl → 0.2.76a1__py3-none-any.whl
This diff shows the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +159 -38
- camel/configs/__init__.py +3 -0
- camel/configs/amd_config.py +70 -0
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/memories/__init__.py +2 -1
- camel/memories/agent_memories.py +3 -1
- camel/memories/blocks/chat_history_block.py +17 -2
- camel/models/__init__.py +2 -0
- camel/models/amd_model.py +101 -0
- camel/models/model_factory.py +2 -0
- camel/models/openai_model.py +0 -6
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/workforce/single_agent_worker.py +44 -38
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/toolkits/__init__.py +14 -5
- camel/toolkits/aci_toolkit.py +45 -0
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +683 -0
- camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +194 -34
- camel/toolkits/function_tool.py +6 -1
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +12 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +19 -2
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +95 -59
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +619 -95
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +7 -2
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +115 -219
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +1 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +401 -80
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +9 -5
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
- camel/toolkits/markitdown_toolkit.py +27 -1
- camel/toolkits/mcp_toolkit.py +39 -14
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +18 -8
- camel/toolkits/terminal_toolkit.py +12 -2
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +16 -10
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/types/enums.py +11 -0
- camel/utils/commons.py +2 -0
- camel/utils/context_utils.py +395 -0
- camel/utils/mcp.py +136 -2
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/METADATA +6 -3
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/RECORD +52 -41
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/licenses/LICENSE +0 -0
camel/utils/context_utils.py
ADDED
```diff
@@ -0,0 +1,395 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from datetime import datetime
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from camel.logger import get_logger
+
+if TYPE_CHECKING:
+    from camel.agents import ChatAgent
+    from camel.memories.records import MemoryRecord
+
+logger = get_logger(__name__)
+
+
+class ContextUtility:
+    r"""Utility class for context management and file operations.
+
+    This utility provides generic functionality for managing context files,
+    markdown generation, and session management that can be used by
+    context-related features.
+
+    Key features:
+    - Session-based directory management
+    - Generic markdown file operations
+    - Text-based search through files
+    - File metadata handling
+    - Agent memory record retrieval
+    """
+
+    def __init__(self, working_directory: Optional[str] = None):
+        r"""Initialize the ContextUtility.
+
+        Args:
+            working_directory (str, optional): The directory path where files
+                will be stored. If not provided, a default directory will be
+                used.
+        """
+        self.working_directory_param = working_directory
+        self._setup_storage(working_directory)
+
+    def _setup_storage(self, working_directory: Optional[str]) -> None:
+        r"""Initialize session-specific storage paths and create directory
+        structure for context file management."""
+        self.session_id = self._generate_session_id()
+
+        if working_directory:
+            self.working_directory = Path(working_directory).resolve()
+        else:
+            camel_workdir = os.environ.get("CAMEL_WORKDIR")
+            if camel_workdir:
+                self.working_directory = Path(camel_workdir) / "context_files"
+            else:
+                self.working_directory = Path("context_files")
+
+        # Create session-specific directory
+        self.working_directory = self.working_directory / self.session_id
+        self.working_directory.mkdir(parents=True, exist_ok=True)
+
+    def _generate_session_id(self) -> str:
+        r"""Create timestamp-based unique identifier for isolating
+        current session files from other sessions."""
+        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')
+        return f"session_{timestamp}"
+
+    # ========= GENERIC FILE MANAGEMENT METHODS =========
+
+    def _create_or_update_note(self, note_name: str, content: str) -> str:
+        r"""Write content to markdown file, creating new file or
+        overwriting existing one with UTF-8 encoding.
+
+        Args:
+            note_name (str): Name of the note (without .md extension).
+            content (str): Content to write to the note.
+
+        Returns:
+            str: Success message.
+        """
+        try:
+            file_path = self.working_directory / f"{note_name}.md"
+            with open(file_path, 'w', encoding='utf-8') as f:
+                f.write(content)
+            return f"Note '{note_name}.md' created successfully"
+        except Exception as e:
+            logger.error(f"Error creating note {note_name}: {e}")
+            return f"Error creating note: {e}"
+
+    def save_markdown_file(
+        self,
+        filename: str,
+        content: str,
+        title: Optional[str] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+    ) -> str:
+        r"""Generic method to save any markdown content to a file.
+
+        Args:
+            filename (str): Name without .md extension.
+            content (str): Main content to save.
+            title (str, optional): Title for the markdown file.
+            metadata (Dict, optional): Additional metadata to include.
+
+        Returns:
+            str: Success message or error message.
+        """
+        try:
+            markdown_content = ""
+
+            # Add title if provided
+            if title:
+                markdown_content += f"# {title}\n\n"
+
+            # Add metadata section if provided
+            if metadata:
+                markdown_content += "## Metadata\n\n"
+                for key, value in metadata.items():
+                    markdown_content += f"- {key}: {value}\n"
+                markdown_content += "\n"
+
+            # Add main content
+            markdown_content += content
+
+            self._create_or_update_note(filename, markdown_content)
+            logger.info(
+                f"Markdown file saved to "
+                f"{self.working_directory / f'{filename}.md'}"
+            )
+            return f"Markdown file '{filename}.md' saved successfully"
+
+        except Exception as e:
+            logger.error(f"Error saving markdown file {filename}: {e}")
+            return f"Error saving markdown file: {e}"
+
+    def load_markdown_file(self, filename: str) -> str:
+        r"""Generic method to load any markdown file.
+
+        Args:
+            filename (str): Name without .md extension.
+
+        Returns:
+            str: File content or empty string if not found.
+        """
+        try:
+            file_path = self.working_directory / f"{filename}.md"
+            if file_path.exists():
+                return file_path.read_text(encoding="utf-8")
+            return ""
+        except Exception as e:
+            logger.error(f"Error loading markdown file {filename}: {e}")
+            return ""
+
+    def file_exists(self, filename: str) -> bool:
+        r"""Verify presence of markdown file in current session directory.
+
+        Args:
+            filename (str): Name without .md extension.
+
+        Returns:
+            bool: True if file exists, False otherwise.
+        """
+        file_path = self.working_directory / f"{filename}.md"
+        return file_path.exists()
+
+    def list_markdown_files(self) -> List[str]:
+        r"""Discover all markdown files in current session directory
+        and return their base names for reference.
+
+        Returns:
+            List[str]: List of filenames without .md extension.
+        """
+        try:
+            md_files = list(self.working_directory.glob("*.md"))
+            return [f.stem for f in md_files]
+        except Exception as e:
+            logger.error(f"Error listing markdown files: {e}")
+            return []
+
+    # ========= GENERIC AGENT MEMORY METHODS =========
+
+    def get_agent_memory_records(
+        self, agent: "ChatAgent"
+    ) -> List["MemoryRecord"]:
+        r"""Retrieve conversation history from agent's memory system.
+
+        Args:
+            agent (ChatAgent): The agent to extract memory records from.
+
+        Returns:
+            List[MemoryRecord]: List of memory records from the agent.
+        """
+        try:
+            context_records = agent.memory.retrieve()
+            return [cr.memory_record for cr in context_records]
+        except Exception as e:
+            logger.error(f"Error extracting memory records: {e}")
+            return []
+
+    def format_memory_as_conversation(
+        self, memory_records: List["MemoryRecord"]
+    ) -> str:
+        r"""Transform structured memory records into human-readable
+        conversation format with role labels and message content.
+
+        Args:
+            memory_records (List[MemoryRecord]): Memory records to format.
+
+        Returns:
+            str: Formatted conversation text.
+        """
+        conversation_lines = []
+
+        for record in memory_records:
+            role = (
+                record.role_at_backend.value
+                if hasattr(record.role_at_backend, 'value')
+                else str(record.role_at_backend)
+            )
+            content = record.message.content
+            conversation_lines.append(f"{role}: {content}")
+
+        return "\n".join(conversation_lines)
+
+    # ========= SESSION MANAGEMENT METHODS =========
+
+    def create_session_directory(
+        self, base_dir: Optional[str] = None, session_id: Optional[str] = None
+    ) -> Path:
+        r"""Create a session-specific directory.
+
+        Args:
+            base_dir (str, optional): Base directory. If None, uses current
+                working directory.
+            session_id (str, optional): Custom session ID. If None, generates
+                new one.
+
+        Returns:
+            Path: The created session directory path.
+        """
+        if session_id is None:
+            session_id = self._generate_session_id()
+
+        if base_dir:
+            base_path = Path(base_dir).resolve()
+        else:
+            base_path = self.working_directory.parent
+
+        session_dir = base_path / session_id
+        session_dir.mkdir(parents=True, exist_ok=True)
+        return session_dir
+
+    def get_session_metadata(self) -> Dict[str, Any]:
+        r"""Collect comprehensive session information including identifiers,
+        timestamps, and directory paths for tracking and reference.
+
+        Returns:
+            Dict[str, Any]: Session metadata including ID, timestamp,
+                directory.
+        """
+        return {
+            'session_id': self.session_id,
+            'working_directory': str(self.working_directory),
+            'created_at': datetime.now().isoformat(),
+            'base_directory': str(self.working_directory.parent),
+        }
+
+    def list_sessions(self, base_dir: Optional[str] = None) -> List[str]:
+        r"""Discover all available session directories for browsing
+        historical conversations and context files.
+
+        Args:
+            base_dir (str, optional): Base directory to search. If None, uses
+                parent of working directory.
+
+        Returns:
+            List[str]: List of session directory names.
+        """
+        try:
+            if base_dir:
+                search_dir = Path(base_dir)
+            else:
+                search_dir = self.working_directory.parent
+
+            session_dirs = [
+                d.name
+                for d in search_dir.iterdir()
+                if d.is_dir() and d.name.startswith('session_')
+            ]
+            return sorted(session_dirs)
+        except Exception as e:
+            logger.error(f"Error listing sessions: {e}")
+            return []
+
+    # ========= GENERIC SEARCH METHODS =========
+
+    def search_in_file(
+        self, file_path: Path, keywords: List[str], top_k: int = 4
+    ) -> str:
+        r"""Perform keyword-based search through file sections,
+        ranking results by keyword frequency and returning top matches.
+
+        Args:
+            file_path (Path): Path to the file to search.
+            keywords (List[str]): Keywords to search for.
+            top_k (int): Maximum number of results to return.
+
+        Returns:
+            str: Formatted search results.
+        """
+        results: List[Dict[str, Any]] = []
+        keyword_terms = [keyword.lower() for keyword in keywords]
+
+        try:
+            if not file_path.exists():
+                return ""
+
+            with open(file_path, 'r', encoding='utf-8') as f:
+                content = f.read()
+
+            # Split content into sections (assuming ### headers)
+            sections = content.split('### ')[1:]  # Skip the header part
+
+            for i, section in enumerate(sections):
+                if not section.strip():
+                    continue
+
+                section_lower = section.lower()
+
+                # count how many keywords appear in this section
+                keyword_matches = sum(
+                    1 for keyword in keyword_terms if keyword in section_lower
+                )
+
+                if keyword_matches > 0:
+                    results.append(
+                        {
+                            'content': f"### {section.strip()}",
+                            'keyword_count': keyword_matches,
+                            'section_num': i + 1,
+                        }
+                    )
+
+        except Exception as e:
+            logger.warning(f"Error reading file {file_path}: {e}")
+            return ""
+
+        # sort by keyword count and limit results
+        results.sort(key=lambda x: x['keyword_count'], reverse=True)
+        results = results[:top_k]
+
+        if not results:
+            return ""
+
+        # format results
+        formatted_sections = []
+        for result in results:
+            formatted_sections.append(
+                f"Section {result['section_num']} "
+                f"(keyword matches: {result['keyword_count']}):\n"
+                f"{result['content']}\n"
+            )
+
+        return "\n---\n".join(formatted_sections)
+
+    # ========= UTILITY METHODS =========
+
+    def get_working_directory(self) -> Path:
+        r"""Retrieve the session-specific directory path where
+        all context files are stored.
+
+        Returns:
+            Path: The working directory path.
+        """
+        return self.working_directory
+
+    def get_session_id(self) -> str:
+        r"""Retrieve the unique identifier for the current session
+        used for file organization and tracking.
+
+        Returns:
+            str: The session ID.
+        """
+        return self.session_id
```
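The new `ContextUtility` class above is self-contained, so a short usage sketch may help when reviewing this file. The snippet is illustrative only and not part of the diff: the note name, content, and printed values are made up, and the session directory name is timestamp-based, so it will differ on every run. Note that `search_in_file` splits a file on `### ` headings and ranks sections by how many of the given keywords they contain, which is why the sample note is written in `### ` sections.

```python
from camel.utils.context_utils import ContextUtility

util = ContextUtility(working_directory="./context_files")

# Save a note made of '### ' sections, with an optional title and metadata.
util.save_markdown_file(
    "meeting_notes",
    "### Decisions\nAdopt the new toolkit.\n\n### Follow-ups\nWrite tests.",
    title="Sprint Notes",
    metadata={"author": "alice", "topic": "planning"},
)

print(util.file_exists("meeting_notes"))  # True
print(util.list_markdown_files())         # ['meeting_notes']
print(util.get_session_id())              # e.g. 'session_20250101_120000_000000'

# Keyword search ranks '### ' sections by how many keywords they contain.
hits = util.search_in_file(
    util.get_working_directory() / "meeting_notes.md",
    keywords=["toolkit", "tests"],
    top_k=2,
)
print(hits)
```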
camel/utils/mcp.py
CHANGED
```diff
@@ -13,7 +13,121 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import functools
 import inspect
-
+import warnings
+from typing import (
+    Any,
+    Callable,
+    List,
+    Optional,
+    Tuple,
+    Union,
+    get_args,
+    get_origin,
+    get_type_hints,
+)
+
+from pydantic import create_model
+from pydantic.errors import PydanticSchemaGenerationError
+
+from camel.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+def _is_pydantic_serializable(type_annotation: Any) -> Tuple[bool, str]:
+    r"""Check if a type annotation is Pydantic serializable.
+
+    Args:
+        type_annotation: The type annotation to check
+
+    Returns:
+        Tuple[bool, str]: (is_serializable, error_message)
+    """
+    # Handle None type
+    if type_annotation is type(None) or type_annotation is None:
+        return True, ""
+
+    # Handle generic types (List, Dict, Optional, etc.)
+    origin = get_origin(type_annotation)
+    if origin is not None:
+        args = get_args(type_annotation)
+
+        # For Union types (including Optional), check all args
+        if origin is Union:
+            for arg in args:
+                is_serializable, error_msg = _is_pydantic_serializable(arg)
+                if not is_serializable:
+                    return False, error_msg
+            return True, ""
+
+        # For List, Set, Tuple, etc., check the contained types
+        if origin in (list, set, tuple, frozenset):
+            for arg in args:
+                is_serializable, error_msg = _is_pydantic_serializable(arg)
+                if not is_serializable:
+                    return False, error_msg
+            return True, ""
+
+        # For Dict, check both key and value types
+        if origin is dict:
+            for arg in args:
+                is_serializable, error_msg = _is_pydantic_serializable(arg)
+                if not is_serializable:
+                    return False, error_msg
+            return True, ""
+
+    # Try to create a simple pydantic model with this type
+    try:
+        create_model("TestModel", test_field=(type_annotation, ...))
+        # If model creation succeeds, the type is serializable
+        return True, ""
+    except (PydanticSchemaGenerationError, TypeError, ValueError) as e:
+        error_msg = (
+            f"Type '{type_annotation}' is not Pydantic serializable. "
+            f"Consider using a custom serializable type or converting "
+            f"to bytes/base64. Error: {e!s}"
+        )
+        return False, error_msg
+
+
+def _validate_function_types(func: Callable[..., Any]) -> List[str]:
+    r"""Validate function parameter and return types are Pydantic serializable.
+
+    Args:
+        func (Callable[..., Any]): The function to validate.
+
+    Returns:
+        List[str]: List of error messages for incompatible types.
+    """
+    errors = []
+
+    try:
+        type_hints = get_type_hints(func)
+    except (NameError, AttributeError) as e:
+        # If we can't get type hints, skip validation
+        logger.warning(f"Could not get type hints for {func.__name__}: {e}")
+        return []
+
+    # Check return type
+    return_type = type_hints.get('return', Any)
+    if return_type != Any:
+        is_serializable, error_msg = _is_pydantic_serializable(return_type)
+        if not is_serializable:
+            errors.append(f"Return type: {error_msg}")
+
+    # Check parameter types
+    sig = inspect.signature(func)
+    for param_name, _param in sig.parameters.items():
+        if param_name == 'self':
+            continue
+
+        param_type = type_hints.get(param_name, Any)
+        if param_type != Any:
+            is_serializable, error_msg = _is_pydantic_serializable(param_type)
+            if not is_serializable:
+                errors.append(f"Parameter '{param_name}': {error_msg}")
+
+    return errors
 
 
 class MCPServer:
@@ -55,7 +169,7 @@ class MCPServer:
 
     def __init__(
         self,
-        function_names: Optional[
+        function_names: Optional[List[str]] = None,
         server_name: Optional[str] = None,
     ):
         self.function_names = function_names
@@ -135,6 +249,26 @@ class MCPServer:
                     f"Method {name} not found in class {cls.__name__} or "
                    "cannot be called."
                )
+
+            # Validate function types for Pydantic compatibility
+            type_errors = _validate_function_types(func)
+            if type_errors:
+                error_message = (
+                    f"Method '{name}' in class '{cls.__name__}' has "
+                    f"non-Pydantic-serializable types:\n"
+                    + "\n".join(f" - {error}" for error in type_errors)
+                    + "\n\nSuggestions:"
+                    + "\n - Use standard Python types (str, int, float, bool, bytes)"  # noqa: E501
+                    + "\n - Convert complex objects to JSON strings or bytes"  # noqa: E501
+                    + "\n - Create custom Pydantic models for complex data"  # noqa: E501
+                    + "\n - Use base64 encoding for binary data like images"  # noqa: E501
+                )
+
+                # For now, issue a warning instead of raising an error
+                # This allows gradual migration while alerting developers
+                warnings.warn(error_message, UserWarning, stacklevel=3)
+                logger.warning(error_message)
+
            wrapper = self.make_wrapper(func)
            instance.mcp.tool(name=name)(wrapper)
 
```
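To see what the new validation in camel/utils/mcp.py reports, here is a hedged sketch that calls the private helper `_validate_function_types` directly; the `DBConnection` class and both functions are hypothetical and exist only for illustration. Inside the `MCPServer` decorator the same check does not raise: it emits a `UserWarning` (plus a log warning) listing the offending parameters, as the added block above shows.

```python
from typing import List

from camel.utils.mcp import _validate_function_types


class DBConnection:
    """Arbitrary object with no Pydantic schema (hypothetical)."""

    def __init__(self, dsn: str) -> None:
        self.dsn = dsn


def query_rows(conn: DBConnection, sql: str) -> List[str]:
    # 'conn' has no Pydantic schema, so validation should flag this parameter.
    return [sql]


def echo(text: str, repeat: int = 1) -> str:
    # Plain builtin types only: expected to validate cleanly.
    return text * repeat


print(_validate_function_types(query_rows))
# expected: one entry like "Parameter 'conn': Type ... is not Pydantic serializable. ..."
print(_validate_function_types(echo))
# expected: []
```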
{camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76a1.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: camel-ai
-Version: 0.2.75a6
+Version: 0.2.76a1
 Summary: Communicative Agents for AI Society Study
 Project-URL: Homepage, https://www.camel-ai.org/
 Project-URL: Repository, https://github.com/camel-ai/camel
@@ -39,7 +39,7 @@ Requires-Dist: dappier<0.4,>=0.3.3; extra == 'all'
 Requires-Dist: datacommons-pandas<0.0.4,>=0.0.3; extra == 'all'
 Requires-Dist: datacommons<2,>=1.4.3; extra == 'all'
 Requires-Dist: datasets<4,>=3; extra == 'all'
-Requires-Dist: daytona-sdk
+Requires-Dist: daytona-sdk>=0.20.0; extra == 'all'
 Requires-Dist: diffusers<0.26,>=0.25.0; extra == 'all'
 Requires-Dist: discord-py<3,>=2.3.2; extra == 'all'
 Requires-Dist: docker<8,>=7.1.0; extra == 'all'
@@ -57,6 +57,7 @@ Requires-Dist: flask>=2.0; extra == 'all'
 Requires-Dist: google-api-python-client==2.166.0; extra == 'all'
 Requires-Dist: google-auth-httplib2==0.2.0; extra == 'all'
 Requires-Dist: google-auth-oauthlib==1.2.1; extra == 'all'
+Requires-Dist: google-cloud-aiplatform>=1.111.0; extra == 'all'
 Requires-Dist: google-cloud-storage<3,>=2.18.0; extra == 'all'
 Requires-Dist: google-genai>=1.13.0; extra == 'all'
 Requires-Dist: googlemaps<5,>=4.10.0; extra == 'all'
@@ -73,6 +74,7 @@ Requires-Dist: markitdown>=0.1.1; extra == 'all'
 Requires-Dist: math-verify<0.8,>=0.7.0; extra == 'all'
 Requires-Dist: mcp>=1.3.0; extra == 'all'
 Requires-Dist: mem0ai>=0.1.67; extra == 'all'
+Requires-Dist: microsandbox>=0.1.8; extra == 'all'
 Requires-Dist: mistralai<2,>=1.1.0; extra == 'all'
 Requires-Dist: mock<6,>=5; extra == 'all'
 Requires-Dist: mypy<2,>=1.5.1; extra == 'all'
@@ -186,13 +188,14 @@ Requires-Dist: uv<0.8,>=0.7.0; extra == 'dev'
 Provides-Extra: dev-tools
 Requires-Dist: aci-sdk>=1.0.0b1; extra == 'dev-tools'
 Requires-Dist: agentops<0.4,>=0.3.21; extra == 'dev-tools'
-Requires-Dist: daytona-sdk
+Requires-Dist: daytona-sdk>=0.20.0; extra == 'dev-tools'
 Requires-Dist: docker<8,>=7.1.0; extra == 'dev-tools'
 Requires-Dist: e2b-code-interpreter<2,>=1.0.3; extra == 'dev-tools'
 Requires-Dist: ipykernel<7,>=6.0.0; extra == 'dev-tools'
 Requires-Dist: jupyter-client<9,>=8.6.2; extra == 'dev-tools'
 Requires-Dist: langfuse>=2.60.5; extra == 'dev-tools'
 Requires-Dist: mcp>=1.3.0; extra == 'dev-tools'
+Requires-Dist: microsandbox>=0.1.8; extra == 'dev-tools'
 Requires-Dist: tree-sitter-python<0.24,>=0.23.6; extra == 'dev-tools'
 Requires-Dist: tree-sitter<0.24,>=0.23.2; extra == 'dev-tools'
 Requires-Dist: typer>=0.15.2; extra == 'dev-tools'
```