camel-ai 0.2.27__py3-none-any.whl → 0.2.28__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of camel-ai might be problematic.

camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
 
 from camel.logger import disable_logging, enable_logging, set_log_level
 
-__version__ = '0.2.27'
+__version__ = '0.2.28'
 
 __all__ = [
     '__version__',

camel/models/azure_openai_model.py CHANGED
@@ -263,7 +263,8 @@ class AzureOpenAIModel(BaseModelBackend):
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode,
-            which sends partial results each time.
+        which sends partial results each time.
+
         Returns:
             bool: Whether the model is in stream mode.
         """

camel/models/sglang_model.py CHANGED
@@ -350,8 +350,9 @@ def _wait_for_server(base_url: str, timeout: Optional[int] = 30) -> None:
     r"""Wait for the server to be ready by polling the /v1/models endpoint.
 
     Args:
-        base_url: The base URL of the server
-        timeout: Maximum time to wait in seconds. Default is 30 seconds.
+        base_url (str): The base URL of the server
+        timeout (Optional[int]): Maximum time to wait in seconds.
+            (default: :obj:`30`)
     """
    import requests
 

camel/models/vllm_model.py CHANGED
@@ -139,6 +139,10 @@ class VLLMModel(BaseModelBackend):
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]], optional): The format
+                to return the response in.
+            tools (Optional[List[Dict[str, Any]]], optional): List of tools
+                the model may call.
 
         Returns:
             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
@@ -146,10 +150,16 @@ class VLLMModel(BaseModelBackend):
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
 
+        kwargs = self.model_config_dict.copy()
+        if tools:
+            kwargs["tools"] = tools
+        if response_format:
+            kwargs["response_format"] = {"type": "json_object"}
+
         response = await self._async_client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **kwargs,
         )
         return response
 
@@ -164,6 +174,10 @@ class VLLMModel(BaseModelBackend):
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]], optional): The format
+                to return the response in.
+            tools (Optional[List[Dict[str, Any]]], optional): List of tools
+                the model may call.
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
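
The response_format and tools parameters documented here are folded into the request in the surrounding kwargs hunks: the base model_config_dict is copied, tools are passed through, and any Pydantic response_format is coerced to vLLM's generic JSON-object mode rather than a per-model JSON schema. A minimal standalone sketch of that merge, assuming only what the hunks show (the helper name and example model are illustrative, not part of the package):

    from typing import Any, Dict, List, Optional, Type

    from pydantic import BaseModel

    def merge_request_kwargs(
        model_config_dict: Dict[str, Any],
        response_format: Optional[Type[BaseModel]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
    ) -> Dict[str, Any]:
        # Mirrors the kwargs handling added to VLLMModel._run/_arun: copy the
        # base config, pass tools through, and map any response_format to
        # vLLM's JSON-object mode.
        kwargs = model_config_dict.copy()
        if tools:
            kwargs["tools"] = tools
        if response_format:
            kwargs["response_format"] = {"type": "json_object"}
        return kwargs

    class ExampleSchema(BaseModel):
        city: str
        temperature_c: float

    print(merge_request_kwargs({"temperature": 0.2}, response_format=ExampleSchema))
    # {'temperature': 0.2, 'response_format': {'type': 'json_object'}}
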
@@ -171,10 +185,16 @@ class VLLMModel(BaseModelBackend):
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
 
+        kwargs = self.model_config_dict.copy()
+        if tools:
+            kwargs["tools"] = tools
+        if response_format:
+            kwargs["response_format"] = {"type": "json_object"}
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **kwargs,
         )
         return response
 

camel/storages/graph_storages/neo4j_graph.py CHANGED
@@ -343,14 +343,14 @@ class Neo4jGraph(BaseGraphStorage):
         self, subj: str, obj: str, rel: str, timestamp: Optional[str] = None
     ) -> None:
         r"""Adds a relationship (triplet) between two entities
-            in the database with a timestamp.
+        in the database with a timestamp.
 
         Args:
             subj (str): The identifier for the subject entity.
             obj (str): The identifier for the object entity.
             rel (str): The relationship between the subject and object.
             timestamp (Optional[str]): The timestamp of the relationship.
-            Defaults to None.
+                Defaults to None.
         """
         query = """
         MERGE (n1:`%s` {id:$subj})
@@ -737,21 +737,23 @@ class Neo4jGraph(BaseGraphStorage):
         obj: Optional[str] = None,
         rel: Optional[str] = None,
     ) -> List[Dict[str, Any]]:
-        r"""
-        Query triplet information. If subj, obj, or rel is
+        r"""Query triplet information. If subj, obj, or rel is
         not specified, returns all matching triplets.
 
         Args:
             subj (Optional[str]): The ID of the subject node.
-                If None, matches any subject node.
+                If None, matches any subject node.
+                (default: :obj:`None`)
             obj (Optional[str]): The ID of the object node.
-                If None, matches any object node.
+                If None, matches any object node.
+                (default: :obj:`None`)
             rel (Optional[str]): The type of relationship.
-                If None, matches any relationship type.
+                If None, matches any relationship type.
+                (default: :obj:`None`)
 
         Returns:
             List[Dict[str, Any]]: A list of matching triplets,
-            each containing subj, obj, rel, and timestamp.
+                each containing subj, obj, rel, and timestamp.
         """
         import logging
 

camel/toolkits/__init__.py CHANGED
@@ -98,6 +98,7 @@ __all__ = [
     'SymPyToolkit',
     'MinerUToolkit',
     'MCPToolkit',
+    'MCPToolkitManager',
     'AudioAnalysisToolkit',
     'ExcelToolkit',
     'VideoAnalysisToolkit',

camel/toolkits/mcp_toolkit.py CHANGED
@@ -12,10 +12,13 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import inspect
+import json
+import os
 from contextlib import AsyncExitStack, asynccontextmanager
 from typing import (
     TYPE_CHECKING,
     Any,
+    AsyncGenerator,
     Callable,
     Dict,
     List,
@@ -28,13 +31,16 @@ from urllib.parse import urlparse
 if TYPE_CHECKING:
     from mcp import ListToolsResult, Tool
 
+from camel.logger import get_logger
 from camel.toolkits import BaseToolkit, FunctionTool
 
+logger = get_logger(__name__)
 
-class MCPToolkit(BaseToolkit):
-    r"""MCPToolkit provides an abstraction layer to interact with external
-    tools using the Model Context Protocol (MCP). It supports two modes of
-    connection:
+
+class _MCPServer(BaseToolkit):
+    r"""Internal class that provides an abstraction layer to interact with
+    external tools using the Model Context Protocol (MCP). It supports two
+    modes of connection:
 
     1. stdio mode: Connects via standard input/output streams for local
        command-line interactions.
@@ -73,20 +79,20 @@ class MCPToolkit(BaseToolkit):
         self._exit_stack = AsyncExitStack()
         self._is_connected = False
 
-    @asynccontextmanager
-    async def connection(self):
-        r"""Async context manager for establishing and managing the connection
-        with the MCP server. Automatically selects SSE or stdio mode based
-        on the provided `command_or_url`.
+    async def connect(self):
+        r"""Explicitly connect to the MCP server.
 
-        Yields:
-            MCPToolkit: Instance with active connection ready for tool
-                interaction.
+        Returns:
+            _MCPServer: The connected server instance
         """
         from mcp.client.session import ClientSession
         from mcp.client.sse import sse_client
         from mcp.client.stdio import StdioServerParameters, stdio_client
 
+        if self._is_connected:
+            logger.warning("Server is already connected")
+            return self
+
         try:
             if urlparse(self.command_or_url).scheme in ("http", "https"):
                 (
@@ -113,12 +119,33 @@ class MCPToolkit(BaseToolkit):
             list_tools_result = await self.list_mcp_tools()
             self._mcp_tools = list_tools_result.tools
             self._is_connected = True
-            yield self
+            return self
+        except Exception as e:
+            # Ensure resources are cleaned up on connection failure
+            await self.disconnect()
+            logger.error(f"Failed to connect to MCP server: {e}")
 
+    async def disconnect(self):
+        r"""Explicitly disconnect from the MCP server."""
+        self._is_connected = False
+        await self._exit_stack.aclose()
+        self._session = None
+
+    @asynccontextmanager
+    async def connection(self):
+        r"""Async context manager for establishing and managing the connection
+        with the MCP server. Automatically selects SSE or stdio mode based
+        on the provided `command_or_url`.
+
+        Yields:
+            _MCPServer: Instance with active connection ready for tool
+                interaction.
+        """
+        try:
+            await self.connect()
+            yield self
         finally:
-            self._is_connected = False
-            await self._exit_stack.aclose()
-            self._session = None
+            await self.disconnect()
 
     async def list_mcp_tools(self) -> Union[str, "ListToolsResult"]:
         r"""Retrieves the list of available tools from the connected MCP
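
This refactor splits the old context-manager-only flow into explicit connect() / disconnect() calls, with connection() kept as a thin wrapper over the two. A sketch of both styles against _MCPServer as defined above; the stdio command is a hypothetical placeholder, not a real server:

    import asyncio

    from camel.toolkits.mcp_toolkit import _MCPServer

    async def main() -> None:
        # Hypothetical stdio MCP server; substitute a real command.
        server = _MCPServer("python", args=["my_mcp_server.py"])

        # Style 1: explicit lifecycle management.
        await server.connect()
        try:
            print(f"{len(server.get_tools())} tools available")
        finally:
            await server.disconnect()

        # Style 2: the context-manager flow, now built on connect()/disconnect().
        async with server.connection() as s:
            print(f"{len(s.get_tools())} tools available")

    asyncio.run(main())
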
@@ -188,34 +215,53 @@ class MCPToolkit(BaseToolkit):
                 kwargs.keys()
             )
             if missing_params:
-                raise ValueError(
+                logger.warning(
                     f"Missing required parameters: {missing_params}"
                 )
+                return "Missing required parameters."
 
-            result: CallToolResult = await self._session.call_tool(
-                func_name, kwargs
-            )
+            if not self._session:
+                logger.error(
+                    "MCP Client is not connected. Call `connection()` first."
+                )
+                return (
+                    "MCP Client is not connected. Call `connection()` first."
+                )
 
-            if not result.content:
+            try:
+                result: CallToolResult = await self._session.call_tool(
+                    func_name, kwargs
+                )
+            except Exception as e:
+                logger.error(f"Failed to call MCP tool '{func_name}': {e!s}")
+                return f"Failed to call MCP tool '{func_name}': {e!s}"
+
+            if not result.content or len(result.content) == 0:
                 return "No data available for this request."
 
             # Handle different content types
-            content = result.content[0]
-            if content.type == "text":
-                return content.text
-            elif content.type == "image":
-                # Return image URL or data URI if available
-                if hasattr(content, "url") and content.url:
-                    return f"Image available at: {content.url}"
-                return "Image content received (data URI not shown)"
-            elif content.type == "embedded_resource":
-                # Return resource information if available
-                if hasattr(content, "name") and content.name:
-                    return f"Embedded resource: {content.name}"
-                return "Embedded resource received"
-            else:
-                msg = f"Received content of type '{content.type}'"
-                return f"{msg} which is not fully supported yet."
+            try:
+                content = result.content[0]
+                if content.type == "text":
+                    return content.text
+                elif content.type == "image":
+                    # Return image URL or data URI if available
+                    if hasattr(content, "url") and content.url:
+                        return f"Image available at: {content.url}"
+                    return "Image content received (data URI not shown)"
+                elif content.type == "embedded_resource":
+                    # Return resource information if available
+                    if hasattr(content, "name") and content.name:
+                        return f"Embedded resource: {content.name}"
+                    return "Embedded resource received"
+                else:
+                    msg = f"Received content of type '{content.type}'"
+                    return f"{msg} which is not fully supported yet."
+            except (IndexError, AttributeError) as e:
+                logger.error(
+                    f"Error processing content from MCP tool response: {e!s}"
+                )
+                return "Error processing content from MCP tool response"
 
         dynamic_function.__name__ = func_name
         dynamic_function.__doc__ = func_desc
@@ -236,6 +282,27 @@ class MCPToolkit(BaseToolkit):
 
         return dynamic_function
 
+    def _build_tool_schema(self, mcp_tool: "Tool") -> Dict[str, Any]:
+        input_schema = mcp_tool.inputSchema
+        properties = input_schema.get("properties", {})
+        required = input_schema.get("required", [])
+
+        parameters = {
+            "type": "object",
+            "properties": properties,
+            "required": required,
+        }
+
+        return {
+            "type": "function",
+            "function": {
+                "name": mcp_tool.name,
+                "description": mcp_tool.description
+                or "No description provided.",
+                "parameters": parameters,
+            },
+        }
+
     def get_tools(self) -> List[FunctionTool]:
         r"""Returns a list of FunctionTool objects representing the
         functions in the toolkit. Each function is dynamically generated
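
The new _build_tool_schema helper converts an MCP tool's inputSchema into the OpenAI-style function schema that FunctionTool receives via openai_tool_schema in the next hunk. For a hypothetical MCP tool with one required string argument, the returned dict would look roughly like this (the name and description are invented for illustration):

    # Hypothetical MCP tool: name "get_weather", one required string argument.
    expected_schema = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
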
@@ -246,6 +313,197 @@ class MCPToolkit(BaseToolkit):
             representing the functions in the toolkit.
         """
         return [
-            FunctionTool(self.generate_function_from_mcp_tool(mcp_tool))
+            FunctionTool(
+                self.generate_function_from_mcp_tool(mcp_tool),
+                openai_tool_schema=self._build_tool_schema(mcp_tool),
+            )
             for mcp_tool in self._mcp_tools
         ]
+
+
+class MCPToolkit(BaseToolkit):
+    r"""MCPToolkit provides a unified interface for managing multiple
+    MCP server connections and their tools.
+
+    This class handles the lifecycle of multiple MCP server connections and
+    offers a centralized configuration mechanism for both local and remote
+    MCP services.
+
+    Args:
+        servers (Optional[List[_MCPServer]]): List of _MCPServer
+            instances to manage.
+        config_path (Optional[str]): Path to a JSON configuration file
+            defining MCP servers.
+
+    Note:
+        Either `servers` or `config_path` must be provided. If both are
+        provided, servers from both sources will be combined.
+
+    Attributes:
+        servers (List[_MCPServer]): List of _MCPServer instances being managed.
+    """
+
+    def __init__(
+        self,
+        servers: Optional[List[_MCPServer]] = None,
+        config_path: Optional[str] = None,
+    ):
+        super().__init__()
+
+        if servers and config_path:
+            logger.warning(
+                "Both servers and config_path are provided. "
+                "Servers from both sources will be combined."
+            )
+
+        self.servers = servers or []
+
+        if config_path:
+            self.servers.extend(self._load_servers_from_config(config_path))
+
+        self._exit_stack = AsyncExitStack()
+        self._connected = False
+
+    def _load_servers_from_config(self, config_path: str) -> List[_MCPServer]:
+        r"""Loads MCP server configurations from a JSON file.
+
+        Args:
+            config_path (str): Path to the JSON configuration file.
+
+        Returns:
+            List[_MCPServer]: List of configured _MCPServer instances.
+        """
+        try:
+            with open(config_path, "r", encoding="utf-8") as f:
+                try:
+                    data = json.load(f)
+                except json.JSONDecodeError as e:
+                    logger.warning(
+                        f"Invalid JSON in config file '{config_path}': {e!s}"
+                    )
+                    return []
+        except FileNotFoundError:
+            logger.warning(f"Config file not found: '{config_path}'")
+            return []
+
+        all_servers = []
+
+        # Process local MCP servers
+        mcp_servers = data.get("mcpServers", {})
+        if not isinstance(mcp_servers, dict):
+            logger.warning("'mcpServers' is not a dictionary, skipping...")
+            mcp_servers = {}
+
+        for name, cfg in mcp_servers.items():
+            if not isinstance(cfg, dict):
+                logger.warning(
+                    f"Configuration for server '{name}' must be a dictionary"
+                )
+                continue
+
+            if "command" not in cfg:
+                logger.warning(
+                    f"Missing required 'command' field for server '{name}'"
+                )
+                continue
+
+            server = _MCPServer(
+                command_or_url=cfg["command"],
+                args=cfg.get("args", []),
+                env={**os.environ, **cfg.get("env", {})},
+                timeout=cfg.get("timeout", None),
+            )
+            all_servers.append(server)
+
+        # Process remote MCP web servers
+        mcp_web_servers = data.get("mcpWebServers", {})
+        if not isinstance(mcp_web_servers, dict):
+            logger.warning("'mcpWebServers' is not a dictionary, skipping...")
+            mcp_web_servers = {}
+
+        for name, cfg in mcp_web_servers.items():
+            if not isinstance(cfg, dict):
+                logger.warning(
+                    f"Configuration for web server '{name}' must"
+                    "be a dictionary"
+                )
+                continue
+
+            if "url" not in cfg:
+                logger.warning(
+                    f"Missing required 'url' field for web server '{name}'"
+                )
+                continue
+
+            server = _MCPServer(
+                command_or_url=cfg["url"],
+                timeout=cfg.get("timeout", None),
+            )
+            all_servers.append(server)
+
+        return all_servers
+
+    async def connect(self):
+        r"""Explicitly connect to all MCP servers.
+
+        Returns:
+            MCPToolkit: The connected toolkit instance
+        """
+        if self._connected:
+            logger.warning("MCPToolkit is already connected")
+            return self
+
+        self._exit_stack = AsyncExitStack()
+        try:
+            # Sequentially connect to each server
+            for server in self.servers:
+                await server.connect()
+            self._connected = True
+            return self
+        except Exception as e:
+            # Ensure resources are cleaned up on connection failure
+            await self.disconnect()
+            logger.error(f"Failed to connect to one or more MCP servers: {e}")
+
+    async def disconnect(self):
+        r"""Explicitly disconnect from all MCP servers."""
+        if not self._connected:
+            return
+
+        for server in self.servers:
+            await server.disconnect()
+        self._connected = False
+        await self._exit_stack.aclose()
+
+    @asynccontextmanager
+    async def connection(self) -> AsyncGenerator["MCPToolkit", None]:
+        r"""Async context manager that simultaneously establishes connections
+        to all managed MCP server instances.
+
+        Yields:
+            MCPToolkit: Self with all servers connected.
+        """
+        try:
+            await self.connect()
+            yield self
+        finally:
+            await self.disconnect()
+
+    def is_connected(self) -> bool:
+        r"""Checks if all the managed servers are connected.
+
+        Returns:
+            bool: True if connected, False otherwise.
+        """
+        return self._connected
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Aggregates all tools from the managed MCP server instances.
+
+        Returns:
+            List[FunctionTool]: Combined list of all available function tools.
+        """
+        all_tools = []
+        for server in self.servers:
+            all_tools.extend(server.get_tools())
+        return all_tools
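
_load_servers_from_config expects a JSON file with an mcpServers section (stdio servers keyed by name, each requiring a command plus optional args, env, and timeout) and an mcpWebServers section (remote servers, each requiring a url plus optional timeout). A hedged end-to-end sketch based only on the code above; the file name, command, and URL are placeholders:

    import asyncio
    import json

    from camel.toolkits.mcp_toolkit import MCPToolkit

    # Config layout inferred from _load_servers_from_config above.
    config = {
        "mcpServers": {
            "local-example": {
                "command": "python",
                "args": ["my_mcp_server.py"],  # hypothetical stdio server
                "env": {"EXAMPLE_API_KEY": "dummy"},
                "timeout": 30,
            }
        },
        "mcpWebServers": {
            "remote-example": {
                "url": "https://example.com/mcp/sse",  # hypothetical SSE endpoint
                "timeout": 30,
            }
        },
    }

    with open("mcp_config.json", "w", encoding="utf-8") as f:
        json.dump(config, f)

    async def main() -> None:
        toolkit = MCPToolkit(config_path="mcp_config.json")
        # connection() connects every configured server and disconnects on exit.
        async with toolkit.connection() as tk:
            print(f"connected: {tk.is_connected()}, tools: {len(tk.get_tools())}")

    asyncio.run(main())
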
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: camel-ai
-Version: 0.2.27
+Version: 0.2.28
 Summary: Communicative Agents for AI Society Study
 Project-URL: Homepage, https://www.camel-ai.org/
 Project-URL: Repository, https://github.com/camel-ai/camel
@@ -1,4 +1,4 @@
-camel/__init__.py,sha256=6XPf3TQFxgCHXxPaRb-Lda_NJ-gHT1IsrHIeYeMzGGA,912
+camel/__init__.py,sha256=cKkedYUg0x2fhS1OE-zLWTVrZa2yHjvkSYZ6n0mamWA,912
 camel/generators.py,sha256=JRqj9_m1PF4qT6UtybzTQ-KBT9MJQt18OAAYvQ_fr2o,13844
 camel/human.py,sha256=9X09UmxI2JqQnhrFfnZ3B9EzFmVfdSWQcjLWTIXKXe0,4962
 camel/logger.py,sha256=j6mPsLJyKOn16o6Um57882mHsURQ8h-jia6Jd_34wRA,4239
@@ -138,7 +138,7 @@ camel/models/__init__.py,sha256=iNZ1LcesJ3YaAbNRxHTA-sUaxfjn6gUQ-EQArll21KQ,2769
 camel/models/_utils.py,sha256=hob1ehnS5xZitMCdYToHVgaTB55JnaP4_DSWnTEfVsg,2045
 camel/models/aiml_model.py,sha256=4FW66DxmVMPWAJckh4UjMM6eD1QNyrAPAPtrpmWxzjc,6524
 camel/models/anthropic_model.py,sha256=8XAj9sVaN1X0hvrL9a-qsmkAFWoGe1Ozj5XZsXYe1UI,5894
-camel/models/azure_openai_model.py,sha256=AblW2scYp12_odI1GG0ATHI8-Tn7d6SCsxHe7g66rWs,10386
+camel/models/azure_openai_model.py,sha256=l8hqhRQyxtkPR9mUq6fzbeL3ARjGxE8aht_GhiWC-r4,10383
 camel/models/base_audio_model.py,sha256=QkLqh0v-5kcE_jwFB5xAgvztAqB2Bot4_iG9sZdcl8A,2986
 camel/models/base_model.py,sha256=RolL8fRwVpfz8g9lpb_71h0mYTNl-U63f8KBy6hc3E0,10679
 camel/models/cohere_model.py,sha256=RAYHCyppDQxQ7BOR-e314AagB09vRxoScoHc-FtL6Bc,13355
@@ -161,11 +161,11 @@ camel/models/openai_model.py,sha256=CbfD9yVtAltyqdFpjnLXncFnmaGPDZq8JhJDaSfG0pc,
 camel/models/qwen_model.py,sha256=_LeeB0yrXRMI-gZOEEOHg0bWNOJpuQHf2G7u40--3r8,7064
 camel/models/reka_model.py,sha256=15DscZf3lbqsIzm6kzjzDrhblBt1_0xlphT4isuQMu0,10146
 camel/models/samba_model.py,sha256=i2k1qEbMMMhYDY5MWG3dqyy2oeDi43o35JZXLvDmvI0,22476
-camel/models/sglang_model.py,sha256=LsuilXBPCs48pVRW862T3QCiOVcFACBqiFxbWPiHbeg,13750
+camel/models/sglang_model.py,sha256=lAxWpArP74lQ3E2IG-tEtBbs5ZRQPwKMfThvWSq1nFg,13782
 camel/models/siliconflow_model.py,sha256=c5vk4zAhZVf8pDF1uh-iSa_v8d0QoPLuIN27EemdMGE,5659
 camel/models/stub_model.py,sha256=dygYoxemnWWaxEX21L8QyKe-c75ti2CK9HnTuyHL5vs,5160
 camel/models/togetherai_model.py,sha256=-YwZV1S1bkrX8jGguQI5dbtIHVuqhv96MoAcl33ptPo,6657
-camel/models/vllm_model.py,sha256=dzH4rYr2Se7cejk2hobblaW-s483uxPxb8976RQE8x0,6884
+camel/models/vllm_model.py,sha256=xhZ2NbPCtgDCxu4QpJ2-Q8klqsqOKfJvFEatmZDwdQY,7706
 camel/models/volcano_model.py,sha256=inYDiKOfGvq8o3XW4KVQIrXiZOhXQfB4HfCHGCWHPKs,3792
 camel/models/yi_model.py,sha256=V4sc9n8MAKVfjGO-NU0I8W4lGKdORSCbMV020SHT3R0,6180
 camel/models/zhipuai_model.py,sha256=o3uoTY30p1yUIklvoRMyr8JX39xZ5mLVKSTtUknW8nE,6517
@@ -234,7 +234,7 @@ camel/storages/graph_storages/__init__.py,sha256=G29BNn651C0WTOpjCl4QnVM-4B9tcNh
 camel/storages/graph_storages/base.py,sha256=uSe9jWuLudfm5jtfo6E-L_kNzITwK1_Ef-6L4HWw-JM,2852
 camel/storages/graph_storages/graph_element.py,sha256=X_2orbQOMaQd00xxzAoJLfEcrVNE1mgCqMJv0orMAKA,2733
 camel/storages/graph_storages/nebula_graph.py,sha256=iLcHrIgd5U59GXlcLtLBAI8vNFpqHHLHHFmHTceVVLc,22816
-camel/storages/graph_storages/neo4j_graph.py,sha256=FBOH19VvEU3vXcM1Kuel88hVb2v1K_AKAZob4NmG9m0,30713
+camel/storages/graph_storages/neo4j_graph.py,sha256=Ng7fLCUrWhdFAd4d6UEpuAB6B6QgxbHmv8d8XDNOVJc,30773
 camel/storages/key_value_storages/__init__.py,sha256=le_hl7MYoQvaiYaJHwomy8c0cvTemicZbmwxgCJUpOs,962
 camel/storages/key_value_storages/base.py,sha256=FSfxeLuG7SPvn-Mg-OQxtRKPtQBnRkB7lYeDaFOefpk,2177
 camel/storages/key_value_storages/in_memory.py,sha256=k04Nx53lYxD5MoqDtBEgZrQYkAQ-zIuU6tqnoNqiHws,1949
@@ -256,7 +256,7 @@ camel/terminators/__init__.py,sha256=t8uqrkUnXEOYMXQDgaBkMFJ0EXFKI0kmx4cUimli3Ls
 camel/terminators/base.py,sha256=xmJzERX7GdSXcxZjAHHODa0rOxRChMSRboDCNHWSscs,1511
 camel/terminators/response_terminator.py,sha256=n3G5KP6Oj7-7WlRN0yFcrtLpqAJKaKS0bmhrWlFfCgQ,4982
 camel/terminators/token_limit_terminator.py,sha256=YWv6ZR8R9yI2Qnf_3xES5bEE_O5bb2CxQ0EUXfMh34c,2118
-camel/toolkits/__init__.py,sha256=SJ1Agk9YI_qH4gKX-pcvhl-m2t0Nori6R9Fs0gXReDs,3699
+camel/toolkits/__init__.py,sha256=qFCITSR1oSHHsE37XVh9Rq0V74Wj3NNAH1HhBBLuRSA,3724
 camel/toolkits/arxiv_toolkit.py,sha256=d0Zn8LQGENhtlZ0BHlDr1pUV8xHOc6TOenAaKgbelu8,6279
 camel/toolkits/ask_news_toolkit.py,sha256=PAxio8I2eTau9TgOu1jyFC9fsHhvGb-aLIkroWPtwx4,23268
 camel/toolkits/audio_analysis_toolkit.py,sha256=LC0C6SEIwko8HqkT-C3ub6Ila2PfuIbKLBOEjrrF6BE,8552
@@ -276,7 +276,7 @@ camel/toolkits/human_toolkit.py,sha256=9CjB1flGXIx7mzkIliDjcwXATUvZNdrRCKWyEgR9E
 camel/toolkits/image_analysis_toolkit.py,sha256=dpvT8n49s8B8AhJ8aFdy4OONb8E8r_Cwxpx-ByFruy8,7209
 camel/toolkits/linkedin_toolkit.py,sha256=9e7R6YXTeW5aM5o_eOv3YbnJE1ifw5HgaM-pMA-9rqU,7996
 camel/toolkits/math_toolkit.py,sha256=5yVF0bKuwkZIV01uICd3TOfktXlTERjKt4DrFyz_oaE,3639
-camel/toolkits/mcp_toolkit.py,sha256=bgLUDjH0uwXeqOoU8mac97kb6f7eP2DHnGCbaCyBqKU,9112
+camel/toolkits/mcp_toolkit.py,sha256=emglu4H4Kg_CGku9_8xKUqUwCzjXCIlgilGT-v7qlPE,17647
 camel/toolkits/meshy_toolkit.py,sha256=Fd6sQV2JtduxyvHxCBA0_zl2OCgJRAlvDEe58hX8gRg,6463
 camel/toolkits/mineru_toolkit.py,sha256=vRX9LholLNkpbJ6axfEN4pTG85aWb0PDmlVy3rAAXhg,6868
 camel/toolkits/networkx_toolkit.py,sha256=zsP95kst12Q-knafxixlSXdXr3sXjehCXZn7330bsEo,8483
@@ -341,7 +341,7 @@ camel/verifiers/__init__.py,sha256=p6UEyvaOlwUQaFACGB4C07fL1xSnpTouElt19YRuneQ,9
 camel/verifiers/base.py,sha256=efWZV9g58IHzJ24U4zr109y34CaAi8tV9WZPMCzP3YI,12017
 camel/verifiers/models.py,sha256=hC6m_YxEX-mqi_tkCNZHZWLBWf04ZTyv5vfKR-BEyU4,2818
 camel/verifiers/python_verifier.py,sha256=bj-UGxeJTZzxVVa3a8IEQ1lNOpSaaW3JdGnUEoPeQD0,7519
-camel_ai-0.2.27.dist-info/METADATA,sha256=B4jItSRHDPybZtfm-i0xxUq3z4HWUQ99OEdaRJ33Mfw,37992
-camel_ai-0.2.27.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-camel_ai-0.2.27.dist-info/licenses/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
-camel_ai-0.2.27.dist-info/RECORD,,
+camel_ai-0.2.28.dist-info/METADATA,sha256=Osjure3q4QVC_wQGuxycpqgoLipQVkwyOu_AudKOrkU,37992
+camel_ai-0.2.28.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+camel_ai-0.2.28.dist-info/licenses/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
+camel_ai-0.2.28.dist-info/RECORD,,