camel-ai 0.2.75a6__py3-none-any.whl → 0.2.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (97)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +1001 -205
  3. camel/agents/mcp_agent.py +30 -27
  4. camel/configs/__init__.py +6 -0
  5. camel/configs/amd_config.py +70 -0
  6. camel/configs/cometapi_config.py +104 -0
  7. camel/data_collectors/alpaca_collector.py +15 -6
  8. camel/environments/tic_tac_toe.py +1 -1
  9. camel/interpreters/__init__.py +2 -0
  10. camel/interpreters/docker/Dockerfile +3 -12
  11. camel/interpreters/microsandbox_interpreter.py +395 -0
  12. camel/loaders/__init__.py +11 -2
  13. camel/loaders/chunkr_reader.py +9 -0
  14. camel/memories/__init__.py +2 -1
  15. camel/memories/agent_memories.py +3 -1
  16. camel/memories/blocks/chat_history_block.py +21 -3
  17. camel/memories/records.py +88 -8
  18. camel/messages/base.py +127 -34
  19. camel/models/__init__.py +4 -0
  20. camel/models/amd_model.py +101 -0
  21. camel/models/azure_openai_model.py +0 -6
  22. camel/models/base_model.py +30 -0
  23. camel/models/cometapi_model.py +83 -0
  24. camel/models/model_factory.py +4 -0
  25. camel/models/openai_compatible_model.py +0 -6
  26. camel/models/openai_model.py +0 -6
  27. camel/models/zhipuai_model.py +61 -2
  28. camel/parsers/__init__.py +18 -0
  29. camel/parsers/mcp_tool_call_parser.py +176 -0
  30. camel/retrievers/auto_retriever.py +1 -0
  31. camel/runtimes/daytona_runtime.py +11 -12
  32. camel/societies/workforce/prompts.py +131 -50
  33. camel/societies/workforce/single_agent_worker.py +434 -49
  34. camel/societies/workforce/structured_output_handler.py +30 -18
  35. camel/societies/workforce/task_channel.py +43 -0
  36. camel/societies/workforce/utils.py +105 -12
  37. camel/societies/workforce/workforce.py +1322 -311
  38. camel/societies/workforce/workforce_logger.py +24 -5
  39. camel/storages/key_value_storages/json.py +15 -2
  40. camel/storages/object_storages/google_cloud.py +1 -1
  41. camel/storages/vectordb_storages/oceanbase.py +10 -11
  42. camel/storages/vectordb_storages/tidb.py +8 -6
  43. camel/tasks/task.py +4 -3
  44. camel/toolkits/__init__.py +18 -5
  45. camel/toolkits/aci_toolkit.py +45 -0
  46. camel/toolkits/code_execution.py +28 -1
  47. camel/toolkits/context_summarizer_toolkit.py +684 -0
  48. camel/toolkits/dingtalk.py +1135 -0
  49. camel/toolkits/edgeone_pages_mcp_toolkit.py +11 -31
  50. camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +194 -34
  51. camel/toolkits/function_tool.py +6 -1
  52. camel/toolkits/google_drive_mcp_toolkit.py +12 -31
  53. camel/toolkits/hybrid_browser_toolkit/config_loader.py +12 -0
  54. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +79 -2
  55. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +95 -59
  56. camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
  57. camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +5 -612
  58. camel/toolkits/hybrid_browser_toolkit/ts/package.json +0 -1
  59. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +619 -95
  60. camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +7 -2
  61. camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +115 -219
  62. camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
  63. camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
  64. camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
  65. camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +1 -0
  66. camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
  67. camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +405 -131
  68. camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +9 -5
  69. camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
  70. camel/toolkits/markitdown_toolkit.py +27 -1
  71. camel/toolkits/mcp_toolkit.py +348 -348
  72. camel/toolkits/message_integration.py +3 -0
  73. camel/toolkits/minimax_mcp_toolkit.py +195 -0
  74. camel/toolkits/note_taking_toolkit.py +18 -8
  75. camel/toolkits/notion_mcp_toolkit.py +16 -26
  76. camel/toolkits/origene_mcp_toolkit.py +8 -49
  77. camel/toolkits/playwright_mcp_toolkit.py +12 -31
  78. camel/toolkits/resend_toolkit.py +168 -0
  79. camel/toolkits/slack_toolkit.py +50 -1
  80. camel/toolkits/terminal_toolkit/__init__.py +18 -0
  81. camel/toolkits/terminal_toolkit/terminal_toolkit.py +924 -0
  82. camel/toolkits/terminal_toolkit/utils.py +532 -0
  83. camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
  84. camel/toolkits/video_analysis_toolkit.py +17 -11
  85. camel/toolkits/wechat_official_toolkit.py +483 -0
  86. camel/types/enums.py +124 -1
  87. camel/types/unified_model_type.py +5 -0
  88. camel/utils/commons.py +17 -0
  89. camel/utils/context_utils.py +804 -0
  90. camel/utils/mcp.py +136 -2
  91. camel/utils/token_counting.py +25 -17
  92. {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/METADATA +158 -59
  93. {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/RECORD +95 -76
  94. camel/loaders/pandas_reader.py +0 -368
  95. camel/toolkits/terminal_toolkit.py +0 -1788
  96. {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/WHEEL +0 -0
  97. {camel_ai-0.2.75a6.dist-info → camel_ai-0.2.76.dist-info}/licenses/LICENSE +0 -0
camel/toolkits/mcp_toolkit.py
@@ -18,8 +18,11 @@ import warnings
 from contextlib import AsyncExitStack
 from typing import Any, Dict, List, Optional
 
+from typing_extensions import TypeGuard
+
 from camel.logger import get_logger
-from camel.toolkits import BaseToolkit, FunctionTool
+from camel.toolkits.base import BaseToolkit
+from camel.toolkits.function_tool import FunctionTool
 from camel.utils.commons import run_async
 from camel.utils.mcp_client import MCPClient, create_mcp_client
 
@@ -43,6 +46,187 @@ class MCPToolError(Exception):
     pass
 
 
+_EMPTY_SCHEMA = {
+    "additionalProperties": False,
+    "type": "object",
+    "properties": {},
+    "required": [],
+}
+
+
+def ensure_strict_json_schema(schema: dict[str, Any]) -> dict[str, Any]:
+    r"""Mutates the given JSON schema to ensure it conforms to the
+    `strict` standard that the OpenAI API expects.
+    """
+    if schema == {}:
+        return _EMPTY_SCHEMA
+    return _ensure_strict_json_schema(schema, path=(), root=schema)
+
+
+def _ensure_strict_json_schema(
+    json_schema: object,
+    *,
+    path: tuple[str, ...],
+    root: dict[str, object],
+) -> dict[str, Any]:
+    if not is_dict(json_schema):
+        raise TypeError(
+            f"Expected {json_schema} to be a dictionary; path={path}"
+        )
+
+    defs = json_schema.get("$defs")
+    if is_dict(defs):
+        for def_name, def_schema in defs.items():
+            _ensure_strict_json_schema(
+                def_schema, path=(*path, "$defs", def_name), root=root
+            )
+
+    definitions = json_schema.get("definitions")
+    if is_dict(definitions):
+        for definition_name, definition_schema in definitions.items():
+            _ensure_strict_json_schema(
+                definition_schema,
+                path=(*path, "definitions", definition_name),
+                root=root,
+            )
+
+    typ = json_schema.get("type")
+    if typ == "object" and "additionalProperties" not in json_schema:
+        json_schema["additionalProperties"] = False
+    elif (
+        typ == "object"
+        and "additionalProperties" in json_schema
+        and json_schema["additionalProperties"]
+    ):
+        raise ValueError(
+            "additionalProperties should not be set for object types. This "
+            "could be because you're using an older version of Pydantic, or "
+            "because you configured additional properties to be allowed. If "
+            "you really need this, update the function or output tool "
+            "to not use a strict schema."
+        )
+
+    # object types
+    # { 'type': 'object', 'properties': { 'a': {...} } }
+    properties = json_schema.get("properties")
+    if is_dict(properties):
+        json_schema["required"] = list(properties.keys())
+        json_schema["properties"] = {
+            key: _ensure_strict_json_schema(
+                prop_schema, path=(*path, "properties", key), root=root
+            )
+            for key, prop_schema in properties.items()
+        }
+
+    # arrays
+    # { 'type': 'array', 'items': {...} }
+    items = json_schema.get("items")
+    if is_dict(items):
+        json_schema["items"] = _ensure_strict_json_schema(
+            items, path=(*path, "items"), root=root
+        )
+
+    # unions
+    any_of = json_schema.get("anyOf")
+    if is_list(any_of):
+        json_schema["anyOf"] = [
+            _ensure_strict_json_schema(
+                variant, path=(*path, "anyOf", str(i)), root=root
+            )
+            for i, variant in enumerate(any_of)
+        ]
+
+    # intersections
+    all_of = json_schema.get("allOf")
+    if is_list(all_of):
+        if len(all_of) == 1:
+            json_schema.update(
+                _ensure_strict_json_schema(
+                    all_of[0], path=(*path, "allOf", "0"), root=root
+                )
+            )
+            json_schema.pop("allOf")
+        else:
+            json_schema["allOf"] = [
+                _ensure_strict_json_schema(
+                    entry, path=(*path, "allOf", str(i)), root=root
+                )
+                for i, entry in enumerate(all_of)
+            ]
+
+    # strip `None` defaults as there's no meaningful distinction here
+    # the schema will still be `nullable` and the model will default
+    # to using `None` anyway
+    if json_schema.get("default", None) is None:
+        json_schema.pop("default", None)
+
+    # we can't use `$ref`s if there are also other properties defined, e.g.
+    # `{"$ref": "...", "description": "my description"}`
+    #
+    # so we unravel the ref
+    # `{"type": "string", "description": "my description"}`
+    ref = json_schema.get("$ref")
+    if ref and has_more_than_n_keys(json_schema, 1):
+        assert isinstance(ref, str), f"Received non-string $ref - {ref}"
+
+        resolved = resolve_ref(root=root, ref=ref)
+        if not is_dict(resolved):
+            raise ValueError(
+                f"Expected `$ref: {ref}` to resolved to a dictionary but got "
+                f"{resolved}"
+            )
+
+        # properties from the json schema take priority
+        # over the ones on the `$ref`
+        json_schema.update({**resolved, **json_schema})
+        json_schema.pop("$ref")
+        # Since the schema expanded from `$ref` might not
+        # have `additionalProperties: false` applied
+        # we call `_ensure_strict_json_schema` again to fix the inlined
+        # schema and ensure it's valid
+        return _ensure_strict_json_schema(json_schema, path=path, root=root)
+
+    return json_schema
+
+
+def resolve_ref(*, root: dict[str, object], ref: str) -> object:
+    if not ref.startswith("#/"):
+        raise ValueError(
+            f"Unexpected $ref format {ref!r}; Does not start with #/"
+        )
+
+    path = ref[2:].split("/")
+    resolved = root
+    for key in path:
+        value = resolved[key]
+        assert is_dict(value), (
+            f"encountered non-dictionary entry while resolving {ref} - "
+            f"{resolved}"
+        )
+        resolved = value
+
+    return resolved
+
+
+def is_dict(obj: object) -> TypeGuard[dict[str, object]]:
+    # just pretend that we know there are only `str` keys
+    # as that check is not worth the performance cost
+    return isinstance(obj, dict)
+
+
+def is_list(obj: object) -> TypeGuard[list[object]]:
+    return isinstance(obj, list)
+
+
+def has_more_than_n_keys(obj: dict[str, object], n: int) -> bool:
+    i = 0
+    for _ in obj.keys():
+        i += 1
+        if i > n:
+            return True
+    return False
+
+
 class MCPToolkit(BaseToolkit):
     r"""MCPToolkit provides a unified interface for managing multiple
     MCP server connections and their tools.
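
The helper added above performs the strict-schema conversion that OpenAI's structured outputs expect. As a quick reference, a minimal sketch of what it should do to a tool's parameter schema, assuming `ensure_strict_json_schema` is importable from `camel.toolkits.mcp_toolkit` as this hunk suggests (the schema below is made up for illustration):

    from camel.toolkits.mcp_toolkit import ensure_strict_json_schema

    # Hypothetical non-strict parameter schema from an MCP tool.
    schema = {
        "type": "object",
        "properties": {
            "city": {"type": "string", "description": "City name"},
            "days": {"type": "integer", "default": None},
        },
    }

    strict = ensure_strict_json_schema(schema)
    # Per the implementation above: additionalProperties is forced to False,
    # every property becomes required, and the bare None default is dropped.
    assert strict["additionalProperties"] is False
    assert strict["required"] == ["city", "days"]
    assert "default" not in strict["properties"]["days"]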
@@ -220,26 +404,34 @@ class MCPToolkit(BaseToolkit):
         self._exit_stack = AsyncExitStack()
 
         try:
-            # Connect to all clients using AsyncExitStack
-            for i, client in enumerate(self.clients):
-                try:
-                    # Use MCPClient directly as async context manager
-                    await self._exit_stack.enter_async_context(client)
-                    msg = f"Connected to client {i+1}/{len(self.clients)}"
-                    logger.debug(msg)
-                except Exception as e:
-                    logger.error(f"Failed to connect to client {i+1}: {e}")
-                    # AsyncExitStack will handle cleanup of already connected
-                    await self._exit_stack.aclose()
-                    self._exit_stack = None
-                    error_msg = f"Failed to connect to client {i+1}: {e}"
-                    raise MCPConnectionError(error_msg) from e
+            # Apply timeout to the entire connection process
+            import asyncio
+
+            timeout_seconds = self.timeout or 30.0
+            await asyncio.wait_for(
+                self._connect_all_clients(), timeout=timeout_seconds
+            )
 
             self._is_connected = True
             msg = f"Successfully connected to {len(self.clients)} MCP servers"
             logger.info(msg)
             return self
 
+        except (asyncio.TimeoutError, asyncio.CancelledError):
+            self._is_connected = False
+            if self._exit_stack:
+                await self._exit_stack.aclose()
+                self._exit_stack = None
+
+            timeout_seconds = self.timeout or 30.0
+            error_msg = (
+                f"Connection timeout after {timeout_seconds}s. "
+                f"One or more MCP servers are not responding. "
+                f"Please check if the servers are running and accessible."
+            )
+            logger.error(error_msg)
+            raise MCPConnectionError(error_msg)
+
         except Exception:
             self._is_connected = False
             if self._exit_stack:
@@ -247,6 +439,23 @@ class MCPToolkit(BaseToolkit):
                 self._exit_stack = None
             raise
 
+    async def _connect_all_clients(self):
+        r"""Connect to all clients sequentially."""
+        # Connect to all clients using AsyncExitStack
+        for i, client in enumerate(self.clients):
+            try:
+                # Use MCPClient directly as async context manager
+                await self._exit_stack.enter_async_context(client)
+                msg = f"Connected to client {i+1}/{len(self.clients)}"
+                logger.debug(msg)
+            except Exception as e:
+                logger.error(f"Failed to connect to client {i+1}: {e}")
+                # AsyncExitStack will cleanup already connected clients
+                await self._exit_stack.aclose()
+                self._exit_stack = None
+                error_msg = f"Failed to connect to client {i+1}: {e}"
+                raise MCPConnectionError(error_msg) from e
+
     async def disconnect(self):
         r"""Disconnect from all MCP servers."""
         if not self._is_connected:
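
Taken together, the two hunks above replace the inline connection loop with a helper that `connect()` wraps in `asyncio.wait_for`, so one hung server now surfaces as a single timeout error instead of blocking forever. A self-contained sketch of that pattern (the server names, delays, and `RuntimeError` are illustrative, not the camel API):

    import asyncio
    from typing import Optional


    async def connect_one(name: str, delay: float) -> None:
        # Stand-in for entering one MCP client's async context.
        await asyncio.sleep(delay)
        print(f"connected to {name}")


    async def connect_sequentially() -> None:
        # Mirrors _connect_all_clients: servers are connected one by one.
        for name, delay in [("server-a", 0.1), ("server-b", 0.2)]:
            await connect_one(name, delay)


    async def connect_all(timeout: Optional[float] = None) -> None:
        timeout_seconds = timeout or 30.0
        try:
            await asyncio.wait_for(
                connect_sequentially(), timeout=timeout_seconds
            )
        except (asyncio.TimeoutError, asyncio.CancelledError):
            raise RuntimeError(
                f"Connection timeout after {timeout_seconds}s. "
                "One or more MCP servers are not responding."
            )


    if __name__ == "__main__":
        asyncio.run(connect_all(timeout=5.0))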
@@ -451,367 +660,149 @@ class MCPToolkit(BaseToolkit):
             raise ValueError(error_msg) from e
 
     def _ensure_strict_tool_schema(self, tool: FunctionTool) -> FunctionTool:
-        r"""Ensure a tool has a strict schema compatible with OpenAI's
-        requirements according to the structured outputs specification.
-
-        Args:
-            tool (FunctionTool): The tool to check and update if necessary.
-
-        Returns:
-            FunctionTool: The tool with a strict schema.
+        r"""Ensure a tool has a strict schema compatible with
+        OpenAI's requirements.
+
+        Strategy:
+        - Ensure parameters exist with at least an empty properties object
+          (OpenAI requirement).
+        - Try converting parameters to strict using ensure_strict_json_schema.
+        - If conversion fails, mark function.strict = False and
+          keep best-effort parameters.
         """
         try:
             schema = tool.get_openai_tool_schema()
 
-            # Helper functions for validation and transformation
-            def _validate_and_fix_schema(obj, path="", in_root=True):
-                r"""Recursively validate and fix schema to meet strict
-                requirements.
-                """
-                if isinstance(obj, dict):
-                    # Check if this is the root object
-                    if in_root and path == "":
-                        # Root must be an object, not anyOf
-                        if "anyOf" in obj and "type" not in obj:
-                            raise ValueError(
-                                "Root object must not be anyOf and must "
-                                "be an object"
-                            )
-                        if obj.get("type") and obj["type"] != "object":
-                            raise ValueError(
-                                "Root object must have type 'object'"
-                            )
-
-                    # Handle object types
-                    if obj.get("type") == "object":
-                        # Ensure additionalProperties is false
-                        obj["additionalProperties"] = False
-
-                        # Process properties
-                        if "properties" in obj:
-                            props = obj["properties"]
-                            # Only set required if it doesn't exist or needs
-                            # updating
-                            if "required" not in obj:
-                                # If no required field exists, make all fields
-                                # required
-                                obj["required"] = list(props.keys())
-                            else:
-                                # Ensure required field only contains valid
-                                # property names
-                                existing_required = obj.get("required", [])
-                                valid_required = [
-                                    req
-                                    for req in existing_required
-                                    if req in props
-                                ]
-                                # Add any missing properties to required
-                                for prop_name in props:
-                                    if prop_name not in valid_required:
-                                        valid_required.append(prop_name)
-                                obj["required"] = valid_required
-
-                            # Recursively process each property
-                            for prop_name, prop_schema in props.items():
-                                _validate_and_fix_schema(
-                                    prop_schema, f"{path}.{prop_name}", False
-                                )
-
-                    # Handle arrays
-                    elif obj.get("type") == "array":
-                        if "items" in obj:
-                            _validate_and_fix_schema(
-                                obj["items"], f"{path}.items", False
-                            )
-
-                    # Handle anyOf
-                    elif "anyOf" in obj:
-                        # Validate anyOf schemas
-                        for i, schema in enumerate(obj["anyOf"]):
-                            _validate_and_fix_schema(
-                                schema, f"{path}.anyOf[{i}]", False
-                            )
+            def _has_strict_mode_incompatible_features(json_schema):
+                r"""Check if schema has features incompatible
+                with OpenAI strict mode."""
 
-                    # Handle string format validation
-                    elif obj.get("type") == "string":
-                        if "format" in obj:
-                            allowed_formats = [
-                                "date-time",
-                                "time",
-                                "date",
-                                "duration",
-                                "email",
-                                "hostname",
-                                "ipv4",
-                                "ipv6",
-                                "uuid",
-                            ]
-                            if obj["format"] not in allowed_formats:
-                                del obj["format"]  # Remove unsupported format
-
-                    # Handle number/integer validation
-                    elif obj.get("type") in ["number", "integer"]:
-                        # These properties are supported
-                        supported_props = [
-                            "multipleOf",
-                            "maximum",
-                            "exclusiveMaximum",
-                            "minimum",
-                            "exclusiveMinimum",
-                        ]
-                        # Remove any unsupported properties
-                        for key in list(obj.keys()):
-                            if key not in [
-                                *supported_props,
-                                "type",
-                                "description",
-                                "default",
-                            ]:
-                                del obj[key]
-
-                    # Process nested structures
-                    for key in ["allOf", "oneOf", "$defs", "definitions"]:
-                        if key in obj:
-                            if isinstance(obj[key], list):
-                                for i, item in enumerate(obj[key]):
-                                    _validate_and_fix_schema(
-                                        item, f"{path}.{key}[{i}]", False
-                                    )
-                            elif isinstance(obj[key], dict):
-                                for def_name, def_schema in obj[key].items():
-                                    _validate_and_fix_schema(
-                                        def_schema,
-                                        f"{path}.{key}.{def_name}",
-                                        False,
-                                    )
-
-                elif isinstance(obj, list):
-                    for i, item in enumerate(obj):
-                        _validate_and_fix_schema(item, f"{path}[{i}]", False)
-
-            def _check_schema_limits(obj, counts=None):
-                r"""Check if schema exceeds OpenAI limits."""
-                if counts is None:
-                    counts = {
-                        "properties": 0,
-                        "depth": 0,
-                        "enums": 0,
-                        "string_length": 0,
-                    }
+                def _check_incompatible(obj, path=""):
+                    if not isinstance(obj, dict):
+                        return False
 
-                def _count_properties(o, depth=0):
-                    if isinstance(o, dict):
-                        if depth > 5:
-                            raise ValueError(
-                                "Schema exceeds maximum nesting depth of 5"
+                    # Check for allOf in array items (known to cause issues)
+                    if "items" in obj and isinstance(obj["items"], dict):
+                        items_schema = obj["items"]
+                        if "allOf" in items_schema:
+                            logger.debug(
+                                f"Found allOf in array items at {path}"
                             )
+                            return True
+                        # Recursively check items schema
+                        if _check_incompatible(items_schema, f"{path}.items"):
+                            return True
+
+                    # Check for other potentially problematic patterns
+                    # anyOf/oneOf in certain contexts can also cause issues
+                    if (
+                        "anyOf" in obj and len(obj["anyOf"]) > 10
+                    ):  # Large unions can be problematic
+                        return True
+
+                    # Recursively check nested objects
+                    for key in [
+                        "properties",
+                        "additionalProperties",
+                        "patternProperties",
+                    ]:
+                        if key in obj and isinstance(obj[key], dict):
+                            if key == "properties":
+                                for prop_name, prop_schema in obj[key].items():
+                                    if isinstance(
+                                        prop_schema, dict
+                                    ) and _check_incompatible(
+                                        prop_schema,
+                                        f"{path}.{key}.{prop_name}",
+                                    ):
+                                        return True
+                            elif _check_incompatible(
+                                obj[key], f"{path}.{key}"
+                            ):
+                                return True
 
-                        if o.get("type") == "object" and "properties" in o:
-                            counts["properties"] += len(o["properties"])
-                            for prop in o["properties"].values():
-                                _count_properties(prop, depth + 1)
-
-                        if "enum" in o:
-                            counts["enums"] += len(o["enum"])
-                            if isinstance(o["enum"], list):
-                                for val in o["enum"]:
-                                    if isinstance(val, str):
-                                        counts["string_length"] += len(val)
-
-                        # Count property names
-                        if "properties" in o:
-                            for name in o["properties"].keys():
-                                counts["string_length"] += len(name)
-
-                        # Process nested structures
-                        for key in ["items", "allOf", "oneOf", "anyOf"]:
-                            if key in o:
-                                if isinstance(o[key], dict):
-                                    _count_properties(o[key], depth)
-                                elif isinstance(o[key], list):
-                                    for item in o[key]:
-                                        _count_properties(item, depth)
-
-                _count_properties(obj)
-
-                # Check limits, reference: https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#objects-have-limitations-on-nesting-depth-and-size  # noqa: E501
-                if counts["properties"] > 5000:
-                    raise ValueError(
-                        "Schema exceeds maximum of 5000 properties"
-                    )
-                if counts["enums"] > 1000:
-                    raise ValueError(
-                        "Schema exceeds maximum of 1000 enum values"
-                    )
-                if counts["string_length"] > 120000:
-                    raise ValueError(
-                        "Schema exceeds maximum total string length of 120000"
-                    )
-
-                return True
+                    # Check arrays and unions
+                    for key in ["allOf", "anyOf", "oneOf"]:
+                        if key in obj and isinstance(obj[key], list):
+                            for i, item in enumerate(obj[key]):
+                                if isinstance(
+                                    item, dict
+                                ) and _check_incompatible(
+                                    item, f"{path}.{key}[{i}]"
+                                ):
+                                    return True
 
-            # Check if schema has any issues that prevent strict mode
-            def _has_strict_mode_issues(obj):
-                r"""Check for any issues that would prevent strict mode."""
-                issues = []
+                    return False
 
-                def _check_issues(o, path=""):
-                    if isinstance(o, dict):
-                        # Check for additionalProperties: true
-                        if o.get("additionalProperties") is True:
-                            issues.append(
-                                f"additionalProperties: true at {path}"
-                            )
+                return _check_incompatible(json_schema)
 
-                        # Check for unsupported keywords
-                        unsupported = [
-                            "not",
-                            "dependentRequired",
-                            "dependentSchemas",
-                            "if",
-                            "then",
-                            "else",
-                            "patternProperties",
-                        ]
-                        for keyword in unsupported:
-                            if keyword in o:
-                                issues.append(
-                                    f"Unsupported keyword '{keyword}' "
-                                    f"at {path}"
-                                )
-
-                        # Recursively check
-                        for key, value in o.items():
-                            if isinstance(value, (dict, list)):
-                                _check_issues(value, f"{path}.{key}")
-
-                    elif isinstance(o, list):
-                        for i, item in enumerate(o):
-                            _check_issues(item, f"{path}[{i}]")
-
-                _check_issues(obj)
-                return issues
-
-            # Check if already strict and compliant
-            if schema.get("function", {}).get("strict") is True:
-                # Validate it's actually compliant
-                try:
-                    params = schema["function"].get("parameters", {})
-                    if params:
-                        _validate_and_fix_schema(params)
-                        _check_schema_limits(params)
-                    return tool
-                except Exception:
-                    # Not actually compliant, continue to fix it
-                    pass
-
-            # Apply sanitization first to handle optional fields properly
+            # Apply sanitization if available
             if "function" in schema:
-                # Apply the sanitization function first
-                from camel.toolkits.function_tool import (
-                    sanitize_and_enforce_required,
-                )
+                try:
+                    from camel.toolkits.function_tool import (
+                        sanitize_and_enforce_required,
+                    )
 
-                schema = sanitize_and_enforce_required(schema)
-
-                # Special handling for schemas with additionalProperties that
-                # aren't false These can't use strict mode
-                def _has_open_props(obj, path=""):
-                    """Check if any object has additionalProperties that
-                    isn't false."""
-                    if isinstance(obj, dict):
-                        if (
-                            obj.get("type") == "object"
-                            and "additionalProperties" in obj
-                        ):
-                            if obj["additionalProperties"] is not False:
-                                return True
+                    schema = sanitize_and_enforce_required(schema)
+                except ImportError:
+                    logger.debug("sanitize_and_enforce_required not available")
 
-                        # Recurse through the schema
-                        for key, value in obj.items():
-                            if key in [
-                                "properties",
-                                "items",
-                                "allOf",
-                                "oneOf",
-                                "anyOf",
-                            ]:
-                                if isinstance(value, dict):
-                                    if _has_open_props(value, f"{path}.{key}"):
-                                        return True
-                                elif isinstance(value, list):
-                                    for i, item in enumerate(value):
-                                        if _has_open_props(
-                                            item,
-                                            f"{path}.{key}[{i}]",
-                                        ):
-                                            return True
-                            elif isinstance(value, dict) and key not in [
-                                "description",
-                                "type",
-                                "enum",
-                            ]:
-                                if _has_open_props(value, f"{path}.{key}"):
-                                    return True
-                    return False
+            parameters = schema["function"].get("parameters", {})
+            if not parameters:
+                # Empty parameters - use minimal valid schema
+                parameters = {
+                    "type": "object",
+                    "properties": {},
+                    "additionalProperties": False,
+                }
+                schema["function"]["parameters"] = parameters
 
-            # Check if schema has dynamic additionalProperties
-            if _has_open_props(schema["function"].get("parameters", {})):
-                # Can't use strict mode with dynamic additionalProperties
-                schema["function"]["strict"] = False
-                tool.set_openai_tool_schema(schema)
-                logger.warning(
-                    f"Tool '{tool.get_function_name()}' has "
-                    f"dynamic additionalProperties and cannot use "
-                    f"strict mode"
-                )
-                return tool
+            # MCP spec doesn't require 'properties', but OpenAI spec does
+            if (
+                parameters.get("type") == "object"
+                and "properties" not in parameters
+            ):
+                parameters["properties"] = {}
 
-            # Now check for blocking issues after sanitization
-            issues = _has_strict_mode_issues(schema)
-            if issues:
-                # Can't use strict mode
+            try:
+                # _check_schema_limits(parameters)
+
+                # Check for OpenAI strict mode incompatible features
+                if _has_strict_mode_incompatible_features(parameters):
+                    raise ValueError(
+                        "Schema contains features "
+                        "incompatible with strict mode"
+                    )
+
+                strict_params = ensure_strict_json_schema(parameters)
+                schema["function"]["parameters"] = strict_params
+                schema["function"]["strict"] = True
+            except Exception as e:
+                # Fallback to non-strict mode on any failure
                 schema["function"]["strict"] = False
-                tool.set_openai_tool_schema(schema)
                 logger.warning(
-                    f"Tool '{tool.get_function_name()}' has "
-                    f"issues preventing strict mode: "
-                    f"{'; '.join(issues[:3])}{'...' if len(issues) > 3 else ''}"  # noqa: E501
+                    f"Tool '{tool.get_function_name()}' "
+                    f"cannot use strict mode: {e}"
                 )
-                return tool
-
-            # Enable strict mode
-            schema["function"]["strict"] = True
-
-            parameters = schema["function"].get("parameters", {})
-            if parameters:
-                # Validate and fix the parameters schema
-                _validate_and_fix_schema(parameters)
-
-                # Check schema limits
-                _check_schema_limits(parameters)
 
             tool.set_openai_tool_schema(schema)
-            logger.debug(
-                f"Updated tool '{tool.get_function_name()}' to strict mode"
-            )
 
         except Exception as e:
-            # If we can't make it strict, disable strict mode
+            # Final fallback - ensure tool still works
             try:
-                if "function" in schema:
-                    schema["function"]["strict"] = False
-                    tool.set_openai_tool_schema(schema)
+                current_schema = tool.get_openai_tool_schema()
+                if "function" in current_schema:
+                    current_schema["function"]["strict"] = False
+                    tool.set_openai_tool_schema(current_schema)
                 logger.warning(
-                    f"Failed to ensure strict schema for "
-                    f"tool '{tool.get_function_name()}': {str(e)[:100]}. "
-                    f"Setting strict=False."
+                    f"Error processing schema for tool "
+                    f"'{tool.get_function_name()}': {str(e)[:100]}. "
+                    f"Using non-strict mode."
                 )
             except Exception as inner_e:
-                # If even setting strict=False fails, log the error
                 logger.error(
-                    f"Critical error processing "
-                    f"tool '{tool.get_function_name()}': {inner_e}. "
+                    f"Critical error processing tool "
+                    f"'{tool.get_function_name()}': {inner_e}. "
                     f"Tool may not function correctly."
                 )
 
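The net effect of the rewrite above is a simpler try-strict-then-fall-back policy: build a minimal valid parameters object, attempt the strict conversion, and mark the function non-strict if anything fails. A standalone sketch of that policy, where `to_strict` only stands in for `ensure_strict_json_schema` and the sample schema is made up:

    from typing import Any, Dict


    def to_strict(parameters: Dict[str, Any]) -> Dict[str, Any]:
        # Stand-in for ensure_strict_json_schema; only the two most visible
        # effects are reproduced here.
        parameters.setdefault("additionalProperties", False)
        parameters["required"] = list(parameters.get("properties", {}).keys())
        return parameters


    def make_function_schema_strict(schema: Dict[str, Any]) -> Dict[str, Any]:
        function = schema["function"]
        parameters = function.get("parameters") or {
            "type": "object",
            "properties": {},
            "additionalProperties": False,
        }
        # OpenAI requires a properties object even though MCP does not.
        if parameters.get("type") == "object" and "properties" not in parameters:
            parameters["properties"] = {}
        try:
            function["parameters"] = to_strict(parameters)
            function["strict"] = True
        except Exception:
            # Best effort: keep the tool usable in non-strict mode.
            function["parameters"] = parameters
            function["strict"] = False
        return schema


    example = {"function": {"name": "search", "parameters": {"type": "object"}}}
    print(make_function_schema_strict(example)["function"]["strict"])  # True
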
@@ -854,6 +845,7 @@ class MCPToolkit(BaseToolkit):
             )
 
         all_tools = []
+        seen_names: set[str] = set()
         for i, client in enumerate(self.clients):
             try:
                 client_tools = client.get_tools()
@@ -862,6 +854,14 @@ class MCPToolkit(BaseToolkit):
                 strict_tools = []
                 for tool in client_tools:
                     strict_tool = self._ensure_strict_tool_schema(tool)
+                    name = strict_tool.get_function_name()
+                    if name in seen_names:
+                        logger.warning(
+                            f"Duplicate tool name detected and "
+                            f"skipped: '{name}' from client {i+1}"
+                        )
+                        continue
+                    seen_names.add(name)
                     strict_tools.append(strict_tool)
 
                 all_tools.extend(strict_tools)
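
The duplicate-name guard added to `get_tools()` above is a first-client-wins filter over tool names. A tiny illustration with made-up client tool lists:

    # Made-up tool names from two hypothetical MCP clients.
    tools_by_client = [["search", "fetch"], ["fetch", "summarize"]]

    seen_names: set[str] = set()
    merged: list[str] = []
    for client_tools in tools_by_client:
        for name in client_tools:
            if name in seen_names:
                # A later client exposing the same name is skipped.
                continue
            seen_names.add(name)
            merged.append(name)

    print(merged)  # ['search', 'fetch', 'summarize']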