camel-ai 0.2.71a10__py3-none-any.whl → 0.2.71a12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +1 -1
- camel/models/cohere_model.py +4 -1
- camel/models/moonshot_model.py +54 -1
- camel/societies/workforce/prompts.py +32 -13
- camel/societies/workforce/role_playing_worker.py +1 -1
- camel/societies/workforce/worker.py +1 -1
- camel/societies/workforce/workforce.py +53 -18
- camel/tasks/task.py +9 -5
- camel/toolkits/function_tool.py +13 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +165 -218
- camel/toolkits/hybrid_browser_toolkit/unified_analyzer.js +3 -3
- camel/toolkits/search_toolkit.py +93 -60
- camel/toolkits/slack_toolkit.py +10 -0
- camel/types/enums.py +3 -0
- camel/utils/tool_result.py +1 -1
- {camel_ai-0.2.71a10.dist-info → camel_ai-0.2.71a12.dist-info}/METADATA +3 -3
- {camel_ai-0.2.71a10.dist-info → camel_ai-0.2.71a12.dist-info}/RECORD +20 -20
- {camel_ai-0.2.71a10.dist-info → camel_ai-0.2.71a12.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.71a10.dist-info → camel_ai-0.2.71a12.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
camel/models/cohere_model.py
CHANGED
@@ -21,7 +21,10 @@ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
 from pydantic import BaseModel
 
 if TYPE_CHECKING:
-    from cohere.types import
+    from cohere.types import (  # type: ignore[attr-defined]
+        ChatMessageV2,
+        ChatResponse,
+    )
 
 from camel.configs import COHERE_API_PARAMS, CohereConfig
 from camel.messages import OpenAIMessage
camel/models/moonshot_model.py
CHANGED
@@ -29,8 +29,23 @@ from camel.types import (
 from camel.utils import (
     BaseTokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 class MoonshotModel(OpenAICompatibleModel):
     r"""Moonshot API in a unified OpenAICompatibleModel interface.
@@ -91,13 +106,51 @@ class MoonshotModel(OpenAICompatibleModel):
             **kwargs,
         )
 
+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-
+        r"""Runs inference of Moonshot chat completion asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any
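The tracing setup added above selects an `observe` decorator at import time from environment variables, falling back to camel's own `observe` when neither Langfuse nor TraceRoot is enabled. Below is a minimal standalone sketch of that selection pattern; the local `_noop_observe` is a hypothetical stand-in for `camel.utils.observe`, not camel's actual implementation.

```python
import functools
import os


def _noop_observe(**_kwargs):
    # Stand-in fallback: a decorator factory that leaves the function untouched.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapper

    return decorator


if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
    try:
        from langfuse.decorators import observe  # real tracer, if installed
    except ImportError:
        observe = _noop_observe  # degrade gracefully when langfuse is missing
else:
    observe = _noop_observe


@observe()
def generate(prompt: str) -> str:
    # Traced only when a real tracer was importable; a no-op otherwise.
    return prompt.upper()


print(generate("hello"))  # -> HELLO
```

Because the fallback has the same `@observe()` call shape, the decorated `_arun` works identically whether or not a tracing backend is installed.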
camel/societies/workforce/prompts.py
CHANGED
@@ -147,7 +147,7 @@ Here is the content of the parent task for you to refer to:
 Here are results of some prerequisite tasks that you can refer to:
 
 ==============================
-{
+{dependency_tasks_info}
 ==============================
 
 Here are some additional information about the task:
@@ -198,7 +198,7 @@ Now you should summarize the scenario and return the result of the task.
 
 TASK_DECOMPOSE_PROMPT = r"""You need to decompose the given task into subtasks according to the workers available in the group, following these important principles to maximize efficiency, parallelism, and clarity for the executing agents:
 
-1. **Self-Contained Subtasks**: This is
+1. **Self-Contained Subtasks**: This is critical principle. Each subtask's description **must be fully self-sufficient and independently understandable**. The agent executing the subtask has **no knowledge** of the parent task, other subtasks, or the overall workflow.
     * **DO NOT** use relative references like "the first task," "the paper mentioned above," or "the result from the previous step."
     * **DO** write explicit instructions. For example, instead of "Analyze the document," write "Analyze the document titled 'The Future of AI'." The system will automatically provide the necessary inputs (like the document itself) from previous steps.
 
@@ -220,23 +220,42 @@ These principles aim to reduce overall completion time by maximizing concurrent
 
 **EXAMPLE FORMAT ONLY** (DO NOT use this example content for actual task decomposition):
 
-
-
-* Poor decomposition (monolithic and vague):
-  `<tasks><task>Do all research, analysis, and write final report.</task></tasks>`
+***
+**Example 1: Sequential Task for a Single Worker**
 
-* **
+* **Overall Task**: "Create a short blog post about the benefits of Python. First, research the key benefits. Then, write a 300-word article. Finally, find a suitable image to go with it."
+* **Available Workers**:
+    * `Document Agent`: A worker that can research topics, write articles, and find images.
+* **Correct Decomposition**:
+    ```xml
+    <tasks>
+        <task>Create a short blog post about the benefits of Python by researching key benefits, writing a 300-word article, and finding a suitable image.</task>
+    </tasks>
     ```
+* **Reasoning**: All steps are sequential and can be handled by the same worker type (`Document Agent`). Grouping them into one subtask is efficient and maintains the workflow, following the "Strategic Grouping" principle.
+
+***
+**Example 2: Parallel Task Across Different Workers**
+
+* **Overall Task**: "Write a report on the Q2 performance of Apple (AAPL) and Google (GOOGL). The report needs a financial summary and a market sentiment analysis for each company."
+* **Available Workers**:
+    * `financial_analyst_1`: A worker that can analyze financial data and create summaries.
+    * `market_researcher_1`: A worker that can perform market sentiment analysis.
+    * `report_writer_1`: A worker that compiles information into a final report.
+* **Correct Decomposition**:
+    ```xml
     <tasks>
-        <task>
-        <task>
-        <task>
-        <task>
-        <task>
+        <task>Create a financial summary for Apple (AAPL) for Q2.</task>
+        <task>Create a financial summary for Google (GOOGL) for Q2.</task>
+        <task>Perform market sentiment analysis for Apple (AAPL) for Q2.</task>
+        <task>Perform market sentiment analysis for Google (GOOGL) for Q2.</task>
+        <task>Compile the provided financial summaries and market sentiment analyses for Apple (AAPL) and Google (GOOGL) into a single Q2 performance report.</task>
     </tasks>
     ```
+* **Reasoning**: The financial analysis and market research can be done in parallel for both companies. The final report depends on all previous steps. This decomposition leverages worker specialization and parallelism, following the "Aggressive Parallelization" principle.
+
+***
 
-**END OF
+**END OF EXAMPLES** - Now, apply these principles and examples to decompose the following task.
 
 The content of the task is:
 
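The decompose prompt asks the model to answer inside a `<tasks><task>…</task></tasks>` wrapper. As a purely illustrative aside (this is not camel's own parser, which lives in `camel/tasks/task.py`), output in that shape can be pulled apart with a small regex:

```python
import re

# Hypothetical model output following the <tasks>/<task> format above.
response = """<tasks>
    <task>Create a financial summary for Apple (AAPL) for Q2.</task>
    <task>Perform market sentiment analysis for Apple (AAPL) for Q2.</task>
</tasks>"""

# Non-greedy match inside each <task> element, whitespace-trimmed.
subtasks = [
    m.strip() for m in re.findall(r"<task>(.*?)</task>", response, re.DOTALL)
]
print(subtasks)
```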
camel/societies/workforce/role_playing_worker.py
CHANGED
@@ -122,7 +122,7 @@ class RolePlayingWorker(Worker):
         prompt = ROLEPLAY_PROCESS_TASK_PROMPT.format(
             content=task.content,
             parent_task_content=task.parent.content if task.parent else "",
-
+            dependency_tasks_info=dependency_tasks_info,
             additional_info=task.additional_info,
         )
         role_play_session = RolePlaying(
camel/societies/workforce/workforce.py
CHANGED
@@ -1008,7 +1008,7 @@ class Workforce(BaseNode):
         if not validate_task_content(new_content, task_id):
             logger.warning(
                 f"Task {task_id} content modification rejected: "
-                f"Invalid content. Content preview: '{new_content
+                f"Invalid content. Content preview: '{new_content}'"
             )
             return False
 
@@ -1194,7 +1194,7 @@ class Workforce(BaseNode):
             task.result = "Task failed: Invalid or empty content provided"
             logger.warning(
                 f"Task {task.id} rejected: Invalid or empty content. "
-                f"Content preview: '{task.content
+                f"Content preview: '{task.content}'"
             )
             return task
 
@@ -1327,7 +1327,7 @@ class Workforce(BaseNode):
             task.result = "Task failed: Invalid or empty content provided"
             logger.warning(
                 f"Task {task.id} rejected: Invalid or empty content. "
-                f"Content preview: '{task.content
+                f"Content preview: '{task.content}'"
             )
             return task
 
@@ -1853,7 +1853,7 @@ class Workforce(BaseNode):
             logger.error(
                 f"JSON parsing error in task assignment: Invalid response "
                 f"format - {e}. Response content: "
-                f"{response.msg.content
+                f"{response.msg.content}"
             )
             return TaskAssignResult(assignments=[])
 
@@ -1985,6 +1985,37 @@ class Workforce(BaseNode):
 
         return final_assignments
 
+    def _update_task_dependencies_from_assignments(
+        self, assignments: List[TaskAssignment], tasks: List[Task]
+    ) -> None:
+        r"""Update Task.dependencies with actual Task objects based on
+        assignments.
+
+        Args:
+            assignments (List[TaskAssignment]): The task assignments
+                containing dependency IDs.
+            tasks (List[Task]): The tasks that were assigned.
+        """
+        # Create a lookup map for all available tasks
+        all_tasks = {}
+        for task_list in [self._completed_tasks, self._pending_tasks, tasks]:
+            for task in task_list:
+                all_tasks[task.id] = task
+
+        # Update dependencies for each assigned task
+        for assignment in assignments:
+            if not assignment.dependencies:
+                continue
+
+            matching_tasks = [t for t in tasks if t.id == assignment.task_id]
+            if matching_tasks:
+                task = matching_tasks[0]
+                task.dependencies = [
+                    all_tasks[dep_id]
+                    for dep_id in assignment.dependencies
+                    if dep_id in all_tasks
+                ]
+
     async def _find_assignee(
         self,
         tasks: List[Task],
@@ -2021,19 +2052,24 @@ class Workforce(BaseNode):
 
         # if all assignments are valid and all tasks are assigned, return early
         if not invalid_assignments and not unassigned_tasks:
+            self._update_task_dependencies_from_assignments(
+                valid_assignments, tasks
+            )
             return TaskAssignResult(assignments=valid_assignments)
 
-        # handle retry and fallback for
-        #
-        all_problem_assignments = invalid_assignments
+        # handle retry and fallback for invalid assignments and unassigned
+        # tasks
        retry_and_fallback_assignments = (
             await self._handle_assignment_retry_and_fallback(
-
+                invalid_assignments, tasks, valid_worker_ids
             )
         )
-        valid_assignments
+        all_assignments = valid_assignments + retry_and_fallback_assignments
+
+        # Update Task.dependencies for all final assignments
+        self._update_task_dependencies_from_assignments(all_assignments, tasks)
 
-        return TaskAssignResult(assignments=
+        return TaskAssignResult(assignments=all_assignments)
 
     async def _post_task(self, task: Task, assignee_id: str) -> None:
         # Record the start time when a task is posted
@@ -2107,7 +2143,7 @@ class Workforce(BaseNode):
             )
             new_node_conf = WorkerConf(
                 description=f"Fallback worker for task: "
-                f"{task.content
+                f"{task.content}",
                 role="General Assistant",
                 sys_msg="You are a general assistant that can help "
                 "with various tasks.",
@@ -2117,8 +2153,7 @@ class Workforce(BaseNode):
                 response.msg.content,
                 schema=WorkerConf,
                 fallback_values={
-                    "description": f"Worker for task: "
-                    f"{task.content[:50]}...",
+                    "description": f"Worker for task: " f"{task.content}",
                     "role": "Task Specialist",
                     "sys_msg": f"You are a specialist for: {task.content}",
                 },
@@ -2130,7 +2165,7 @@ class Workforce(BaseNode):
                 new_node_conf = WorkerConf(**result)
             else:
                 new_node_conf = WorkerConf(
-                    description=f"Worker for task: {task.content
+                    description=f"Worker for task: {task.content}",
                     role="Task Specialist",
                     sys_msg=f"You are a specialist for: {task.content}",
                 )
@@ -2147,7 +2182,7 @@ class Workforce(BaseNode):
             # Create a fallback worker configuration
             new_node_conf = WorkerConf(
                 description=f"Fallback worker for "
-                f"task: {task.content
+                f"task: {task.content}",
                 role="General Assistant",
                 sys_msg="You are a general assistant that can help "
                 "with various tasks.",
@@ -2160,7 +2195,7 @@ class Workforce(BaseNode):
             logger.error(
                 f"JSON parsing error in worker creation: Invalid "
                 f"response format - {e}. Response content: "
-                f"{response.msg.content
+                f"{response.msg.content}"
             )
             raise RuntimeError(
                 f"Failed to create worker for task {task.id}: "
@@ -2364,7 +2399,7 @@ class Workforce(BaseNode):
                 f"Task {task.id} has exceeded maximum retry attempts "
                 f"({MAX_TASK_RETRIES}). Final failure "
                 f"reason: {detailed_error}. "
-                f"Task content: '{task.content
+                f"Task content: '{task.content}'"
             )
             self._cleanup_task_tracking(task.id)
             # Mark task as completed for dependency tracking before halting
@@ -2793,7 +2828,7 @@ class Workforce(BaseNode):
             # useful results
             if is_task_result_insufficient(returned_task):
                 result_preview = (
-                    returned_task.result
+                    returned_task.result
                     if returned_task.result
                     else "No result"
                 )
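The new `_update_task_dependencies_from_assignments` helper resolves the dependency IDs carried on each assignment into concrete `Task` objects. Below is a self-contained sketch of that resolution step using stand-in dataclasses rather than camel's actual `Task` and `TaskAssignment` classes; the field names and `assignee_id` are assumptions for illustration only.

```python
from dataclasses import dataclass, field
from typing import List


@dataclass
class Task:
    id: str
    content: str
    dependencies: List["Task"] = field(default_factory=list)


@dataclass
class TaskAssignment:
    task_id: str
    assignee_id: str
    dependencies: List[str] = field(default_factory=list)


def update_dependencies(
    assignments: List[TaskAssignment], tasks: List[Task]
) -> None:
    # Resolve dependency IDs to concrete Task objects, skipping unknown IDs.
    by_id = {t.id: t for t in tasks}
    for assignment in assignments:
        task = by_id.get(assignment.task_id)
        if task is None or not assignment.dependencies:
            continue
        task.dependencies = [
            by_id[dep_id] for dep_id in assignment.dependencies if dep_id in by_id
        ]


t1 = Task("1", "financial summary")
t2 = Task("2", "compile report")
update_dependencies([TaskAssignment("2", "writer", ["1"])], [t1, t2])
print([d.id for d in t2.dependencies])  # ['1']
```

Keeping the dependency wiring on the task objects themselves lets later stages (for example, building the prerequisite-results section of a worker prompt) read `task.dependencies` directly instead of re-querying the assignment result.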
camel/tasks/task.py
CHANGED
@@ -99,7 +99,7 @@ def validate_task_content(
         logger.warning(
             f"Task {task_id}: Content too short ({len(stripped_content)} "
             f"chars < {min_length} minimum). Content preview: "
-            f"'{stripped_content
+            f"'{stripped_content}'"
         )
         return False
 
@@ -124,7 +124,7 @@ def validate_task_content(
     if any(indicator in content_lower for indicator in failure_indicators):
         logger.warning(
             f"Task {task_id}: Failure indicator detected in result. "
-            f"Content preview: '{stripped_content
+            f"Content preview: '{stripped_content}'"
         )
         return False
 
@@ -132,7 +132,7 @@ def validate_task_content(
     if content_lower.startswith(("error", "failed", "cannot", "unable")):
         logger.warning(
             f"Task {task_id}: Error/refusal pattern detected at start. "
-            f"Content preview: '{stripped_content
+            f"Content preview: '{stripped_content}'"
         )
         return False
 
@@ -195,7 +195,7 @@ def parse_response(
         logger.warning(
             f"Skipping invalid subtask {task_id}.{i} "
             f"during decomposition: "
-            f"Content '{stripped_content
+            f"Content '{stripped_content}' failed validation"
         )
         return tasks
 
@@ -233,6 +233,8 @@ class Task(BaseModel):
             (default: :obj:`0`)
         assigned_worker_id (Optional[str]): The ID of the worker assigned to
             this task. (default: :obj:`None`)
+        dependencies (List[Task]): The dependencies for the task.
+            (default: :obj:`[]`)
         additional_info (Optional[Dict[str, Any]]): Additional information for
             the task. (default: :obj:`None`)
         image_list (Optional[List[Image.Image]]): Optional list of PIL Image
@@ -265,6 +267,8 @@ class Task(BaseModel):
 
     assigned_worker_id: Optional[str] = None
 
+    dependencies: List["Task"] = []
+
     additional_info: Optional[Dict[str, Any]] = None
 
     image_list: Optional[List[Image.Image]] = None
@@ -530,7 +534,7 @@ class Task(BaseModel):
             logger.warning(
                 f"Skipping invalid subtask {task_id}.{i} "
                 f"during streaming decomposition: "
-                f"Content '{stripped_content
+                f"Content '{stripped_content}' failed validation"
             )
             return tasks
 
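One detail worth noting about the new `dependencies: List["Task"] = []` field: because `Task` is a pydantic `BaseModel`, the mutable default is copied per instance rather than shared, unlike on a plain Python class attribute. A quick standalone check of that behaviour, assuming pydantic v2 semantics (`Node` here is a hypothetical stand-in, not camel's `Task`):

```python
from typing import List

from pydantic import BaseModel


class Node(BaseModel):
    name: str
    deps: List["Node"] = []  # mutable default: pydantic copies it per instance


Node.model_rebuild()  # resolve the self-referencing forward reference

a = Node(name="a")
b = Node(name="b")
a.deps.append(b)
print(len(a.deps), len(b.deps))  # 1 0 -> the default list is not shared
```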
camel/toolkits/function_tool.py
CHANGED
@@ -256,11 +256,24 @@ def sanitize_and_enforce_required(parameters_dict):
             # This field is optional - add null to its type
             current_type = field_schema.get('type')
             has_ref = '$ref' in field_schema
+            has_any_of = 'anyOf' in field_schema
 
             if has_ref:
                 # Fields with $ref shouldn't have additional type field
                 # The $ref itself defines the type structure
                 pass
+            elif has_any_of:
+                # Field already has anyOf
+                any_of_types = field_schema['anyOf']
+                has_null_type = any(
+                    item.get('type') == 'null' for item in any_of_types
+                )
+                if not has_null_type:
+                    # Add null type to anyOf
+                    field_schema['anyOf'].append({'type': 'null'})
+                # Remove conflicting type field if it exists
+                if 'type' in field_schema:
+                    del field_schema['type']
             elif current_type:
                 if isinstance(current_type, str):
                     # Single type - convert to array with null