camel-ai 0.2.71a4__py3-none-any.whl → 0.2.71a5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +1482 -134
- camel/agents/repo_agent.py +2 -1
- camel/benchmarks/browsecomp.py +6 -6
- camel/logger.py +1 -1
- camel/messages/base.py +12 -1
- camel/models/azure_openai_model.py +96 -7
- camel/models/base_model.py +68 -10
- camel/models/deepseek_model.py +5 -0
- camel/models/gemini_model.py +5 -0
- camel/models/litellm_model.py +48 -16
- camel/models/model_manager.py +24 -6
- camel/models/openai_compatible_model.py +109 -5
- camel/models/openai_model.py +117 -8
- camel/societies/workforce/prompts.py +68 -5
- camel/societies/workforce/role_playing_worker.py +1 -0
- camel/societies/workforce/single_agent_worker.py +1 -0
- camel/societies/workforce/utils.py +67 -2
- camel/societies/workforce/workforce.py +270 -36
- camel/societies/workforce/workforce_logger.py +0 -8
- camel/tasks/task.py +2 -0
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/file_write_toolkit.py +526 -121
- camel/toolkits/message_agent_toolkit.py +608 -0
- {camel_ai-0.2.71a4.dist-info → camel_ai-0.2.71a5.dist-info}/METADATA +6 -4
- {camel_ai-0.2.71a4.dist-info → camel_ai-0.2.71a5.dist-info}/RECORD +28 -27
- {camel_ai-0.2.71a4.dist-info → camel_ai-0.2.71a5.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.71a4.dist-info → camel_ai-0.2.71a5.dist-info}/licenses/LICENSE +0 -0
camel/models/openai_compatible_model.py CHANGED

@@ -17,6 +17,10 @@ from json import JSONDecodeError
 from typing import Any, Dict, List, Optional, Type, Union
 
 from openai import AsyncOpenAI, AsyncStream, BadRequestError, OpenAI, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel, ValidationError
 
 from camel.logger import get_logger

@@ -41,6 +45,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe
 
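The `observe` decorator is now resolved in three steps at import time: Langfuse when `LANGFUSE_ENABLED` is set, then TraceRoot when `TRACEROOT_ENABLED` is set, otherwise camel's own fallback. A minimal sketch of switching on the new branch; the only assumption beyond the diff is that the optional `traceroot` package is installed, and that `camel.utils.observe` is a pass-through stub.

```python
import os

# Both flags are read once, when the model modules are imported,
# so set them before importing camel.
os.environ["TRACEROOT_ENABLED"] = "true"

from camel.models import OpenAICompatibleModel  # noqa: E402

# If `traceroot` is missing, the except ImportError branch above silently
# falls back to camel.utils.observe, so nothing breaks without the package.
```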
@@ -138,7 +147,11 @@ class OpenAICompatibleModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of OpenAI chat completion.
 
         Args:

@@ -153,6 +166,8 @@ class OpenAICompatibleModel(BaseModelBackend):
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
+                `ChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata

@@ -170,10 +185,22 @@ class OpenAICompatibleModel(BaseModelBackend):
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+
+        # Check if streaming is enabled
+        is_streaming = self.model_config_dict.get("stream", False)
+
         if response_format:
             result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
                 self._request_parse(messages, response_format, tools)
             )
+            if is_streaming:
+                # Use streaming parse for structured output
+                return self._request_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                # Use non-streaming parse for structured output
+                return self._request_parse(messages, response_format, tools)
         else:
             result = self._request_chat_completion(messages, tools)
 

@@ -185,7 +212,11 @@ class OpenAICompatibleModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of OpenAI chat completion in async mode.
 
         Args:

@@ -197,9 +228,12 @@ class OpenAICompatibleModel(BaseModelBackend):
                 use for the request.
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk],
+                AsyncChatCompletionStreamManager[BaseModel]]:
+                `ChatCompletion` in the non-stream mode,
+                `AsyncStream[ChatCompletionChunk]` in the stream mode,
+                or `AsyncChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata

@@ -217,10 +251,24 @@ class OpenAICompatibleModel(BaseModelBackend):
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+
+        # Check if streaming is enabled
+        is_streaming = self.model_config_dict.get("stream", False)
+
         if response_format:
             result: Union[
                 ChatCompletion, AsyncStream[ChatCompletionChunk]
             ] = await self._arequest_parse(messages, response_format, tools)
+            if is_streaming:
+                # Use streaming parse for structured output
+                return await self._arequest_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                # Use non-streaming parse for structured output
+                return await self._arequest_parse(
+                    messages, response_format, tools
+                )
         else:
             result = await self._arequest_chat_completion(messages, tools)
 

@@ -336,6 +384,62 @@ class OpenAICompatibleModel(BaseModelBackend):
             logger.error(f"Fallback attempt also failed: {e}")
             raise
 
+    def _request_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletionStreamManager[BaseModel]:
+        r"""Request streaming structured output parsing.
+
+        Note: This uses OpenAI's beta streaming API for structured outputs.
+        """
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        # Use the beta streaming API for structured outputs
+        return self._client.beta.chat.completions.stream(
+            messages=messages,
+            model=self.model_type,
+            response_format=response_format,
+            **request_config,
+        )
+
+    async def _arequest_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> AsyncChatCompletionStreamManager[BaseModel]:
+        r"""Request async streaming structured output parsing.
+
+        Note: This uses OpenAI's beta streaming API for structured outputs.
+        """
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        # Use the beta streaming API for structured outputs
+        return self._async_client.beta.chat.completions.stream(
+            messages=messages,
+            model=self.model_type,
+            response_format=response_format,
+            **request_config,
+        )
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
camel/models/openai_model.py CHANGED

@@ -16,6 +16,10 @@ import warnings
 from typing import Any, Dict, List, Optional, Type, Union
 
 from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from openai.lib.streaming.chat import (
+    AsyncChatCompletionStreamManager,
+    ChatCompletionStreamManager,
+)
 from pydantic import BaseModel
 
 from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig

@@ -40,6 +44,11 @@ if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
         from langfuse.decorators import observe
     except ImportError:
         from camel.utils import observe
+elif os.environ.get("TRACEROOT_ENABLED", "False").lower() == "true":
+    try:
+        from traceroot import trace as observe  # type: ignore[import]
+    except ImportError:
+        from camel.utils import observe
 else:
     from camel.utils import observe
 

@@ -238,7 +247,11 @@ class OpenAIModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        Stream[ChatCompletionChunk],
+        ChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of OpenAI chat completion.
 
         Args:

@@ -250,9 +263,12 @@ class OpenAIModel(BaseModelBackend):
                 use for the request.
 
         Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
+            Union[ChatCompletion, Stream[ChatCompletionChunk],
+                ChatCompletionStreamManager[BaseModel]]:
+                `ChatCompletion` in the non-stream mode,
+                `Stream[ChatCompletionChunk]`in the stream mode,
+                or `ChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata

@@ -273,10 +289,22 @@ class OpenAIModel(BaseModelBackend):
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+
+        # Check if streaming is enabled
+        is_streaming = self.model_config_dict.get("stream", False)
+
         if response_format:
             result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
                 self._request_parse(messages, response_format, tools)
             )
+            if is_streaming:
+                # Use streaming parse for structured output
+                return self._request_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                # Use non-streaming parse for structured output
+                return self._request_parse(messages, response_format, tools)
         else:
             result = self._request_chat_completion(messages, tools)
 

@@ -288,7 +316,11 @@ class OpenAIModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+    ) -> Union[
+        ChatCompletion,
+        AsyncStream[ChatCompletionChunk],
+        AsyncChatCompletionStreamManager[BaseModel],
+    ]:
         r"""Runs inference of OpenAI chat completion in async mode.
 
         Args:

@@ -300,9 +332,12 @@ class OpenAIModel(BaseModelBackend):
                 use for the request.
 
         Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk],
+                AsyncChatCompletionStreamManager[BaseModel]]:
+                `ChatCompletion` in the non-stream mode,
+                `AsyncStream[ChatCompletionChunk]` in the stream mode, or
+                `AsyncChatCompletionStreamManager[BaseModel]` for
+                structured output streaming.
         """
 
         # Update Langfuse trace with current agent session and metadata

@@ -323,10 +358,24 @@ class OpenAIModel(BaseModelBackend):
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
+
+        # Check if streaming is enabled
+        is_streaming = self.model_config_dict.get("stream", False)
+
         if response_format:
             result: Union[
                 ChatCompletion, AsyncStream[ChatCompletionChunk]
             ] = await self._arequest_parse(messages, response_format, tools)
+            if is_streaming:
+                # Use streaming parse for structured output
+                return await self._arequest_stream_parse(
+                    messages, response_format, tools
+                )
+            else:
+                # Use non-streaming parse for structured output
+                return await self._arequest_parse(
+                    messages, response_format, tools
+                )
         else:
             result = await self._arequest_chat_completion(messages, tools)
 

@@ -422,6 +471,66 @@ class OpenAIModel(BaseModelBackend):
             **request_config,
         )
 
+    def _request_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletionStreamManager[BaseModel]:
+        r"""Request streaming structured output parsing.
+
+        Note: This uses OpenAI's beta streaming API for structured outputs.
+        """
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        request_config = self._sanitize_config(request_config)
+
+        # Use the beta streaming API for structured outputs
+        return self._client.beta.chat.completions.stream(
+            messages=messages,
+            model=self.model_type,
+            response_format=response_format,
+            **request_config,
+        )
+
+    async def _arequest_stream_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> AsyncChatCompletionStreamManager[BaseModel]:
+        r"""Request async streaming structured output parsing.
+
+        Note: This uses OpenAI's beta streaming API for structured outputs.
+        """
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)
+
+        # Remove stream from config as it's handled by the stream method
+        request_config.pop("stream", None)
+
+        if tools is not None:
+            request_config["tools"] = tools
+
+        request_config = self._sanitize_config(request_config)
+
+        # Use the beta streaming API for structured outputs
+        return self._async_client.beta.chat.completions.stream(
+            messages=messages,
+            model=self.model_type,
+            response_format=response_format,
+            **request_config,
+        )
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to OpenAI API.
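The async path mirrors the sync one: with `"stream": True` plus a `response_format`, `arun()` resolves to an `AsyncChatCompletionStreamManager`. A sketch under the same assumptions as the sync example (event types and `get_final_completion()` per the OpenAI beta helpers); the model choice and `Answer` schema are illustrative only.

```python
import asyncio

from pydantic import BaseModel

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType


class Answer(BaseModel):  # hypothetical schema for illustration
    text: str
    confidence: float


async def main() -> None:
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OPENAI,
        model_type=ModelType.GPT_4O_MINI,
        model_config_dict={"stream": True},
    )
    messages = [
        {"role": "user", "content": "Why is the sky blue? Answer briefly."}
    ]

    # arun() yields the async stream manager; consume it as an async context.
    async with await model.arun(messages, response_format=Answer) as stream:
        async for event in stream:
            if event.type == "content.delta":
                print(event.delta, end="", flush=True)
        final = await stream.get_final_completion()
        print(final.choices[0].message.parsed)


asyncio.run(main())
```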
camel/societies/workforce/prompts.py CHANGED

@@ -65,7 +65,8 @@ Example valid response:
     "assignments": [
         {{"task_id": "task_1", "assignee_id": "node_12345", "dependencies": []}},
         {{"task_id": "task_2", "assignee_id": "node_67890", "dependencies": ["task_1"]}},
-        {{"task_id": "task_3", "assignee_id": "node_12345", "dependencies": []}}
+        {{"task_id": "task_3", "assignee_id": "node_12345", "dependencies": []}},
+        {{"task_id": "task_4", "assignee_id": "node_67890", "dependencies": ["task_1", "task_2"]}}
     ]
 }}
 

@@ -96,6 +97,11 @@ Please keep in mind the task you are going to process, the content of the task t
 {content}
 ==============================
 
+Here is the content of the parent task for you to refer to:
+==============================
+{parent_task_content}
+==============================
+
 Here are results of some prerequisite tasks that you can refer to:
 
 ==============================

@@ -126,16 +132,22 @@ concluding remarks, explanations, or any other text outside the JSON structure i
 
 ROLEPLAY_PROCESS_TASK_PROMPT = TextPrompt(
     """You need to process the task. It is recommended that tools be actively called when needed.
-
+
+The content of the task that you need to do is:
 
 ==============================
-{
+{content}
 ==============================
 
-
+Here is the content of the parent task for you to refer to:
+==============================
+{parent_task_content}
+==============================
+
+Here are results of some prerequisite tasks that you can refer to:
 
 ==============================
-{
+{dependency_task_info}
 ==============================
 
 Here are some additional information about the task:

@@ -254,3 +266,54 @@ Each subtask should be:
 - Contain all sequential steps that should be performed by the same worker type
 - Only separated from other subtasks when parallel execution by different worker types is beneficial
 """
+
+FAILURE_ANALYSIS_PROMPT = TextPrompt(
+    """You need to analyze a task failure and decide on the best recovery strategy.
+
+**TASK FAILURE DETAILS:**
+Task ID: {task_id}
+Task Content: {task_content}
+Failure Count: {failure_count}/3
+Error Message: {error_message}
+Worker ID: {worker_id}
+Task Depth: {task_depth}
+Additional Info: {additional_info}
+
+**AVAILABLE RECOVERY STRATEGIES:**
+
+1. **RETRY**: Attempt the same task again without changes
+   - Use for: Network errors, temporary API issues, random failures
+   - Avoid for: Fundamental task misunderstanding, capability gaps
+
+2. **REPLAN**: Modify the task content to address the underlying issue
+   - Use for: Unclear requirements, insufficient context, correctable errors
+   - Provide: Modified task content that addresses the failure cause
+
+3. **DECOMPOSE**: Break the task into smaller, more manageable subtasks
+   - Use for: Complex tasks, capability mismatches, persistent failures
+   - Consider: Whether the task is too complex for a single worker
+
+4. **CREATE_WORKER**: Create a new worker node to handle the task
+   - Use for: Fundamental task misunderstanding, capability gaps
+
+**ANALYSIS GUIDELINES:**
+
+- **Connection/Network Errors**: Almost always choose RETRY
+- **Model Processing Errors**: Consider REPLAN if the task can be clarified, otherwise DECOMPOSE
+- **Capability Gaps**: Choose DECOMPOSE to break into simpler parts
+- **Ambiguous Requirements**: Choose REPLAN with clearer instructions
+- **High Failure Count**: Lean towards DECOMPOSE rather than repeated retries
+- **Deep Tasks (depth > 2)**: Prefer RETRY or REPLAN over further decomposition
+
+**RESPONSE FORMAT:**
+You must return a valid JSON object with these fields:
+- "strategy": one of "retry", "replan", or "decompose"
+- "reasoning": explanation for your choice (1-2 sentences)
+- "modified_task_content": new task content if strategy is "replan", null otherwise
+
+**Example Response:**
+{{"strategy": "retry", "reasoning": "The connection error appears to be temporary and network-related, a simple retry should resolve this.", "modified_task_content": null}}
+
+**CRITICAL**: Return ONLY the JSON object. No explanations or text outside the JSON structure.
+"""
+)
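The placeholders in `FAILURE_ANALYSIS_PROMPT` line up with the failure metadata the workforce tracks. A small sketch of filling it in; the concrete values are invented for illustration, only the field names come from the prompt above.

```python
from camel.societies.workforce.prompts import FAILURE_ANALYSIS_PROMPT

# Illustrative values only; in the workforce these come from the failed task.
analysis_prompt = FAILURE_ANALYSIS_PROMPT.format(
    task_id="task_3",
    task_content="Summarize the competitor pricing data",
    failure_count=2,
    error_message="Connection reset by peer while calling the model API",
    worker_id="node_12345",
    task_depth=1,
    additional_info="None",
)
print(analysis_prompt)
```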
camel/societies/workforce/role_playing_worker.py CHANGED

@@ -104,6 +104,7 @@ class RolePlayingWorker(Worker):
         dependency_tasks_info = self._get_dep_tasks_info(dependencies)
         prompt = ROLEPLAY_PROCESS_TASK_PROMPT.format(
             content=task.content,
+            parent_task_content=task.parent.content if task.parent else "",
             dependency_task_info=dependency_tasks_info,
             additional_info=task.additional_info,
         )
camel/societies/workforce/single_agent_worker.py CHANGED

@@ -273,6 +273,7 @@ class SingleAgentWorker(Worker):
         dependency_tasks_info = self._get_dep_tasks_info(dependencies)
         prompt = PROCESS_TASK_PROMPT.format(
             content=task.content,
+            parent_task_content=task.parent.content if task.parent else "",
             dependency_task_info=dependency_tasks_info,
             additional_info=task.additional_info,
         )
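Both workers now pass the parent task's content into their prompts, so a subtask sees the goal it was split from. A hedged sketch of the resulting `format` call; the `Task` wiring is illustrative and assumes `Task.add_subtask` links the child to its parent, as camel's task module does during decomposition.

```python
from camel.societies.workforce.prompts import PROCESS_TASK_PROMPT
from camel.tasks import Task

# Illustrative tasks; in a workforce the parent is set during decomposition.
parent = Task(content="Write a market analysis report", id="0")
subtask = Task(content="Collect competitor pricing data", id="0.1")
parent.add_subtask(subtask)  # assumed to set subtask.parent

prompt = PROCESS_TASK_PROMPT.format(
    content=subtask.content,
    parent_task_content=subtask.parent.content if subtask.parent else "",
    dependency_task_info="",
    additional_info=subtask.additional_info,
)
print(prompt)
```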
camel/societies/workforce/utils.py CHANGED

@@ -11,10 +11,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from enum import Enum
 from functools import wraps
-from typing import Callable, List
+from typing import Callable, List, Optional
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator
 
 
 class WorkerConf(BaseModel):
@@ -54,6 +55,23 @@ class TaskAssignment(BaseModel):
         "This is critical for the task decomposition and execution.",
     )
 
+    # Allow LLMs to output dependencies as a comma-separated string or empty
+    # string. This validator converts such cases into a list[str] so that
+    # downstream logic does not break with validation errors.
+    @staticmethod
+    def _split_and_strip(dep_str: str) -> List[str]:
+        r"""Utility to split a comma separated string and strip whitespace."""
+        return [d.strip() for d in dep_str.split(',') if d.strip()]
+
+    @field_validator("dependencies", mode="before")
+    def validate_dependencies(cls, v) -> List[str]:
+        if v is None:
+            return []
+        # Handle empty string or comma-separated string from LLM
+        if isinstance(v, str):
+            return TaskAssignment._split_and_strip(v)
+        return v
+
 
 class TaskAssignResult(BaseModel):
     r"""The result of task assignment for both single and batch assignments."""
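The `mode="before"` validator makes `TaskAssignment` tolerant of the string forms LLMs tend to produce for `dependencies`. A minimal sketch of the coercion; the field names follow the assignment JSON shown in the prompts file above.

```python
from camel.societies.workforce.utils import TaskAssignment

# A comma-separated string is coerced into a list of task IDs.
assignment = TaskAssignment(
    task_id="task_4",
    assignee_id="node_67890",
    dependencies="task_1, task_2",
)
print(assignment.dependencies)  # ['task_1', 'task_2']

# None or an empty string collapses to an empty list.
empty = TaskAssignment(
    task_id="task_5", assignee_id="node_12345", dependencies=""
)
print(empty.dependencies)  # []
```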
@@ -63,6 +81,53 @@
     )
 
 
+class RecoveryStrategy(str, Enum):
+    r"""Strategies for handling failed tasks."""
+
+    RETRY = "retry"
+    REPLAN = "replan"
+    DECOMPOSE = "decompose"
+    CREATE_WORKER = "create_worker"
+
+    def __str__(self):
+        return self.value
+
+    def __repr__(self):
+        return f"RecoveryStrategy.{self.name}"
+
+
+class FailureContext(BaseModel):
+    r"""Context information about a task failure."""
+
+    task_id: str = Field(description="ID of the failed task")
+    task_content: str = Field(description="Content of the failed task")
+    failure_count: int = Field(
+        description="Number of times this task has failed"
+    )
+    error_message: str = Field(description="Detailed error message")
+    worker_id: Optional[str] = Field(
+        default=None, description="ID of the worker that failed"
+    )
+    task_depth: int = Field(
+        description="Depth of the task in the decomposition hierarchy"
+    )
+    additional_info: Optional[str] = Field(
+        default=None, description="Additional context about the task"
+    )
+
+
+class RecoveryDecision(BaseModel):
+    r"""Decision on how to recover from a task failure."""
+
+    strategy: RecoveryStrategy = Field(
+        description="The chosen recovery strategy"
+    )
+    reasoning: str = Field(description="Explanation for the chosen strategy")
+    modified_task_content: Optional[str] = Field(
+        default=None, description="Modified task content if strategy is REPLAN"
+    )
+
+
 def check_if_running(
     running: bool,
     max_retries: int = 3,