camel-ai 0.2.0__py3-none-any.whl → 0.2.1a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +246 -59
- camel/loaders/firecrawl_reader.py +11 -43
- camel/models/mistral_model.py +1 -1
- camel/tasks/task.py +11 -4
- camel/tasks/task_prompt.py +4 -0
- camel/utils/commons.py +8 -1
- camel/workforce/__init__.py +6 -6
- camel/workforce/base.py +9 -5
- camel/workforce/prompts.py +175 -0
- camel/workforce/role_playing_worker.py +181 -0
- camel/workforce/{single_agent_node.py → single_agent_worker.py} +49 -22
- camel/workforce/task_channel.py +3 -5
- camel/workforce/utils.py +20 -50
- camel/workforce/{worker_node.py → worker.py} +15 -12
- camel/workforce/workforce.py +456 -19
- {camel_ai-0.2.0.dist-info → camel_ai-0.2.1a0.dist-info}/METADATA +2 -2
- {camel_ai-0.2.0.dist-info → camel_ai-0.2.1a0.dist-info}/RECORD +19 -20
- camel/workforce/manager_node.py +0 -299
- camel/workforce/role_playing_node.py +0 -168
- camel/workforce/workforce_prompt.py +0 -125
- {camel_ai-0.2.0.dist-info → camel_ai-0.2.1a0.dist-info}/WHEEL +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -15,6 +15,8 @@ from __future__ import annotations

 import json
 import logging
+import re
+import uuid
 from collections import defaultdict
 from typing import (
     TYPE_CHECKING,

@@ -28,6 +30,7 @@ from typing import (
 )

 from openai.types.chat import ChatCompletionMessageToolCall
+from openai.types.chat.chat_completion_message_tool_call import Function
 from pydantic import BaseModel

 from camel.agents.base import BaseAgent
@@ -190,7 +193,7 @@ class ChatAgent(BaseAgent):
             tool.get_openai_tool_schema() for tool in all_tools
         ]
         self.model_backend.model_config_dict['tools'] = tool_schema_list
-
+        self.tool_schema_list = tool_schema_list
         self.model_config_dict = self.model_backend.model_config_dict

         self.model_token_limit = token_limit or self.model_backend.token_limit
@@ -206,6 +209,56 @@
         self.response_terminators = response_terminators or []
         self.init_messages()

+    # ruff: noqa: E501
+    def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str:
+        tool_prompts = []
+
+        for tool in tool_schema_list:
+            tool_info = tool['function']
+            tool_name = tool_info['name']
+            tool_description = tool_info['description']
+            tool_json = json.dumps(tool_info, indent=4)
+
+            prompt = f"Use the function '{tool_name}' to '{tool_description}':\n{tool_json}\n"
+            tool_prompts.append(prompt)
+
+        tool_prompt_str = "\n".join(tool_prompts)
+
+        final_prompt = f'''
+# Tool prompt
+TOOL_PROMPT = f"""
+You have access to the following functions:
+
+{tool_prompt_str}
+
+If you choose to call a function ONLY reply in the following format with no prefix or suffix:
+
+<function=example_function_name>{{"example_name": "example_value"}}</function>
+
+Reminder:
+- Function calls MUST follow the specified format, start with <function= and end with </function>
+- Required parameters MUST be specified
+- Only call one function at a time
+- Put the entire function call reply on one line
+- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls
+"""
+'''
+        return final_prompt
+
+    def _parse_tool_response(self, response: str):
+        function_regex = r"<function=(\w+)>(.*?)</function>"
+        match = re.search(function_regex, response)
+
+        if match:
+            function_name, args_string = match.groups()
+            try:
+                args = json.loads(args_string)
+                return {"function": function_name, "arguments": args}
+            except json.JSONDecodeError as error:
+                print(f"Error parsing function arguments: {error}")
+                return None
+        return None
+
     def reset(self):
         r"""Resets the :obj:`ChatAgent` to its initial state and returns the
         stored messages.
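Aside (not part of the diff): a minimal, standalone sketch of what `_parse_tool_response` does with a reply that follows the generated tool prompt. The tool name and arguments below are invented for illustration:

    import json
    import re

    def parse_tool_response(response: str):
        # Same regex as the added _parse_tool_response: capture the tool
        # name and the JSON argument blob from the <function=...> envelope.
        match = re.search(r"<function=(\w+)>(.*?)</function>", response)
        if match:
            name, args_string = match.groups()
            try:
                return {"function": name, "arguments": json.loads(args_string)}
            except json.JSONDecodeError:
                return None
        return None

    reply = '<function=get_weather>{"city": "Paris"}</function>'
    print(parse_tool_response(reply))
    # -> {'function': 'get_weather', 'arguments': {'city': 'Paris'}}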
@@ -367,89 +420,221 @@
             a boolean indicating whether the chat session has terminated,
             and information about the chat session.
         """
-        self.update_memory(input_message, OpenAIBackendRole.USER)
+        if (
+            isinstance(self.model_type, ModelType)
+            and "lama" in self.model_type.value
+            or isinstance(self.model_type, str)
+            and "lama" in self.model_type
+        ):
+            if self.model_backend.model_config_dict['tools']:
+                tool_prompt = self._generate_tool_prompt(self.tool_schema_list)
+
+                tool_sys_msg = BaseMessage.make_assistant_message(
+                    role_name="Assistant",
+                    content=tool_prompt,
+                )

-        tool_call_records: List[FunctionCallingRecord] = []
-        while True:
-            # Check if token has exceeded
-            try:
-                openai_messages, num_tokens = self.memory.get_context()
-            except RuntimeError as e:
-                return self._step_token_exceed(
-                    e.args[1], tool_call_records, "max_tokens_exceeded"
+                self.update_memory(tool_sys_msg, OpenAIBackendRole.SYSTEM)
+
+            self.update_memory(input_message, OpenAIBackendRole.USER)
+
+            tool_call_records: List[FunctionCallingRecord] = []
+            while True:
+                # Check if token has exceeded
+                try:
+                    openai_messages, num_tokens = self.memory.get_context()
+                except RuntimeError as e:
+                    return self._step_token_exceed(
+                        e.args[1], tool_call_records, "max_tokens_exceeded"
+                    )
+
+                (
+                    response,
+                    output_messages,
+                    finish_reasons,
+                    usage_dict,
+                    response_id,
+                ) = self._step_model_response(openai_messages, num_tokens)
+                # If the model response is not a function call, meaning the
+                # model has generated a message response, break the loop
+                if (
+                    not self.is_tools_added()
+                    or not isinstance(response, ChatCompletion)
+                    or "</function>" not in response.choices[0].message.content  # type: ignore[operator]
+                ):
+                    break
+
+                parsed_content = self._parse_tool_response(
+                    response.choices[0].message.content  # type: ignore[arg-type]
                 )

-            (
-                response,
+                response.choices[0].message.tool_calls = [
+                    ChatCompletionMessageToolCall(
+                        id=str(uuid.uuid4()),
+                        function=Function(
+                            arguments=str(parsed_content["arguments"]).replace(
+                                "'", '"'
+                            ),
+                            name=str(parsed_content["function"]),
+                        ),
+                        type="function",
+                    )
+                ]
+
+                # Check for external tool call
+                tool_call_request = response.choices[0].message.tool_calls[0]
+                if tool_call_request.function.name in self.external_tool_names:
+                    # if model calls an external tool, directly return the
+                    # request
+                    info = self._step_get_info(
+                        output_messages,
+                        finish_reasons,
+                        usage_dict,
+                        response_id,
+                        tool_call_records,
+                        num_tokens,
+                        tool_call_request,
+                    )
+                    return ChatAgentResponse(
+                        msgs=output_messages,
+                        terminated=self.terminated,
+                        info=info,
+                    )
+
+                # Normal function calling
+                tool_call_records.append(
+                    self._step_tool_call_and_update(response)
+                )
+
+            if (
+                output_schema is not None
+                and self.model_type.supports_tool_calling
+            ):
+                (
+                    output_messages,
+                    finish_reasons,
+                    usage_dict,
+                    response_id,
+                    tool_call,
+                    num_tokens,
+                ) = self._structure_output_with_function(output_schema)
+                tool_call_records.append(tool_call)
+
+            info = self._step_get_info(
                 output_messages,
                 finish_reasons,
                 usage_dict,
                 response_id,
-            ) = self._step_model_response(openai_messages, num_tokens)
+                tool_call_records,
+                num_tokens,
+            )

-            # If the model response is not a function call, meaning the
-            # model has generated a message response, break the loop
-            if (
-                not self.is_tools_added()
-                or not isinstance(response, ChatCompletion)
-                or response.choices[0].message.tool_calls is None
-            ):
-                break
+            if len(output_messages) == 1:
+                # Auto record if the output result is a single message
+                self.record_message(output_messages[0])
+            else:
+                logger.warning(
+                    "Multiple messages returned in `step()`, message won't be "
+                    "recorded automatically. Please call `record_message()` "
+                    "to record the selected message manually."
+                )

-            # Check for external tool call
-            tool_call_request = response.choices[0].message.tool_calls[0]
-            if tool_call_request.function.name in self.external_tool_names:
-                # if model calls an external tool, directly return the request
-                info = self._step_get_info(
+            return ChatAgentResponse(
+                msgs=output_messages, terminated=self.terminated, info=info
+            )
+
+        else:
+            self.update_memory(input_message, OpenAIBackendRole.USER)
+
+            tool_call_records: List[FunctionCallingRecord] = []  # type: ignore[no-redef]
+            while True:
+                # Check if token has exceeded
+                try:
+                    openai_messages, num_tokens = self.memory.get_context()
+                except RuntimeError as e:
+                    return self._step_token_exceed(
+                        e.args[1], tool_call_records, "max_tokens_exceeded"
+                    )
+
+                (
+                    response,
                     output_messages,
                     finish_reasons,
                     usage_dict,
                     response_id,
-                    tool_call_records,
-                    num_tokens,
-                    tool_call_request,
-                )
-                return ChatAgentResponse(
-                    msgs=output_messages, terminated=self.terminated, info=info
+                ) = self._step_model_response(openai_messages, num_tokens)
+                # If the model response is not a function call, meaning the
+                # model has generated a message response, break the loop
+                if (
+                    not self.is_tools_added()
+                    or not isinstance(response, ChatCompletion)
+                    or response.choices[0].message.tool_calls is None
+                ):
+                    break
+
+                # Check for external tool call
+                tool_call_request = response.choices[0].message.tool_calls[0]
+
+                if tool_call_request.function.name in self.external_tool_names:
+                    # if model calls an external tool, directly return the
+                    # request
+                    info = self._step_get_info(
+                        output_messages,
+                        finish_reasons,
+                        usage_dict,
+                        response_id,
+                        tool_call_records,
+                        num_tokens,
+                        tool_call_request,
+                    )
+                    return ChatAgentResponse(
+                        msgs=output_messages,
+                        terminated=self.terminated,
+                        info=info,
+                    )
+
+                # Normal function calling
+                tool_call_records.append(
+                    self._step_tool_call_and_update(response)
                 )

-            # Normal function calling
-            tool_call_records.append(self._step_tool_call_and_update(response))
+            if (
+                output_schema is not None
+                and self.model_type.supports_tool_calling
+            ):
+                (
+                    output_messages,
+                    finish_reasons,
+                    usage_dict,
+                    response_id,
+                    tool_call,
+                    num_tokens,
+                ) = self._structure_output_with_function(output_schema)
+                tool_call_records.append(tool_call)

-        if output_schema is not None:
-            (
+            info = self._step_get_info(
                 output_messages,
                 finish_reasons,
                 usage_dict,
                 response_id,
-                tool_call,
+                tool_call_records,
                 num_tokens,
-            ) = self._structure_output_with_function(output_schema)
-            tool_call_records.append(tool_call)
+            )

-        info = self._step_get_info(
-            output_messages,
-            finish_reasons,
-            usage_dict,
-            response_id,
-            tool_call_records,
-            num_tokens,
-        )
+            if len(output_messages) == 1:
+                # Auto record if the output result is a single message
+                self.record_message(output_messages[0])
+            else:
+                logger.warning(
+                    "Multiple messages returned in `step()`, message won't be "
+                    "recorded automatically. Please call `record_message()` "
+                    "to record the selected message manually."
+                )

-        if len(output_messages) == 1:
-            # Auto record if the output result is a single message
-            self.record_message(output_messages[0])
-        else:
-            logger.warning(
-                "Multiple messages returned in `step()`, message won't be "
-                "recorded automatically. Please call `record_message()` to "
-                "record the selected message manually."
             )

-        return ChatAgentResponse(
-            msgs=output_messages, terminated=self.terminated, info=info
-        )
-
+            return ChatAgentResponse(
+                msgs=output_messages, terminated=self.terminated, info=info
+            )

     async def step_async(
         self,
         input_message: BaseMessage,
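Aside (not part of the diff): two things are worth noting in the dispatch above. Because `and` binds tighter than `or`, the condition groups as (enum and substring) or (str and substring), and the substring "lama" matches both the "llama" and "Llama" spellings. A standalone sketch with a hypothetical stand-in for camel's ModelType enum:

    from enum import Enum

    class ModelType(Enum):  # hypothetical stand-in for camel's ModelType
        GPT_4O_MINI = "gpt-4o-mini"
        LLAMA_3 = "llama-3"

    def uses_llama_tool_prompt(model_type) -> bool:
        # Same shape as the added condition: `and` binds tighter than `or`.
        return (
            isinstance(model_type, ModelType)
            and "lama" in model_type.value
            or isinstance(model_type, str)
            and "lama" in model_type
        )

    print(uses_llama_tool_prompt(ModelType.LLAMA_3))      # True
    print(uses_llama_tool_prompt("Llama-3.1-70B"))        # True
    print(uses_llama_tool_prompt(ModelType.GPT_4O_MINI))  # False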
@@ -622,9 +807,11 @@ class ChatAgent(BaseAgent):

         # Replace the original tools with the structuring function
         self.func_dict = {func.get_function_name(): func.func}
+        self.model_backend.model_config_dict = original_model_dict.copy()
         self.model_backend.model_config_dict["tools"] = [
             func.get_openai_tool_schema()
         ]
+        self.model_backend.model_config_dict["tool_choice"] = "required"

         openai_messages, num_tokens = self.memory.get_context()
         (
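Aside (not part of the diff): the added `.copy()` guards against aliasing: if the backend's config and `original_model_dict` are the same object, overwriting "tools" would also clobber the saved original. A hypothetical minimal repro of that failure mode:

    original = {"tools": ["search", "calculator"]}

    aliased = original                 # no copy: both names share one dict
    aliased["tools"] = ["return_json"]
    print(original["tools"])           # ['return_json'] - original lost

    original = {"tools": ["search", "calculator"]}
    restored = original.copy()         # shallow copy, as in the added line
    restored["tools"] = ["return_json"]
    print(original["tools"])           # ['search', 'calculator'] - intact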
camel/loaders/firecrawl_reader.py
CHANGED

@@ -49,7 +49,6 @@ class Firecrawl:
         self,
         url: str,
         params: Optional[Dict[str, Any]] = None,
-        wait_until_done: bool = True,
         **kwargs: Any,
     ) -> Any:
         r"""Crawl a URL and all accessible subpages. Customize the crawl by

@@ -60,14 +59,12 @@ class Firecrawl:
             url (str): The URL to crawl.
             params (Optional[Dict[str, Any]]): Additional parameters for the
                 crawl request. Defaults to `None`.
-            wait_until_done (bool): Whether to wait until the crawl job is
-                completed. Defaults to `True`.
             **kwargs (Any): Additional keyword arguments, such as
-                `poll_interval`, `idempotency_key
+                `poll_interval`, `idempotency_key`.

         Returns:
-            Any: The
-
+            Any: The crawl job ID or the crawl results if waiting until
+                completion.

         Raises:
             RuntimeError: If the crawling process fails.

@@ -78,13 +75,8 @@ class Firecrawl:
                 url=url,
                 params=params,
                 **kwargs,
-                wait_until_done=wait_until_done,
-            )
-            return (
-                crawl_response
-                if wait_until_done
-                else crawl_response.get("jobId")
             )
+            return crawl_response
         except Exception as e:
             raise RuntimeError(f"Failed to crawl the URL: {e}")

@@ -103,7 +95,10 @@ class Firecrawl:
         """

         try:
-            crawl_result = self.app.crawl_url(
+            crawl_result = self.app.crawl_url(
+                url,
+                {'formats': ['markdown']},
+            )
             if not isinstance(crawl_result, list):
                 raise ValueError("Unexpected response format")
             markdown_contents = [

@@ -180,41 +175,14 @@ class Firecrawl:
             data = self.app.scrape_url(
                 url,
                 {
-                    '
-
-                    "extractionPrompt": "Based on the information on "
-                    "the page, extract the information from the schema.",
-                    'extractionSchema': output_schema.model_json_schema(),
-                    },
-                    'pageOptions': {'onlyMainContent': True},
+                    'formats': ['extract'],
+                    'extract': {'schema': output_schema.model_json_schema()},
                 },
             )
-            return data.get("
+            return data.get("extract", {})
         except Exception as e:
             raise RuntimeError(f"Failed to perform structured scrape: {e}")

-    def tidy_scrape(self, url: str) -> str:
-        r"""Only return the main content of the page, excluding headers,
-        navigation bars, footers, etc. in Markdown format.
-
-        Args:
-            url (str): The URL to read.
-
-        Returns:
-            str: The markdown content of the URL.
-
-        Raises:
-            RuntimeError: If the scrape process fails.
-        """
-
-        try:
-            scrape_result = self.app.scrape_url(
-                url, {'pageOptions': {'onlyMainContent': True}}
-            )
-            return scrape_result.get("markdown", "")
-        except Exception as e:
-            raise RuntimeError(f"Failed to perform tidy scrape: {e}")
-
     def map_site(
         self, url: str, params: Optional[Dict[str, Any]] = None
     ) -> list:
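Aside (not part of the diff): the structured scrape now sends Firecrawl's `formats`/`extract` style parameters shown in the + lines above. A standalone sketch of the payload shape, with a made-up schema:

    from pydantic import BaseModel

    class ArticleInfo(BaseModel):  # example schema, not from the package
        title: str
        author: str

    params = {
        'formats': ['extract'],
        'extract': {'schema': ArticleInfo.model_json_schema()},
    }
    # The method then returns the dict under the response's "extract" key,
    # e.g. {"title": "...", "author": "..."}.
    print(sorted(params['extract']['schema']['properties']))  # ['author', 'title']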
camel/models/mistral_model.py
CHANGED
@@ -93,7 +93,7 @@ class MistralModel(BaseModelBackend):
                     "name": tool_call.function.name,  # type: ignore[union-attr]
                     "arguments": tool_call.function.arguments,  # type: ignore[union-attr]
                 },
-                type=tool_call.type,
+                type=tool_call.type,  # type: ignore[union-attr]
             )
             for tool_call in response.choices[0].message.tool_calls
         ]
camel/tasks/task.py
CHANGED
@@ -93,6 +93,10 @@ class Task(BaseModel):

     result: Optional[str] = ""

+    failure_count: int = 0
+
+    additional_info: Optional[str] = None
+
     @classmethod
     def from_message(cls, message: BaseMessage) -> "Task":
         r"""Create a task from a message.

@@ -193,7 +197,7 @@ class Task(BaseModel):
     def decompose(
         self,
         agent: ChatAgent,
-
+        prompt: Optional[str] = None,
         task_parser: Callable[[str, str], List["Task"]] = parse_response,
     ) -> List["Task"]:
         r"""Decompose a task to a list of sub-tasks. It can be used for data

@@ -201,8 +205,8 @@ class Task(BaseModel):

         Args:
             agent (ChatAgent): An agent that used to decompose the task.
-
-
+            prompt (str, optional): A prompt to decompose the task. If not
+                provided, the default prompt will be used.
             task_parser (Callable[[str, str], List[Task]], optional): A
                 function to extract Task from response. If not provided,
                 the default parse_response will be used.

@@ -212,7 +216,7 @@ class Task(BaseModel):
         """

         role_name = agent.role_name
-        content = TASK_DECOMPOSE_PROMPT.format(
+        content = prompt or TASK_DECOMPOSE_PROMPT.format(
             role_name=role_name,
             content=self.content,
         )

@@ -221,6 +225,8 @@ class Task(BaseModel):
         )
         response = agent.step(msg)
         tasks = task_parser(response.msg.content, self.id)
+        for task in tasks:
+            task.additional_info = self.additional_info
         return tasks

     def compose(

@@ -248,6 +254,7 @@ class Task(BaseModel):
         content = template.format(
             role_name=role_name,
             content=self.content,
+            additional_info=self.additional_info,
             other_results=sub_tasks_result,
         )
         msg = BaseMessage.make_user_message(
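Aside (not part of the diff): a rough usage sketch of the two new `Task` fields and the `decompose` prompt override. The import path and constructor arguments are assumed for illustration, and the agent setup is elided because this release does not change it:

    from camel.tasks import Task  # import path assumed for illustration

    parent = Task(
        content="Summarize the quarterly report",
        id="0",
        additional_info="Focus on the EMEA region",  # new optional field
    )
    print(parent.failure_count)  # 0 -- new counter, defaults to 0

    # decompose() now accepts an optional custom prompt, and every parsed
    # subtask inherits the parent's additional_info:
    # subtasks = parent.decompose(agent, prompt="Split this into 3 steps.")
    # assert all(t.additional_info == parent.additional_info for t in subtasks)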
camel/tasks/task_prompt.py
CHANGED
camel/utils/commons.py
CHANGED
@@ -381,10 +381,17 @@ def json_to_function_code(json_obj: Dict) -> str:
     docstring_args = []
     return_keys = []

+    prop_to_python = {
+        'string': 'str',
+        'number': 'float',
+        'integer': 'int',
+        'boolean': 'bool',
+    }
+
     for prop in required:
         description = properties[prop]['description']
         prop_type = properties[prop]['type']
-        python_type =
+        python_type = prop_to_python.get(prop_type, prop_type)
         args.append(f"{prop}: {python_type}")
         docstring_args.append(
             f"    {prop} ({python_type}): {description}."
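Aside (not part of the diff): the new mapping in isolation. JSON Schema primitive type names now become real Python annotations, while unmapped types fall through unchanged:

    prop_to_python = {
        'string': 'str',
        'number': 'float',
        'integer': 'int',
        'boolean': 'bool',
    }

    for prop_type in ('string', 'integer', 'array'):
        # Unmapped types (like 'array') keep the raw JSON type name.
        print(prop_type, '->', prop_to_python.get(prop_type, prop_type))
    # string -> str
    # integer -> int
    # array -> array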
camel/workforce/__init__.py
CHANGED
@@ -12,12 +12,12 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

-from .
-from .
-from .
+from .role_playing_worker import RolePlayingWorker
+from .single_agent_worker import SingleAgentWorker
+from .workforce import Workforce

 __all__ = [
-    "
-    "
-    "
+    "Workforce",
+    "SingleAgentWorker",
+    "RolePlayingWorker",
 ]
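Aside (not part of the diff): after the rename, the public imports match the new `__all__`:

    from camel.workforce import RolePlayingWorker, SingleAgentWorker, Workforce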
camel/workforce/base.py
CHANGED
@@ -15,36 +15,40 @@ from abc import ABC, abstractmethod
 from typing import Any

 from camel.workforce.task_channel import TaskChannel
+from camel.workforce.utils import check_if_running


 class BaseNode(ABC):
     def __init__(self, description: str) -> None:
         self.node_id = str(id(self))
         self.description = description
-        # every node is initialized to use its own channel
         self._channel: TaskChannel = TaskChannel()
         self._running = False

+    @check_if_running(False)
     def reset(self, *args: Any, **kwargs: Any) -> Any:
         """Resets the node to its initial state."""
-
+        self._channel = TaskChannel()
+        self._running = False

     @abstractmethod
     def set_channel(self, channel: TaskChannel):
         r"""Sets the channel for the node."""
+        pass

     @abstractmethod
     async def _listen_to_channel(self):
         r"""Listens to the channel and handle tasks. This method should be
         the main loop for the node.
         """
+        pass

     @abstractmethod
     async def start(self):
         r"""Start the node."""
+        pass

     @abstractmethod
     def stop(self):
-        r"""
-
-        """
+        r"""Stop the node."""
+        pass
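Aside (not part of the diff): `check_if_running` lives in camel/workforce/utils.py, whose body is not shown here. Judging from its use on `reset()` above, it reads as a guard that only lets a method run when `self._running` matches the expected flag; a speculative sketch of that shape:

    import functools

    def check_if_running(expected: bool):
        # Guessed semantics: refuse to run the wrapped method unless the
        # node's _running flag equals `expected`.
        def decorator(func):
            @functools.wraps(func)
            def wrapper(self, *args, **kwargs):
                if self._running != expected:
                    raise RuntimeError(
                        f"{func.__name__}() expects _running={expected}"
                    )
                return func(self, *args, **kwargs)
            return wrapper
        return decorator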