camel-ai 0.1.5.9__py3-none-any.whl → 0.1.6.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +246 -33
- camel/agents/critic_agent.py +17 -1
- camel/agents/deductive_reasoner_agent.py +12 -0
- camel/agents/embodied_agent.py +19 -5
- camel/agents/knowledge_graph_agent.py +22 -3
- camel/agents/role_assignment_agent.py +12 -0
- camel/agents/search_agent.py +12 -0
- camel/agents/task_agent.py +15 -0
- camel/configs/__init__.py +2 -9
- camel/configs/anthropic_config.py +5 -6
- camel/configs/base_config.py +50 -4
- camel/configs/gemini_config.py +69 -18
- camel/configs/groq_config.py +6 -20
- camel/configs/litellm_config.py +2 -8
- camel/configs/mistral_config.py +17 -20
- camel/configs/ollama_config.py +6 -8
- camel/configs/openai_config.py +12 -23
- camel/configs/vllm_config.py +7 -8
- camel/configs/zhipuai_config.py +5 -11
- camel/human.py +1 -1
- camel/loaders/__init__.py +2 -0
- camel/loaders/firecrawl_reader.py +213 -0
- camel/memories/agent_memories.py +1 -4
- camel/memories/blocks/chat_history_block.py +6 -2
- camel/memories/blocks/vectordb_block.py +3 -1
- camel/memories/context_creators/score_based.py +6 -6
- camel/memories/records.py +9 -7
- camel/messages/base.py +1 -0
- camel/models/open_source_model.py +2 -2
- camel/prompts/__init__.py +7 -0
- camel/prompts/image_craft.py +34 -0
- camel/prompts/multi_condition_image_craft.py +34 -0
- camel/prompts/task_prompt_template.py +6 -0
- camel/responses/agent_responses.py +4 -3
- camel/retrievers/auto_retriever.py +0 -2
- camel/societies/babyagi_playing.py +6 -4
- camel/societies/role_playing.py +16 -8
- camel/storages/graph_storages/graph_element.py +10 -14
- camel/storages/vectordb_storages/base.py +24 -13
- camel/storages/vectordb_storages/milvus.py +1 -1
- camel/storages/vectordb_storages/qdrant.py +2 -3
- camel/tasks/__init__.py +22 -0
- camel/tasks/task.py +408 -0
- camel/tasks/task_prompt.py +65 -0
- camel/toolkits/__init__.py +3 -0
- camel/toolkits/base.py +3 -1
- camel/toolkits/dalle_toolkit.py +146 -0
- camel/toolkits/github_toolkit.py +16 -32
- camel/toolkits/google_maps_toolkit.py +2 -1
- camel/toolkits/open_api_toolkit.py +1 -2
- camel/toolkits/openai_function.py +2 -7
- camel/types/enums.py +6 -2
- camel/utils/__init__.py +14 -2
- camel/utils/commons.py +167 -2
- camel/utils/constants.py +3 -0
- camel/workforce/__init__.py +23 -0
- camel/workforce/base.py +50 -0
- camel/workforce/manager_node.py +299 -0
- camel/workforce/role_playing_node.py +168 -0
- camel/workforce/single_agent_node.py +77 -0
- camel/workforce/task_channel.py +173 -0
- camel/workforce/utils.py +97 -0
- camel/workforce/worker_node.py +115 -0
- camel/workforce/workforce.py +49 -0
- camel/workforce/workforce_prompt.py +125 -0
- {camel_ai-0.1.5.9.dist-info → camel_ai-0.1.6.2.dist-info}/METADATA +5 -2
- {camel_ai-0.1.5.9.dist-info → camel_ai-0.1.6.2.dist-info}/RECORD +69 -52
- {camel_ai-0.1.5.9.dist-info → camel_ai-0.1.6.2.dist-info}/WHEEL +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -15,8 +15,18 @@ from __future__ import annotations
 
 import json
 from collections import defaultdict
-from
-
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    List,
+    Optional,
+    Tuple,
+    Union,
+)
+
+from pydantic import BaseModel
 
 from camel.agents.base import BaseAgent
 from camel.configs import ChatGPTConfig
@@ -37,7 +47,13 @@ from camel.types import (
     OpenAIBackendRole,
     RoleType,
 )
-from camel.utils import
+from camel.utils import (
+    Constants,
+    func_string_to_callable,
+    get_model_encoding,
+    get_pydantic_object_schema,
+    json_to_function_code,
+)
 
 if TYPE_CHECKING:
     from openai import Stream
@@ -45,9 +61,19 @@ if TYPE_CHECKING:
     from camel.terminators import ResponseTerminator
     from camel.toolkits import OpenAIFunction
 
+# AgentOps decorator setting
+try:
+    import os
+
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import track_agent
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    from camel.utils import track_agent
 
-
-class FunctionCallingRecord:
+
+class FunctionCallingRecord(BaseModel):
     r"""Historical records of functions called in the conversation.
 
     Attributes:
@@ -67,14 +93,17 @@ class FunctionCallingRecord:
         Returns:
             str: Modified string to represent the function calling.
         """
-
         return (
             f"Function Execution: {self.func_name}\n"
             f"\tArgs: {self.args}\n"
             f"\tResult: {self.result}"
         )
 
+    def as_dict(self) -> dict[str, Any]:
+        return self.model_dump()
+
 
+@track_agent(name="ChatAgent")
 class ChatAgent(BaseAgent):
     r"""Class for managing conversations of CAMEL Chat Agents.
 
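With FunctionCallingRecord now subclassing pydantic's BaseModel, records are built with keyword arguments and serialize through model_dump(). A minimal sketch of the reworked record in use, assuming its three fields are func_name, args, and result as the __str__ body above suggests (the values are invented for illustration):

from camel.agents.chat_agent import FunctionCallingRecord

record = FunctionCallingRecord(
    func_name="add",
    args={"a": 1, "b": 2},
    result=3,
)
print(record)            # Function Execution: add ...
print(record.as_dict())  # {'func_name': 'add', 'args': {'a': 1, 'b': 2}, 'result': 3}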
@@ -128,7 +157,7 @@ class ChatAgent(BaseAgent):
             else ModelFactory.create(
                 model_platform=ModelPlatformType.OPENAI,
                 model_type=ModelType.GPT_4O_MINI,
-                model_config_dict=ChatGPTConfig().
+                model_config_dict=ChatGPTConfig().as_dict(),
                 api_key=self._api_key,
             )
         )
@@ -209,7 +238,9 @@ class ChatAgent(BaseAgent):
                 messages.
             role (OpenAIBackendRole): The backend role type.
         """
-        self.memory.write_record(
+        self.memory.write_record(
+            MemoryRecord(message=message, role_at_backend=role)
+        )
 
     def set_output_language(self, output_language: str) -> BaseMessage:
         r"""Sets the output language for the system message. This method
@@ -268,7 +299,8 @@ class ChatAgent(BaseAgent):
             message.
         """
         system_record = MemoryRecord(
-            self.system_message,
+            message=self.system_message,
+            role_at_backend=OpenAIBackendRole.SYSTEM,
         )
         self.memory.clear()
         self.memory.write_record(system_record)
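The rewritten call sites above switch from positional to keyword arguments because MemoryRecord is now a pydantic model, and pydantic's BaseModel.__init__ only accepts fields as keywords. A sketch of the difference, assuming MemoryRecord is importable from camel.memories and exposes the message and role_at_backend fields shown above:

from camel.memories import MemoryRecord
from camel.types import OpenAIBackendRole

# Positional construction now raises a TypeError:
# MemoryRecord(system_message, OpenAIBackendRole.SYSTEM)

# Keyword construction is required:
record = MemoryRecord(
    message=system_message,  # placeholder: any BaseMessage instance
    role_at_backend=OpenAIBackendRole.SYSTEM,
)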
@@ -287,6 +319,7 @@ class ChatAgent(BaseAgent):
     def step(
         self,
         input_message: BaseMessage,
+        output_schema: Optional[BaseModel] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message.
@@ -297,6 +330,10 @@ class ChatAgent(BaseAgent):
                 either `user` or `assistant` but it will be set to `user`
                 anyway since for the self agent any incoming message is
                 external.
+            output_schema (Optional[BaseModel]): An optional pydantic model
+                that includes value types and field descriptions used to
+                generate a structured response by LLM. This schema helps
+                in defining the expected output format.
 
         Returns:
             ChatAgentResponse: A struct containing the output messages,
@@ -310,7 +347,7 @@ class ChatAgent(BaseAgent):
         tool_calls: List[FunctionCallingRecord] = []
         while True:
             # Format messages and get the token number
-            openai_messages:
+            openai_messages: Optional[List[OpenAIMessage]]
 
             try:
                 openai_messages, num_tokens = self.memory.get_context()
@@ -318,6 +355,15 @@ class ChatAgent(BaseAgent):
                 return self.step_token_exceed(
                     e.args[1], tool_calls, "max_tokens_exceeded"
                 )
+            # use structed output response without tools
+            # If the user provides the output_schema parameter and does not
+            # specify the use of tools, then in the model config of the
+            # chatgent, call the model specified by tools with
+            # return_json_response of OpenAIFunction format, and return a
+            # structured response with the user-specified output schema.
+            if output_schema is not None and len(self.func_dict) == 0:
+                self._add_output_schema_to_tool_list(output_schema)
+
             (
                 response,
                 output_messages,
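Taken together, the hunks above let a caller request structured output from step() by passing a pydantic schema. A hypothetical usage sketch; JokeResponse and both messages are invented for illustration:

from pydantic import BaseModel

from camel.agents import ChatAgent
from camel.messages import BaseMessage


class JokeResponse(BaseModel):
    joke: str         # the joke text
    funny_level: int  # a self-assessed rating


agent = ChatAgent(
    system_message=BaseMessage.make_assistant_message(
        role_name="Assistant", content="You are a helpful assistant."
    )
)
user_msg = BaseMessage.make_user_message(
    role_name="User", content="Tell me a joke."
)

# With no tools configured, step() takes the branch above and injects the
# schema-derived function before calling the model.
response = agent.step(user_msg, output_schema=JokeResponse)
print(response.msgs[0].content)  # stringified dict shaped like JokeResponse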
@@ -333,9 +379,8 @@
             ):
                 # Tools added for function calling and not in stream mode
 
-
-
-                self.step_tool_call(response)
+                tool_calls, func_assistant_msg, func_result_msg = (
+                    self._add_tools_for_func_call(response, tool_calls)
                 )
 
                 # Update the messages
@@ -344,11 +389,44 @@
                 )
                 self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
 
-                # Record the function calling
-                tool_calls.append(func_record)
-
             else:
-                #
+                # If the user specifies tools, it is necessary to wait for the
+                # model to complete all tools' calls. Finally, use the
+                # generated response as the input for the structure,
+                # simultaneously calling the return_json_response function.
+                # Call the model again with return_json_response in the format
+                # of OpenAIFunction as the last tool, returning a structured
+                # response with the user-specified output schema.
+                if output_schema is not None and all(
+                    record.func_name
+                    != Constants.FUNC_NAME_FOR_STRUCTURE_OUTPUT
+                    for record in tool_calls
+                ):
+                    self._add_output_schema_to_tool_list(output_schema)
+
+                    (
+                        response,
+                        output_messages,
+                        finish_reasons,
+                        usage_dict,
+                        response_id,
+                    ) = self._step_model_response(openai_messages, num_tokens)
+
+                    if isinstance(response, ChatCompletion):
+                        # Tools added for function calling and not in stream
+                        # mode
+                        tool_calls, func_assistant_msg, func_result_msg = (
+                            self._add_tools_for_func_call(response, tool_calls)
+                        )
+
+                        # Update the messages
+                        self.update_memory(
+                            func_assistant_msg, OpenAIBackendRole.ASSISTANT
+                        )
+                        self.update_memory(
+                            func_result_msg, OpenAIBackendRole.FUNCTION
+                        )
+
                 info = self._step_get_info(
                     output_messages,
                     finish_reasons,
@@ -359,20 +437,34 @@
                 )
                 break
 
-
+        # if use structure response, set structure result as content of
+        # BaseMessage
+        if output_schema and self.model_type.is_openai:
+            for base_message_item in output_messages:
+                base_message_item.content = str(info['tool_calls'][-1].result)
+
+        return ChatAgentResponse(
+            msgs=output_messages, terminated=self.terminated, info=info
+        )
 
     async def step_async(
         self,
         input_message: BaseMessage,
+        output_schema: Optional[BaseModel] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message. This agent step can call async function calls.
 
         Args:
             input_message (BaseMessage): The input message to the agent.
-
-
-
+                Its `role` field that specifies the role at backend may be
+                either `user` or `assistant` but it will be set to `user`
+                anyway since for the self agent any incoming message is
+                external.
+            output_schema (Optional[BaseModel]): An optional pydantic model
+                that includes value types and field descriptions used to
+                generate a structured response by LLM. This schema helps
+                in defining the expected output format.
 
         Returns:
             ChatAgentResponse: A struct containing the output messages,
@@ -386,7 +478,7 @@
         tool_calls: List[FunctionCallingRecord] = []
         while True:
             # Format messages and get the token number
-            openai_messages:
+            openai_messages: Optional[List[OpenAIMessage]]
 
             try:
                 openai_messages, num_tokens = self.memory.get_context()
@@ -394,6 +486,9 @@
                 return self.step_token_exceed(
                     e.args[1], tool_calls, "max_tokens_exceeded"
                 )
+            if output_schema is not None:
+                self._add_output_schema_to_tool_list(output_schema)
+
             (
                 response,
                 output_messages,
@@ -426,6 +521,37 @@
                 tool_calls.append(func_record)
 
             else:
+                # use structed output response without tools
+                if output_schema is not None and all(
+                    record.func_name
+                    != Constants.FUNC_NAME_FOR_STRUCTURE_OUTPUT
+                    for record in tool_calls
+                ):
+                    self._add_output_schema_to_tool_list(output_schema)
+
+                    (
+                        response,
+                        output_messages,
+                        finish_reasons,
+                        usage_dict,
+                        response_id,
+                    ) = self._step_model_response(openai_messages, num_tokens)
+
+                    if isinstance(response, ChatCompletion):
+                        # Tools added for function calling and not in stream
+                        # mode
+                        tool_calls, func_assistant_msg, func_result_msg = (
+                            self._add_tools_for_func_call(response, tool_calls)
+                        )
+
+                        # Update the messages
+                        self.update_memory(
+                            func_assistant_msg, OpenAIBackendRole.ASSISTANT
+                        )
+                        self.update_memory(
+                            func_result_msg, OpenAIBackendRole.FUNCTION
+                        )
+
                 # Function calling disabled or not a function calling
                 info = self._step_get_info(
                     output_messages,
@@ -437,17 +563,100 @@
                 )
                 break
 
-
+        # if use structure response, set structure result as content of
+        # BaseMessage
+        if output_schema and self.model_type.is_openai:
+            for base_message_item in output_messages:
+                base_message_item.content = str(info['tool_calls'][0].result)
+
+        return ChatAgentResponse(
+            msgs=output_messages, terminated=self.terminated, info=info
+        )
+
+    def _add_tools_for_func_call(
+        self,
+        response: ChatCompletion,
+        tool_calls: List[FunctionCallingRecord],
+    ) -> tuple[
+        List[FunctionCallingRecord],
+        FunctionCallingMessage,
+        FunctionCallingMessage,
+    ]:
+        r"""
+        Handles adding tools for function calls based on the response.
+        This method processes a function call within the chat completion
+        response, and records the function call in the provided
+        list of tool calls.
+        Args:
+            response (ChatCompletion): The response object from the chat
+                completion.
+            tool_calls (List[FunctionCallingRecord]): The list to record
+                function calls.
+        Returns:
+            tuple: A tuple containing:
+                - List[FunctionCallingRecord]: The updated list of function
+                  call records.
+                - FunctionCallingMessage: The assistant's message regarding the
+                  function call.
+                - FunctionCallingMessage: The result message of the function
+                  call.
+        """
+
+        # Perform function calling
+        func_assistant_msg, func_result_msg, func_record = self.step_tool_call(
+            response
+        )
+
+        # Record the function call in the list of tool calls
+        tool_calls.append(func_record)
+
+        # Return updated tool calls list, assistant's message, and function
+        # result message
+        return tool_calls, func_assistant_msg, func_result_msg
+
+    def _add_output_schema_to_tool_list(self, output_schema: BaseModel):
+        r"""Handles the structured output response for OpenAI.
+        This method processes the given output schema and integrates the
+        resulting function into the tools for the OpenAI model configuration.
+        Args:
+            output_schema (BaseModel): The schema representing the expected
+                output structure.
+        """
+        from camel.toolkits import OpenAIFunction
+
+        # step 1 extract the output_schema info as json.
+        schema_json = get_pydantic_object_schema(output_schema)
+
+        # step 2 convert output schema json as callable string
+        func_str = json_to_function_code(schema_json)
+
+        # step 3 get callable function from string
+        func_callable = func_string_to_callable(func_str)
+
+        # step 4 add return_json_func into tools
+        func = OpenAIFunction(func_callable)
+        tools = [func]
+        self.func_dict[func.get_function_name()] = func.func
+        if self.model_type.is_openai:
+            self.model_backend.model_config_dict = ChatGPTConfig(
+                tools=tools
+            ).as_dict()
+        elif self.model_type.is_gemini:
+            from camel.configs.gemini_config import GeminiConfig
+
+            self.model_backend.model_config_dict = GeminiConfig(
+                tools=tools
+            ).as_dict()
 
     def _step_model_response(
         self,
-        openai_messages:
+        openai_messages: List[OpenAIMessage],
         num_tokens: int,
     ) -> tuple[
-        ChatCompletion
-
-
-
+        Union[ChatCompletion, Stream],
+        List[BaseMessage],
+        List[str],
+        Dict[str, int],
         str,
     ]:
         r"""Internal function for agent step model response."""
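The four numbered steps in _add_output_schema_to_tool_list lean on the new camel.utils helpers (get_pydantic_object_schema, json_to_function_code, func_string_to_callable). A rough approximation of the same pipeline in plain Python, to show what each stage produces; the generated function name and body here are illustrative, not the library's internals:

from pydantic import BaseModel


class JokeResponse(BaseModel):
    joke: str
    funny_level: int


# Step 1: extract the schema as JSON (the idea behind
# get_pydantic_object_schema)
schema_json = JokeResponse.model_json_schema()

# Step 2: render the schema as the source code of a function whose
# signature mirrors the schema fields (what json_to_function_code emits)
func_str = (
    "def return_json_response(joke: str, funny_level: int) -> dict:\n"
    "    return {'joke': joke, 'funny_level': funny_level}\n"
)

# Step 3: compile the source back into a callable
# (what func_string_to_callable does)
namespace: dict = {}
exec(func_str, namespace)
func_callable = namespace["return_json_response"]

# Step 4: wrapping func_callable in OpenAIFunction and appending it to the
# model's tool list makes the model's final call emit schema-shaped JSON.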
@@ -617,9 +826,9 @@
         )
 
         return ChatAgentResponse(
-            output_messages,
-            self.terminated,
-            info,
+            msgs=output_messages,
+            terminated=self.terminated,
+            info=info,
         )
 
     def step_tool_call(
@@ -676,7 +885,9 @@
         )
 
         # Record information about this function call
-        func_record = FunctionCallingRecord(
+        func_record = FunctionCallingRecord(
+            func_name=func_name, args=args, result=result
+        )
         return assist_msg, func_msg, func_record
 
     async def step_tool_call_async(
@@ -735,7 +946,9 @@
         )
 
         # Record information about this function call
-        func_record = FunctionCallingRecord(
+        func_record = FunctionCallingRecord(
+            func_name=func_name, args=args, result=result
+        )
         return assist_msg, func_msg, func_record
 
     def get_usage_dict(
camel/agents/critic_agent.py
CHANGED
@@ -24,7 +24,19 @@ from camel.models import BaseModelBackend
 from camel.responses import ChatAgentResponse
 from camel.utils import get_first_int, print_text_animated
 
+# AgentOps decorator setting
+try:
+    import os
 
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import track_agent
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    from camel.utils import track_agent
+
+
+@track_agent(name="CriticAgent")
 class CriticAgent(ChatAgent):
     r"""A class for the critic agent that assists in selecting an option.
 
@@ -184,4 +196,8 @@ class CriticAgent(ChatAgent):
         output_msg = meta_chat_message.create_new_instance(option)
 
         # TODO: The return `info` can be improved.
-        return ChatAgentResponse(
+        return ChatAgentResponse(
+            msgs=[output_msg],
+            terminated=False,
+            info={},
+        )
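The guarded import above now recurs at the top of every agent module in this release: when AGENTOPS_API_KEY is set and agentops is installed, the real track_agent decorator is imported; otherwise camel.utils supplies a stand-in. A minimal sketch of what such a no-op fallback can look like (the actual camel.utils implementation may differ):

from typing import Any, Callable


def track_agent(name: str) -> Callable[[Any], Any]:
    """Fallback decorator: a no-op when AgentOps tracking is unavailable."""

    def decorator(cls: Any) -> Any:
        # Return the class unchanged; nothing is tracked without AgentOps.
        return cls

    return decorator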
camel/agents/deductive_reasoner_agent.py
CHANGED
@@ -20,7 +20,19 @@ from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
 from camel.types import RoleType
 
+# AgentOps decorator setting
+try:
+    import os
 
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import track_agent
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    from camel.utils import track_agent
+
+
+@track_agent(name="DeductiveReasonerAgent")
 class DeductiveReasonerAgent(ChatAgent):
     r"""An agent responsible for deductive reasoning. Model of deductive
     reasoning:
camel/agents/embodied_agent.py
CHANGED
@@ -27,7 +27,19 @@ from camel.models import BaseModelBackend
 from camel.responses import ChatAgentResponse
 from camel.utils import print_text_animated
 
+# AgentOps decorator setting
+try:
+    import os
 
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import track_agent
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    from camel.utils import track_agent
+
+
+@track_agent(name="EmbodiedAgent")
 class EmbodiedAgent(ChatAgent):
     r"""Class for managing conversations of CAMEL Embodied Agents.
 
@@ -120,10 +132,8 @@ class EmbodiedAgent(ChatAgent):
         else:
             return []
 
-
-
-        input_message: BaseMessage,
-    ) -> ChatAgentResponse:
+    # ruff: noqa: E501
+    def step(self, input_message: BaseMessage) -> ChatAgentResponse:  # type: ignore[override]
         r"""Performs a step in the conversation.
 
         Args:
@@ -184,4 +194,8 @@ class EmbodiedAgent(ChatAgent):
             input_message.meta_dict,
             content,
         )
-        return ChatAgentResponse(
+        return ChatAgentResponse(
+            msgs=[message],
+            terminated=response.terminated,
+            info=response.info,
+        )
camel/agents/knowledge_graph_agent.py
CHANGED
@@ -29,6 +29,18 @@ from camel.storages.graph_storages.graph_element import (
 )
 from camel.types import RoleType
 
+# AgentOps decorator setting
+try:
+    import os
+
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import track_agent
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    from camel.utils import track_agent
+
+
 text_prompt = """
 You are tasked with extracting nodes and relationships from given content and
 structures them into Node and Relationship objects. Here's the outline of what
@@ -97,6 +109,7 @@ into Node and Relationship objects.
 """
 
 
+@track_agent(name="KnowledgeGraphAgent")
 class KnowledgeGraphAgent(ChatAgent):
     r"""An agent that can extract node and relationship information for
     different entities from given `Element` content.
@@ -224,7 +237,7 @@ class KnowledgeGraphAgent(ChatAgent):
             id, type = match.groups()
             properties = {'source': 'agent_created'}
             if id not in nodes:
-                node = Node(id, type, properties)
+                node = Node(id=id, type=type, properties=properties)
                 if self._validate_node(node):
                     nodes[id] = node
 
@@ -235,8 +248,14 @@ class KnowledgeGraphAgent(ChatAgent):
             if subj_id in nodes and obj_id in nodes:
                 subj = nodes[subj_id]
                 obj = nodes[obj_id]
-                relationship = Relationship(
+                relationship = Relationship(
+                    subj=subj, obj=obj, type=rel_type, properties=properties
+                )
                 if self._validate_relationship(relationship):
                     relationships.append(relationship)
 
-        return GraphElement(
+        return GraphElement(
+            nodes=list(nodes.values()),
+            relationships=relationships,
+            source=self.element,
+        )
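As with the other pydantic migrations in this release, the graph types are now constructed with explicit keywords. A small sketch of the objects the rewritten parsing code returns, with invented values; source_element stands in for the Element the agent was given:

from camel.storages.graph_storages.graph_element import (
    GraphElement,
    Node,
    Relationship,
)

alice = Node(id="Alice", type="Person", properties={"source": "agent_created"})
camel = Node(id="CAMEL", type="Project", properties={"source": "agent_created"})

rel = Relationship(
    subj=alice,
    obj=camel,
    type="Contributes_To",
    properties={"source": "agent_created"},
)

graph = GraphElement(
    nodes=[alice, camel],
    relationships=[rel],
    source=source_element,  # placeholder: the originating Element
)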
camel/agents/role_assignment_agent.py
CHANGED
@@ -20,7 +20,19 @@ from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
 from camel.types import RoleType
 
+# AgentOps decorator setting
+try:
+    import os
 
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import track_agent
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    from camel.utils import track_agent
+
+
+@track_agent(name="RoleAssignmentAgent")
 class RoleAssignmentAgent(ChatAgent):
     r"""An agent that generates role names based on the task prompt.
 
camel/agents/search_agent.py
CHANGED
@@ -20,7 +20,19 @@ from camel.prompts import TextPrompt
 from camel.types import RoleType
 from camel.utils import create_chunks
 
+# AgentOps decorator setting
+try:
+    import os
 
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import track_agent
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    from camel.utils import track_agent
+
+
+@track_agent(name="SearchAgent")
 class SearchAgent(ChatAgent):
     r"""An agent that summarizes text based on a query and evaluates the
     relevance of an answer.
camel/agents/task_agent.py
CHANGED
@@ -20,7 +20,19 @@ from camel.prompts import PromptTemplateGenerator, TextPrompt
 from camel.types import RoleType, TaskType
 from camel.utils import get_task_list
 
+# AgentOps decorator setting
+try:
+    import os
 
+    if os.getenv("AGENTOPS_API_KEY") is not None:
+        from agentops import track_agent
+    else:
+        raise ImportError
+except (ImportError, AttributeError):
+    from camel.utils import track_agent
+
+
+@track_agent(name="TaskSpecifyAgent")
 class TaskSpecifyAgent(ChatAgent):
     r"""An agent that specifies a given task prompt by prompting the user to
     provide more details.
@@ -115,6 +127,7 @@ class TaskSpecifyAgent(ChatAgent):
         return TextPrompt(specified_task_msg.content)
 
 
+@track_agent(name="TaskPlannerAgent")
 class TaskPlannerAgent(ChatAgent):
     r"""An agent that helps divide a task into subtasks based on the input
     task prompt.
@@ -184,6 +197,7 @@ class TaskPlannerAgent(ChatAgent):
         return TextPrompt(sub_tasks_msg.content)
 
 
+@track_agent(name="TaskCreationAgent")
 class TaskCreationAgent(ChatAgent):
     r"""An agent that helps create new tasks based on the objective
     and last completed task. Compared to :obj:`TaskPlannerAgent`,
@@ -298,6 +312,7 @@ Be concrete.
         return get_task_list(sub_tasks_msg.content)
 
 
+@track_agent(name="TaskPrioritizationAgent")
 class TaskPrioritizationAgent(ChatAgent):
     r"""An agent that helps re-prioritize the task list and
     returns numbered prioritized list. Modified from