camel-ai 0.1.5.1__py3-none-any.whl → 0.1.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (78)
  1. camel/agents/__init__.py +2 -0
  2. camel/agents/chat_agent.py +217 -36
  3. camel/agents/deductive_reasoner_agent.py +86 -31
  4. camel/agents/knowledge_graph_agent.py +41 -18
  5. camel/agents/role_assignment_agent.py +4 -1
  6. camel/agents/search_agent.py +122 -0
  7. camel/bots/__init__.py +20 -0
  8. camel/bots/discord_bot.py +103 -0
  9. camel/bots/telegram_bot.py +84 -0
  10. camel/configs/__init__.py +3 -0
  11. camel/configs/anthropic_config.py +1 -1
  12. camel/configs/litellm_config.py +113 -0
  13. camel/embeddings/__init__.py +2 -0
  14. camel/embeddings/openai_embedding.py +2 -2
  15. camel/embeddings/sentence_transformers_embeddings.py +6 -5
  16. camel/embeddings/vlm_embedding.py +146 -0
  17. camel/functions/__init__.py +9 -0
  18. camel/functions/open_api_function.py +150 -29
  19. camel/functions/open_api_specs/biztoc/__init__.py +13 -0
  20. camel/functions/open_api_specs/biztoc/ai-plugin.json +34 -0
  21. camel/functions/open_api_specs/biztoc/openapi.yaml +21 -0
  22. camel/functions/open_api_specs/create_qr_code/__init__.py +13 -0
  23. camel/functions/open_api_specs/create_qr_code/openapi.yaml +44 -0
  24. camel/functions/open_api_specs/nasa_apod/__init__.py +13 -0
  25. camel/functions/open_api_specs/nasa_apod/openapi.yaml +72 -0
  26. camel/functions/open_api_specs/outschool/__init__.py +13 -0
  27. camel/functions/open_api_specs/outschool/ai-plugin.json +34 -0
  28. camel/functions/open_api_specs/outschool/openapi.yaml +1 -0
  29. camel/functions/open_api_specs/outschool/paths/__init__.py +14 -0
  30. camel/functions/open_api_specs/outschool/paths/get_classes.py +29 -0
  31. camel/functions/open_api_specs/outschool/paths/search_teachers.py +29 -0
  32. camel/functions/open_api_specs/security_config.py +21 -0
  33. camel/functions/open_api_specs/web_scraper/__init__.py +13 -0
  34. camel/functions/open_api_specs/web_scraper/ai-plugin.json +34 -0
  35. camel/functions/open_api_specs/web_scraper/openapi.yaml +71 -0
  36. camel/functions/open_api_specs/web_scraper/paths/__init__.py +13 -0
  37. camel/functions/open_api_specs/web_scraper/paths/scraper.py +29 -0
  38. camel/functions/openai_function.py +3 -1
  39. camel/functions/search_functions.py +104 -171
  40. camel/functions/slack_functions.py +2 -1
  41. camel/human.py +3 -1
  42. camel/loaders/base_io.py +3 -1
  43. camel/loaders/unstructured_io.py +16 -22
  44. camel/messages/base.py +135 -46
  45. camel/models/__init__.py +4 -0
  46. camel/models/anthropic_model.py +20 -14
  47. camel/models/base_model.py +2 -0
  48. camel/models/litellm_model.py +112 -0
  49. camel/models/model_factory.py +8 -1
  50. camel/models/open_source_model.py +1 -0
  51. camel/models/openai_model.py +6 -2
  52. camel/models/zhipuai_model.py +125 -0
  53. camel/prompts/__init__.py +2 -0
  54. camel/prompts/base.py +2 -1
  55. camel/prompts/descripte_video_prompt.py +33 -0
  56. camel/prompts/task_prompt_template.py +9 -3
  57. camel/retrievers/auto_retriever.py +20 -11
  58. camel/retrievers/base.py +4 -2
  59. camel/retrievers/bm25_retriever.py +2 -1
  60. camel/retrievers/cohere_rerank_retriever.py +2 -1
  61. camel/retrievers/vector_retriever.py +10 -4
  62. camel/societies/babyagi_playing.py +2 -1
  63. camel/societies/role_playing.py +2 -1
  64. camel/storages/graph_storages/base.py +1 -0
  65. camel/storages/graph_storages/neo4j_graph.py +5 -3
  66. camel/storages/vectordb_storages/base.py +2 -1
  67. camel/storages/vectordb_storages/milvus.py +5 -2
  68. camel/toolkits/github_toolkit.py +120 -26
  69. camel/types/__init__.py +3 -2
  70. camel/types/enums.py +25 -1
  71. camel/utils/__init__.py +11 -2
  72. camel/utils/commons.py +74 -4
  73. camel/utils/constants.py +26 -0
  74. camel/utils/token_counting.py +58 -5
  75. {camel_ai-0.1.5.1.dist-info → camel_ai-0.1.5.2.dist-info}/METADATA +29 -13
  76. camel_ai-0.1.5.2.dist-info/RECORD +148 -0
  77. camel_ai-0.1.5.1.dist-info/RECORD +0 -119
  78. {camel_ai-0.1.5.1.dist-info → camel_ai-0.1.5.2.dist-info}/WHEEL +0 -0
camel/agents/__init__.py CHANGED
@@ -17,6 +17,7 @@ from .critic_agent import CriticAgent
  from .embodied_agent import EmbodiedAgent
  from .knowledge_graph_agent import KnowledgeGraphAgent
  from .role_assignment_agent import RoleAssignmentAgent
+ from .search_agent import SearchAgent
  from .task_agent import (
      TaskCreationAgent,
      TaskPlannerAgent,
@@ -38,5 +39,6 @@ __all__ = [
      'HuggingFaceToolAgent',
      'EmbodiedAgent',
      'RoleAssignmentAgent',
+     'SearchAgent',
      'KnowledgeGraphAgent',
  ]
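Note: together the two hunks above re-export the new agent from the package root (its implementation is the new camel/agents/search_agent.py listed in the file table), so it can be imported directly:

from camel.agents import SearchAgent
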
camel/agents/chat_agent.py CHANGED
@@ -306,7 +306,7 @@ class ChatAgent(BaseAgent):
          tool_calls: List[FunctionCallingRecord] = []
          while True:
              # Format messages and get the token number
-             openai_messages: Optional[List[OpenAIMessage]]
+             openai_messages: list[OpenAIMessage] | None
 
              try:
                  openai_messages, num_tokens = self.memory.get_context()
@@ -314,18 +314,13 @@ class ChatAgent(BaseAgent):
                  return self.step_token_exceed(
                      e.args[1], tool_calls, "max_tokens_exceeded"
                  )
-
-             # Obtain the model's response
-             response = self.model_backend.run(openai_messages)
-
-             if isinstance(response, ChatCompletion):
-                 output_messages, finish_reasons, usage_dict, response_id = (
-                     self.handle_batch_response(response)
-                 )
-             else:
-                 output_messages, finish_reasons, usage_dict, response_id = (
-                     self.handle_stream_response(response, num_tokens)
-                 )
+             (
+                 response,
+                 output_messages,
+                 finish_reasons,
+                 usage_dict,
+                 response_id,
+             ) = self._step_model_response(openai_messages, num_tokens)
 
              if (
                  self.is_tools_added()
@@ -350,38 +345,165 @@ class ChatAgent(BaseAgent):
 
              else:
                  # Function calling disabled or not a function calling
+                 info = self._step_get_info(
+                     output_messages,
+                     finish_reasons,
+                     usage_dict,
+                     response_id,
+                     tool_calls,
+                     num_tokens,
+                 )
+                 break
+
+         return ChatAgentResponse(output_messages, self.terminated, info)
+
+     async def step_async(
+         self,
+         input_message: BaseMessage,
+     ) -> ChatAgentResponse:
+         r"""Performs a single step in the chat session by generating a response
+         to the input message. This agent step can call async function calls.
+
+         Args:
+             input_message (BaseMessage): The input message to the agent.
+                 Its `role` field that specifies the role at backend may be either
+                 `user` or `assistant` but it will be set to `user` anyway since
+                 for the self agent any incoming message is external.
+
+         Returns:
+             ChatAgentResponse: A struct containing the output messages,
+                 a boolean indicating whether the chat session has terminated,
+                 and information about the chat session.
+         """
+         self.update_memory(input_message, OpenAIBackendRole.USER)
+
+         output_messages: List[BaseMessage]
+         info: Dict[str, Any]
+         tool_calls: List[FunctionCallingRecord] = []
+         while True:
+             # Format messages and get the token number
+             openai_messages: list[OpenAIMessage] | None
+
+             try:
+                 openai_messages, num_tokens = self.memory.get_context()
+             except RuntimeError as e:
+                 return self.step_token_exceed(
+                     e.args[1], tool_calls, "max_tokens_exceeded"
+                 )
+             (
+                 response,
+                 output_messages,
+                 finish_reasons,
+                 usage_dict,
+                 response_id,
+             ) = self._step_model_response(openai_messages, num_tokens)
+
+             if (
+                 self.is_tools_added()
+                 and isinstance(response, ChatCompletion)
+                 and response.choices[0].message.tool_calls is not None
+             ):
+                 # Tools added for function calling and not in stream mode
 
-             # Loop over responses terminators, get list of termination
-             # tuples with whether the terminator terminates the agent
-             # and termination reason
-             termination = [
-                 terminator.is_terminated(output_messages)
-                 for terminator in self.response_terminators
-             ]
-             # Terminate the agent if any of the terminator terminates
-             self.terminated, termination_reason = next(
-                 (
-                     (terminated, termination_reason)
-                     for terminated, termination_reason in termination
-                     if terminated
-                 ),
-                 (False, None),
+                 # Do function calling
+                 (
+                     func_assistant_msg,
+                     func_result_msg,
+                     func_record,
+                 ) = await self.step_tool_call_async(response)
+
+                 # Update the messages
+                 self.update_memory(
+                     func_assistant_msg, OpenAIBackendRole.ASSISTANT
                  )
-             # For now only retain the first termination reason
-             if self.terminated and termination_reason is not None:
-                 finish_reasons = [termination_reason] * len(finish_reasons)
+                 self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
 
-             info = self.get_info(
-                 response_id,
-                 usage_dict,
+                 # Record the function calling
+                 tool_calls.append(func_record)
+
+             else:
+                 # Function calling disabled or not a function calling
+                 info = self._step_get_info(
+                     output_messages,
                      finish_reasons,
-                 num_tokens,
+                     usage_dict,
+                     response_id,
                      tool_calls,
+                     num_tokens,
                  )
                  break
 
          return ChatAgentResponse(output_messages, self.terminated, info)
 
+     def _step_model_response(
+         self,
+         openai_messages: list[OpenAIMessage],
+         num_tokens: int,
+     ) -> tuple[
+         ChatCompletion | Stream[ChatCompletionChunk],
+         list[BaseMessage],
+         list[str],
+         dict[str, int],
+         str,
+     ]:
+         r"""Internal function for agent step model response."""
+         # Obtain the model's response
+         response = self.model_backend.run(openai_messages)
+
+         if isinstance(response, ChatCompletion):
+             output_messages, finish_reasons, usage_dict, response_id = (
+                 self.handle_batch_response(response)
+             )
+         else:
+             output_messages, finish_reasons, usage_dict, response_id = (
+                 self.handle_stream_response(response, num_tokens)
+             )
+         return (
+             response,
+             output_messages,
+             finish_reasons,
+             usage_dict,
+             response_id,
+         )
+
+     def _step_get_info(
+         self,
+         output_messages: List[BaseMessage],
+         finish_reasons: List[str],
+         usage_dict: Dict[str, int],
+         response_id: str,
+         tool_calls: List[FunctionCallingRecord],
+         num_tokens: int,
+     ) -> Dict[str, Any]:
+         # Loop over responses terminators, get list of termination
+         # tuples with whether the terminator terminates the agent
+         # and termination reason
+         termination = [
+             terminator.is_terminated(output_messages)
+             for terminator in self.response_terminators
+         ]
+         # Terminate the agent if any of the terminator terminates
+         self.terminated, termination_reason = next(
+             (
+                 (terminated, termination_reason)
+                 for terminated, termination_reason in termination
+                 if terminated
+             ),
+             (False, None),
+         )
+         # For now only retain the first termination reason
+         if self.terminated and termination_reason is not None:
+             finish_reasons = [termination_reason] * len(finish_reasons)
+
+         info = self.get_info(
+             response_id,
+             usage_dict,
+             finish_reasons,
+             num_tokens,
+             tool_calls,
+         )
+         return info
+
      def handle_batch_response(
          self, response: ChatCompletion
      ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
@@ -516,7 +638,7 @@ class ChatAgent(BaseAgent):
          """
          choice = response.choices[0]
          if choice.message.tool_calls is None:
-             raise RuntimeError("Tool calls is None")
+             raise RuntimeError("Tool call is None")
          func_name = choice.message.tool_calls[0].function.name
          func = self.func_dict[func_name]
 
@@ -553,6 +675,65 @@ class ChatAgent(BaseAgent):
          func_record = FunctionCallingRecord(func_name, args, result)
          return assist_msg, func_msg, func_record
 
+     async def step_tool_call_async(
+         self,
+         response: ChatCompletion,
+     ) -> Tuple[
+         FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
+     ]:
+         r"""Execute the async function with arguments following the model's
+         response.
+
+         Args:
+             response (Dict[str, Any]): The response obtained by calling the
+                 model.
+
+         Returns:
+             tuple: A tuple consisting of two obj:`FunctionCallingMessage`,
+                 one about the arguments and the other about the execution
+                 result, and a struct for logging information about this
+                 function call.
+         """
+         # Note that when function calling is enabled, `n` is set to 1.
+         choice = response.choices[0]
+         if choice.message.tool_calls is None:
+             raise RuntimeError("Tool call is None")
+         func_name = choice.message.tool_calls[0].function.name
+         func = self.func_dict[func_name]
+
+         args_str: str = choice.message.tool_calls[0].function.arguments
+         args = json.loads(args_str.replace("'", "\""))
+
+         # Pass the extracted arguments to the indicated function
+         try:
+             result = await func(**args)
+         except Exception:
+             raise ValueError(
+                 f"Execution of function {func.__name__} failed with "
+                 f"arguments being {args}."
+             )
+
+         assist_msg = FunctionCallingMessage(
+             role_name=self.role_name,
+             role_type=self.role_type,
+             meta_dict=None,
+             content="",
+             func_name=func_name,
+             args=args,
+         )
+         func_msg = FunctionCallingMessage(
+             role_name=self.role_name,
+             role_type=self.role_type,
+             meta_dict=None,
+             content="",
+             func_name=func_name,
+             result=result,
+         )
+
+         # Record information about this function call
+         func_record = FunctionCallingRecord(func_name, args, result)
+         return assist_msg, func_msg, func_record
+
      def get_usage_dict(
          self, output_messages: List[BaseMessage], prompt_tokens: int
      ) -> Dict[str, int]:
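Note: the refactor above routes both `step` and the new `step_async` through `_step_model_response` and `_step_get_info`; the async path only differs in awaiting `step_tool_call_async` for tool execution. A minimal, hypothetical usage sketch of the new entry point (not taken from the package's own examples; it assumes the pre-existing `BaseMessage.make_user_message` / `make_assistant_message` helpers and a default model backend configured via `OPENAI_API_KEY`):

import asyncio

from camel.agents import ChatAgent
from camel.messages import BaseMessage

agent = ChatAgent(
    system_message=BaseMessage.make_assistant_message(
        role_name="Assistant", content="You are a helpful assistant."
    )
)

async def main() -> None:
    user_msg = BaseMessage.make_user_message(
        role_name="User", content="Say hello."
    )
    # step_async mirrors step(); when coroutine tools are registered it
    # awaits them through step_tool_call_async instead of calling them directly.
    response = await agent.step_async(user_msg)
    print(response.msgs[0].content)

asyncio.run(main())
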
camel/agents/deductive_reasoner_agent.py CHANGED
@@ -90,52 +90,106 @@ class DeductiveReasonerAgent(ChatAgent):
          """
          self.reset()
 
-         deduce_prompt = """You are a deductive reasoner. You are tasked to complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the STARTING STATE A and the TARGET STATE B. You are given the CONTEXT CONTENT to help you complete the TASK.
- Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY fill in the BLANKs, and DO NOT alter or modify any other part of the template
+         deduce_prompt = """You are a deductive reasoner. You are tasked to
+ complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the
+ STARTING STATE A and the TARGET STATE B. You are given the CONTEXT
+ CONTENT to help you complete the TASK.
+ Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY
+ fill in the BLANKs, and DO NOT alter or modify any other part of the template
 
  ===== MODELING OF DEDUCTIVE REASONING =====
- You are tasked with understanding a mathematical model based on the components ${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``.
+ You are tasked with understanding a mathematical model based on the components
+ ${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``.
  - $A$ represents the known starting state.
  - $B$ represents the known target state.
  - $C$ represents the conditions required to transition from $A$ to $B$.
- - $Q$ represents the quality or effectiveness of the transition from $A$ to $B$.
+ - $Q$ represents the quality or effectiveness of the transition from $A$ to
+ $B$.
  - $L$ represents the path or process from $A$ to $B$.
 
  ===== THOUGHT OF DEDUCTIVE REASONING =====
  1. Define the Parameters of A and B:
- - Characterization: Before delving into transitions, thoroughly understand the nature and boundaries of both $A$ and $B$. This includes the type, properties, constraints, and possible interactions between the two.
- - Contrast and Compare: Highlight the similarities and differences between $A$ and $B$. This comparative analysis will give an insight into what needs changing and what remains constant.
+ - Characterization: Before delving into transitions, thoroughly understand
+ the nature and boundaries of both $A$ and $B$. This includes the type,
+ properties, constraints, and possible interactions between the two.
+ - Contrast and Compare: Highlight the similarities and differences between
+ $A$ and $B$. This comparative analysis will give an insight into what
+ needs changing and what remains constant.
  2. Historical & Empirical Analysis:
- - Previous Transitions according to the Knowledge Base of GPT: (if applicable) Extract conditions and patterns from the historical instances where a similar transition from a state comparable to $A$ moved towards $B$.
- - Scientific Principles: (if applicable) Consider the underlying scientific principles governing or related to the states and their transition. For example, if $A$ and $B$ are physical states, laws of physics might apply.
+ - Previous Transitions according to the Knowledge Base of GPT: (if
+ applicable) Extract conditions and patterns from the historical instances
+ where a similar transition from a state comparable to $A$ moved towards
+ $B$.
+ - Scientific Principles: (if applicable) Consider the underlying
+ scientific principles governing or related to the states and their
+ transition. For example, if $A$ and $B$ are physical states, laws of
+ physics might apply.
  3. Logical Deduction of Conditions ($C$):
- - Direct Path Analysis: What are the immediate and direct conditions required to move from $A$ to $B$?
- - Intermediate States: Are there states between $A$ and $B$ that must be transversed or can be used to make the transition smoother or more efficient? If yes, what is the content?
- - Constraints & Limitations: Identify potential barriers or restrictions in moving from $A$ to $B$. These can be external (e.g., environmental factors) or internal (properties of $A$ or $B$).
- - Resource and Information Analysis: What resources and information are required for the transition? This could be time, entity, factor, code language, software platform, unknowns, etc.
- - External Influences: Consider socio-economic, political, or environmental factors (if applicable) that could influence the transition conditions.
- - Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s, no matter how unconventional they might seem. Utilize analogies, metaphors, or brainstorming techniques to envision possible conditions or paths from $A$ to $B$.
- - The conditions $C$ should be multiple but in one sentence. And each condition should be concerned with one aspect/entity.
+ - Direct Path Analysis: What are the immediate and direct conditions
+ required to move from $A$ to $B$?
+ - Intermediate States: Are there states between $A$ and $B$ that must be
+ transversed or can be used to make the transition smoother or more
+ efficient? If yes, what is the content?
+ - Constraints & Limitations: Identify potential barriers or restrictions
+ in moving from $A$ to $B$. These can be external (e.g., environmental
+ factors) or internal (properties of $A$ or $B$).
+ - Resource and Information Analysis: What resources and information are
+ required for the transition? This could be time, entity, factor, code
+ language, software platform, unknowns, etc.
+ - External Influences: Consider socio-economic, political, or
+ environmental factors (if applicable) that could influence the transition
+ conditions.
+ - Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s,
+ no matter how unconventional they might seem. Utilize analogies,
+ metaphors, or brainstorming techniques to envision possible conditions or
+ paths from $A$ to $B$.
+ - The conditions $C$ should be multiple but in one sentence. And each
+ condition should be concerned with one aspect/entity.
  4. Entity/Label Recognition of Conditions ($C$):
- - Identify and categorize entities of Conditions ($C$) such as the names, locations, dates, specific technical terms or contextual parameters that might be associated with events, innovations post-2022.
- - The output of the entities/labels will be used as tags or labels for semantic similarity searches. The entities/labels may be the words, or phrases, each of them should contain valuable, high information entropy information, and should be independent.
- - Ensure that the identified entities are formatted in a manner suitable for database indexing and retrieval. Organize the entities into categories, and combine the category with its instance into a continuous phrase, without using colons or other separators.
- - Format these entities for database indexing: output the category rather than its instance/content into a continuous phrase. For example, instead of "Jan. 02", identify it as "Event time".
+ - Identify and categorize entities of Conditions ($C$) such as the names,
+ locations, dates, specific technical terms or contextual parameters that
+ might be associated with events, innovations post-2022.
+ - The output of the entities/labels will be used as tags or labels for
+ semantic similarity searches. The entities/labels may be the words, or
+ phrases, each of them should contain valuable, high information entropy
+ information, and should be independent.
+ - Ensure that the identified entities are formatted in a manner suitable
+ for database indexing and retrieval. Organize the entities into
+ categories, and combine the category with its instance into a continuous
+ phrase, without using colons or other separators.
+ - Format these entities for database indexing: output the category rather
+ than its instance/content into a continuous phrase. For example, instead
+ of "Jan. 02", identify it as "Event time".
  5. Quality Assessment ($Q$):
- - Efficiency: How efficient is the transition from $A$ to $B$, which measures the resources used versus the desired outcome?
- - Effectiveness: Did the transition achieve the desired outcome or was the target state achieved as intended?
- - Safety & Risks: Assess any risks associated with the transition and the measures to mitigate them.
- - Feedback Mechanisms: Incorporate feedback loops to continuously monitor and adjust the quality of transition, making it more adaptive.
+ - Efficiency: How efficient is the transition from $A$ to $B$, which
+ measures the resources used versus the desired outcome?
+ - Effectiveness: Did the transition achieve the desired outcome or was the
+ target state achieved as intended?
+ - Safety & Risks: Assess any risks associated with the transition and the
+ measures to mitigate them.
+ - Feedback Mechanisms: Incorporate feedback loops to continuously monitor
+ and adjust the quality of transition, making it more adaptive.
  6. Iterative Evaluation:
- - Test & Refine: Based on the initially deduced conditions and assessed quality, iterate the process to refine and optimize the transition. This might involve tweaking conditions, employing different paths, or changing resources.
- - Feedback Integration: Use feedback to make improvements and increase the quality of the transition.
- 7. Real-world scenarios often present challenges that may not be captured by models and frameworks. While using the model, maintain an adaptive mindset:
- - Scenario Exploration: Continuously imagine various possible scenarios, both positive and negative, to prepare for unexpected events.
- - Flexibility: Be prepared to modify conditions ($C$) or alter the path/process ($L$) if unforeseen challenges arise.
- - Feedback Integration: Rapidly integrate feedback from actual implementations to adjust the model's application, ensuring relevancy and effectiveness.
+ - Test & Refine: Based on the initially deduced conditions and assessed
+ quality, iterate the process to refine and optimize the transition. This
+ might involve tweaking conditions, employing different paths, or changing
+ resources.
+ - Feedback Integration: Use feedback to make improvements and increase the
+ quality of the transition.
+ 7. Real-world scenarios often present challenges that may not be captured by
+ models and frameworks. While using the model, maintain an adaptive mindset:
+ - Scenario Exploration: Continuously imagine various possible scenarios,
+ both positive and negative, to prepare for unexpected events.
+ - Flexibility: Be prepared to modify conditions ($C$) or alter the path/
+ process ($L$) if unforeseen challenges arise.
+ - Feedback Integration: Rapidly integrate feedback from actual
+ implementations to adjust the model's application, ensuring relevancy and
+ effectiveness.
 
  ===== TASK =====
- Given the starting state $A$ and the target state $B$, assuming that a path $L$ always exists between $A$ and $B$, how can one deduce or identify the necessary conditions $C$ and the quality $Q$ of the transition?
+ Given the starting state $A$ and the target state $B$, assuming that a path
+ $L$ always exists between $A$ and $B$, how can one deduce or identify the
+ necessary conditions $C$ and the quality $Q$ of the transition?
 
  ===== STARTING STATE $A$ =====
  {starting_state}
@@ -150,7 +204,8 @@ Given the starting state $A$ and the target state $B$, assuming that a path $L$
  - Logical Deduction of Conditions ($C$) (multiple conditions can be deduced):
  condition <NUM>:
  <BLANK>.
- - Entity/Label Recognition of Conditions:\n[<BLANK>, <BLANK>, ...] (include square brackets)
+ - Entity/Label Recognition of Conditions:\n[<BLANK>, <BLANK>, ...] (include
+ square brackets)
  - Quality Assessment ($Q$) (do not use symbols):
  <BLANK>.
  - Iterative Evaluation:\n<BLANK>/None"""
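Note: the rewrap above happens inside a triple-quoted string, so the new line breaks become part of `deduce_prompt` itself; this differs from the adjacent-literal style used for `rel_pattern` in knowledge_graph_agent.py below, which concatenates without inserting newlines. A small illustrative snippet (generic Python semantics, not package code):

text_a = """first
second"""
text_b = (
    "first "
    "second"
)
assert "\n" in text_a      # the wrap became part of the string
assert "\n" not in text_b  # adjacent literals add nothing
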
camel/agents/knowledge_graph_agent.py CHANGED
@@ -26,37 +26,49 @@ from camel.storages.graph_storages.graph_element import (
  from camel.types import ModelType, RoleType
 
  text_prompt = """
- You are tasked with extracting nodes and relationships from given content and structures them into Node and Relationship objects. Here's the outline of what you needs to do:
+ You are tasked with extracting nodes and relationships from given content and
+ structures them into Node and Relationship objects. Here's the outline of what
+ you needs to do:
 
  Content Extraction:
- You should be able to process input content and identify entities mentioned within it.
- Entities can be any noun phrases or concepts that represent distinct entities in the context of the given content.
+ You should be able to process input content and identify entities mentioned
+ within it.
+ Entities can be any noun phrases or concepts that represent distinct entities
+ in the context of the given content.
 
  Node Extraction:
  For each identified entity, you should create a Node object.
  Each Node object should have a unique identifier (id) and a type (type).
- Additional properties associated with the node can also be extracted and stored.
+ Additional properties associated with the node can also be extracted and
+ stored.
 
  Relationship Extraction:
  You should identify relationships between entities mentioned in the content.
  For each relationship, create a Relationship object.
- A Relationship object should have a subject (subj) and an object (obj) which are Node objects representing the entities involved in the relationship.
- Each relationship should also have a type (type), and additional properties if applicable.
+ A Relationship object should have a subject (subj) and an object (obj) which
+ are Node objects representing the entities involved in the relationship.
+ Each relationship should also have a type (type), and additional properties if
+ applicable.
 
  Output Formatting:
- The extracted nodes and relationships should be formatted as instances of the provided Node and Relationship classes.
+ The extracted nodes and relationships should be formatted as instances of the
+ provided Node and Relationship classes.
  Ensure that the extracted data adheres to the structure defined by the classes.
- Output the structured data in a format that can be easily validated against the provided code.
+ Output the structured data in a format that can be easily validated against
+ the provided code.
 
  Instructions for you:
  Read the provided content thoroughly.
- Identify distinct entities mentioned in the content and categorize them as nodes.
- Determine relationships between these entities and represent them as directed relationships.
+ Identify distinct entities mentioned in the content and categorize them as
+ nodes.
+ Determine relationships between these entities and represent them as directed
+ relationships.
  Provide the extracted nodes and relationships in the specified format below.
  Example for you:
 
  Example Content:
- "John works at XYZ Corporation. He is a software engineer. The company is located in New York City."
+ "John works at XYZ Corporation. He is a software engineer. The company is
+ located in New York City."
 
  Expected Output:
 
@@ -68,18 +80,23 @@ Node(id='New York City', type='Location', properties={'agent_generated'})
 
  Relationships:
 
- Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ Corporation', type='Organization'), type='WorksAt', properties={'agent_generated'})
- Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City', type='Location'), type='ResidesIn', properties={'agent_generated'})
+ Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
+ Corporation', type='Organization'), type='WorksAt', properties=
+ {'agent_generated'})
+ Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
+ type='Location'), type='ResidesIn', properties={'agent_generated'})
 
  ===== TASK =====
- Please extracts nodes and relationships from given content and structures them into Node and Relationship objects.
+ Please extracts nodes and relationships from given content and structures them
+ into Node and Relationship objects.
 
  {task}
  """
 
 
  class KnowledgeGraphAgent(ChatAgent):
-     r"""An agent that can extract node and relationship information for different entities from given `Element` content.
+     r"""An agent that can extract node and relationship information for
+     different entities from given `Element` content.
 
      Attributes:
          task_prompt (TextPrompt): A prompt for the agent to extract node and
@@ -106,7 +123,8 @@ class KnowledgeGraphAgent(ChatAgent):
              content="Your mission is to transform unstructured content "
              "intostructured graph data. Extract nodes and relationships with "
              "precision, and let the connections unfold. Your graphs will "
-             "illuminate the hidden connections within the chaos of information.",
+             "illuminate the hidden connections within the chaos of "
+             "information.",
          )
          super().__init__(system_message, model_type, model_config)
 
@@ -124,7 +142,8 @@ class KnowledgeGraphAgent(ChatAgent):
 
          Returns:
              Union[str, GraphElement]: The extracted node and relationship
-                 information. If `parse_graph_elements` is `True` then return `GraphElement`, else return `str`.
+                 information. If `parse_graph_elements` is `True` then return
+                 `GraphElement`, else return `str`.
          """
          self.reset()
          self.element = element
@@ -191,7 +210,11 @@ class KnowledgeGraphAgent(ChatAgent):
 
          # Regular expressions to extract nodes and relationships
          node_pattern = r"Node\(id='(.*?)', type='(.*?)', properties=(.*?)\)"
-         rel_pattern = r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', properties=\{(.*?)\}\)"
+         rel_pattern = (
+             r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
+             r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', "
+             r"properties=\{(.*?)\}\)"
+         )
 
          nodes = {}
          relationships = []
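Note: splitting `rel_pattern` across adjacent raw-string literals, as in the hunk above, concatenates to exactly the same regex as the original one-line version, so the parsing behaviour is unchanged. A quick standalone check against the example output embedded in `text_prompt` (plain `re`, nothing package-specific):

import re

rel_pattern = (
    r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
    r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)', "
    r"properties=\{(.*?)\}\)"
)

sample = (
    "Relationship(subj=Node(id='John', type='Person'), "
    "obj=Node(id='XYZ Corporation', type='Organization'), "
    "type='WorksAt', properties={'agent_generated'})"
)

# One tuple per match: (subj id, subj type, obj id, obj type, rel type, properties)
print(re.findall(rel_pattern, sample))
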
camel/agents/role_assignment_agent.py CHANGED
@@ -22,6 +22,7 @@ from camel.types import ModelType, RoleType
 
  class RoleAssignmentAgent(ChatAgent):
      r"""An agent that generates role names based on the task prompt.
+
      Attributes:
          role_assignment_prompt (TextPrompt): A prompt for the agent to generate
              role names.
@@ -115,7 +116,9 @@ class RoleAssignmentAgent(ChatAgent):
          ]
 
          if len(role_names) != num_roles or len(role_descriptions) != num_roles:
-             raise RuntimeError("Got None or insufficient information of roles.")
+             raise RuntimeError(
+                 "Got None or insufficient information of roles."
+             )
          if terminated:
              raise RuntimeError("Role assignment failed.")