camel-ai 0.1.5__tar.gz → 0.1.5.2__tar.gz

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (148)
  1. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/PKG-INFO +29 -13
  2. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/README.md +17 -11
  3. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/__init__.py +2 -0
  4. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/chat_agent.py +217 -36
  5. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/deductive_reasoner_agent.py +86 -31
  6. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/knowledge_graph_agent.py +41 -18
  7. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/role_assignment_agent.py +4 -1
  8. camel_ai-0.1.5.2/camel/agents/search_agent.py +122 -0
  9. camel_ai-0.1.5.2/camel/bots/__init__.py +20 -0
  10. camel_ai-0.1.5.2/camel/bots/discord_bot.py +103 -0
  11. camel_ai-0.1.5.2/camel/bots/telegram_bot.py +84 -0
  12. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/configs/__init__.py +3 -0
  13. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/configs/anthropic_config.py +1 -1
  14. camel_ai-0.1.5.2/camel/configs/litellm_config.py +113 -0
  15. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/embeddings/__init__.py +2 -0
  16. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/embeddings/openai_embedding.py +2 -2
  17. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/embeddings/sentence_transformers_embeddings.py +6 -5
  18. camel_ai-0.1.5.2/camel/embeddings/vlm_embedding.py +146 -0
  19. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/__init__.py +9 -0
  20. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/open_api_function.py +150 -29
  21. camel_ai-0.1.5.2/camel/functions/open_api_specs/biztoc/ai-plugin.json +34 -0
  22. camel_ai-0.1.5.2/camel/functions/open_api_specs/biztoc/openapi.yaml +21 -0
  23. camel_ai-0.1.5.2/camel/functions/open_api_specs/create_qr_code/openapi.yaml +44 -0
  24. camel_ai-0.1.5.2/camel/functions/open_api_specs/klarna/__init__.py +13 -0
  25. camel_ai-0.1.5.2/camel/functions/open_api_specs/nasa_apod/__init__.py +13 -0
  26. camel_ai-0.1.5.2/camel/functions/open_api_specs/nasa_apod/openapi.yaml +72 -0
  27. camel_ai-0.1.5.2/camel/functions/open_api_specs/outschool/__init__.py +13 -0
  28. camel_ai-0.1.5.2/camel/functions/open_api_specs/outschool/ai-plugin.json +34 -0
  29. camel_ai-0.1.5.2/camel/functions/open_api_specs/outschool/openapi.yaml +1 -0
  30. camel_ai-0.1.5.2/camel/functions/open_api_specs/outschool/paths/__init__.py +14 -0
  31. camel_ai-0.1.5.2/camel/functions/open_api_specs/outschool/paths/get_classes.py +29 -0
  32. camel_ai-0.1.5.2/camel/functions/open_api_specs/outschool/paths/search_teachers.py +29 -0
  33. camel_ai-0.1.5.2/camel/functions/open_api_specs/security_config.py +21 -0
  34. camel_ai-0.1.5.2/camel/functions/open_api_specs/speak/__init__.py +13 -0
  35. camel_ai-0.1.5.2/camel/functions/open_api_specs/web_scraper/__init__.py +13 -0
  36. camel_ai-0.1.5.2/camel/functions/open_api_specs/web_scraper/ai-plugin.json +34 -0
  37. camel_ai-0.1.5.2/camel/functions/open_api_specs/web_scraper/openapi.yaml +71 -0
  38. camel_ai-0.1.5.2/camel/functions/open_api_specs/web_scraper/paths/__init__.py +13 -0
  39. camel_ai-0.1.5.2/camel/functions/open_api_specs/web_scraper/paths/scraper.py +29 -0
  40. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/openai_function.py +3 -1
  41. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/search_functions.py +104 -171
  42. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/slack_functions.py +2 -1
  43. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/human.py +3 -1
  44. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/loaders/base_io.py +3 -1
  45. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/loaders/unstructured_io.py +16 -22
  46. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/messages/base.py +135 -46
  47. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/models/__init__.py +4 -0
  48. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/models/anthropic_model.py +20 -14
  49. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/models/base_model.py +2 -0
  50. camel_ai-0.1.5.2/camel/models/litellm_model.py +112 -0
  51. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/models/model_factory.py +8 -1
  52. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/models/open_source_model.py +1 -0
  53. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/models/openai_model.py +6 -2
  54. camel_ai-0.1.5.2/camel/models/zhipuai_model.py +125 -0
  55. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/__init__.py +2 -0
  56. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/base.py +2 -1
  57. camel_ai-0.1.5.2/camel/prompts/descripte_video_prompt.py +33 -0
  58. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/task_prompt_template.py +9 -3
  59. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/retrievers/auto_retriever.py +20 -11
  60. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/retrievers/base.py +4 -2
  61. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/retrievers/bm25_retriever.py +2 -1
  62. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/retrievers/cohere_rerank_retriever.py +2 -1
  63. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/retrievers/vector_retriever.py +10 -4
  64. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/societies/babyagi_playing.py +2 -1
  65. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/societies/role_playing.py +2 -1
  66. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/graph_storages/base.py +1 -0
  67. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/graph_storages/neo4j_graph.py +5 -3
  68. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/vectordb_storages/base.py +2 -1
  69. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/vectordb_storages/milvus.py +5 -2
  70. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/toolkits/github_toolkit.py +120 -26
  71. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/types/__init__.py +3 -2
  72. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/types/enums.py +25 -1
  73. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/utils/__init__.py +11 -2
  74. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/utils/commons.py +74 -4
  75. camel_ai-0.1.5.2/camel/utils/constants.py +26 -0
  76. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/utils/token_counting.py +58 -5
  77. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/pyproject.toml +49 -7
  78. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/__init__.py +0 -0
  79. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/base.py +0 -0
  80. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/critic_agent.py +0 -0
  81. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/embodied_agent.py +0 -0
  82. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/task_agent.py +0 -0
  83. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/tool_agents/__init__.py +0 -0
  84. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/tool_agents/base.py +0 -0
  85. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
  86. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/configs/base_config.py +0 -0
  87. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/configs/openai_config.py +0 -0
  88. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/embeddings/base.py +0 -0
  89. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/google_maps_function.py +0 -0
  90. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/math_functions.py +0 -0
  91. {camel_ai-0.1.5/camel/functions/open_api_specs/coursera → camel_ai-0.1.5.2/camel/functions/open_api_specs/biztoc}/__init__.py +0 -0
  92. {camel_ai-0.1.5/camel/functions/open_api_specs/klarna → camel_ai-0.1.5.2/camel/functions/open_api_specs/coursera}/__init__.py +0 -0
  93. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/open_api_specs/coursera/openapi.yaml +0 -0
  94. {camel_ai-0.1.5/camel/functions/open_api_specs/speak → camel_ai-0.1.5.2/camel/functions/open_api_specs/create_qr_code}/__init__.py +0 -0
  95. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/open_api_specs/klarna/openapi.yaml +0 -0
  96. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/open_api_specs/speak/openapi.yaml +0 -0
  97. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/retrieval_functions.py +0 -0
  98. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/twitter_function.py +0 -0
  99. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/functions/weather_functions.py +0 -0
  100. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/generators.py +0 -0
  101. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/interpreters/__init__.py +0 -0
  102. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/interpreters/base.py +0 -0
  103. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/interpreters/internal_python_interpreter.py +0 -0
  104. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/interpreters/interpreter_error.py +0 -0
  105. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/interpreters/subprocess_interpreter.py +0 -0
  106. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/loaders/__init__.py +0 -0
  107. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/memories/__init__.py +0 -0
  108. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/memories/agent_memories.py +0 -0
  109. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/memories/base.py +0 -0
  110. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/memories/blocks/__init__.py +0 -0
  111. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/memories/blocks/chat_history_block.py +0 -0
  112. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/memories/blocks/vectordb_block.py +0 -0
  113. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/memories/context_creators/__init__.py +0 -0
  114. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/memories/context_creators/score_based.py +0 -0
  115. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/memories/records.py +0 -0
  116. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/messages/__init__.py +0 -0
  117. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/messages/func_message.py +0 -0
  118. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/models/openai_audio_models.py +0 -0
  119. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/models/stub_model.py +0 -0
  120. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/ai_society.py +0 -0
  121. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/code.py +0 -0
  122. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/evaluation.py +0 -0
  123. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/misalignment.py +0 -0
  124. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/object_recognition.py +0 -0
  125. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/prompt_templates.py +0 -0
  126. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/role_description_prompt_template.py +0 -0
  127. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/solution_extraction.py +0 -0
  128. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/prompts/translation.py +0 -0
  129. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/responses/__init__.py +0 -0
  130. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/responses/agent_responses.py +0 -0
  131. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/retrievers/__init__.py +0 -0
  132. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/societies/__init__.py +0 -0
  133. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/__init__.py +0 -0
  134. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/graph_storages/__init__.py +0 -0
  135. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/graph_storages/graph_element.py +0 -0
  136. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/key_value_storages/__init__.py +0 -0
  137. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/key_value_storages/base.py +0 -0
  138. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/key_value_storages/in_memory.py +0 -0
  139. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/key_value_storages/json.py +0 -0
  140. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/vectordb_storages/__init__.py +0 -0
  141. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/storages/vectordb_storages/qdrant.py +0 -0
  142. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/terminators/__init__.py +0 -0
  143. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/terminators/base.py +0 -0
  144. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/terminators/response_terminator.py +0 -0
  145. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/terminators/token_limit_terminator.py +0 -0
  146. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/toolkits/__init__.py +0 -0
  147. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/toolkits/base.py +0 -0
  148. {camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/types/openai_types.py +0 -0

{camel_ai-0.1.5 → camel_ai-0.1.5.2}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: camel-ai
- Version: 0.1.5
+ Version: 0.1.5.2
  Summary: Communicative Agents for AI Society Study
  Home-page: https://www.camel-ai.org/
  License: Apache-2.0
@@ -16,37 +16,47 @@ Provides-Extra: all
  Provides-Extra: encoders
  Provides-Extra: graph-storages
  Provides-Extra: huggingface-agent
+ Provides-Extra: model-platforms
  Provides-Extra: retrievers
  Provides-Extra: test
  Provides-Extra: tools
  Provides-Extra: vector-databases
  Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
- Requires-Dist: anthropic (>=0.21.3,<0.22.0)
+ Requires-Dist: anthropic (>=0.28.0,<0.29.0)
  Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
  Requires-Dist: cohere (>=4.56,<5.0) ; extra == "retrievers" or extra == "all"
  Requires-Dist: colorama (>=0,<1)
+ Requires-Dist: curl_cffi (==0.6.2)
  Requires-Dist: datasets (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
  Requires-Dist: diffusers (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
+ Requires-Dist: discord.py (>=2.3.2,<3.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: docstring-parser (>=0.15,<0.16)
  Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
+ Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
+ Requires-Dist: imageio (>=2.34.1,<3.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: jsonschema (>=4,<5)
+ Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
  Requires-Dist: mock (>=5,<6) ; extra == "test"
  Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
+ Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
  Requires-Dist: numpy (>=1,<2)
  Requires-Dist: openai (>=1.2.3,<2.0.0)
  Requires-Dist: openapi-spec-validator (>=0.7.1,<0.8.0) ; extra == "tools" or extra == "all"
  Requires-Dist: opencv-python (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
  Requires-Dist: pathlib (>=1.0.1,<2.0.0)
+ Requires-Dist: pillow (>=10.2.0,<11.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: prance (>=23.6.21.0,<24.0.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: protobuf (>=4,<5)
+ Requires-Dist: pyTelegramBotAPI (>=4.18.0,<5.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: pydantic (>=1.9,<3)
  Requires-Dist: pydub (>=0.25.1,<0.26.0) ; extra == "tools" or extra == "all"
  Requires-Dist: pygithub (>=2.3.0,<3.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: pymilvus (>=2.4.0,<3.0.0) ; extra == "vector-databases" or extra == "all"
  Requires-Dist: pyowm (>=3.3.0,<4.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: pytest (>=7,<8) ; extra == "test"
+ Requires-Dist: pytest-asyncio (>=0.23.0,<0.24.0) ; extra == "test"
  Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "vector-databases" or extra == "all"
  Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "all"
  Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
@@ -121,6 +131,10 @@ To install the base CAMEL library:
  pip install camel-ai
  ```
  Some features require extra dependencies:
+ - To install with all dependencies:
+ ```bash
+ pip install 'camel-ai[all]'
+ ```
  - To use the HuggingFace agents:
  ```bash
  pip install 'camel-ai[huggingface-agent]'
@@ -129,10 +143,6 @@ Some features require extra dependencies:
  ```bash
  pip install 'camel-ai[tools]'
  ```
- - To install with all dependencies:
- ```bash
- pip install 'camel-ai[all]'
- ```

  ### From Source

@@ -147,14 +157,20 @@ git clone https://github.com/camel-ai/camel.git
  # Change directory into project directory
  cd camel

- # Activate camel virtual environment
+ # If you didn't install peotry before
+ pip install poetry # (Optional)
+
+ # We suggest using python 3.10
+ poetry env use python3.10 # (Optional)
+
+ # Activate CAMEL virtual environment
  poetry shell

- # Install camel from source
- # It takes about 90 seconds to resolve dependencies
+ # Install the base CAMEL library
+ # It takes about 90 seconds
  poetry install

- # Or if you want to use all other extra packages
+ # Install CAMEL with all dependencies
  poetry install -E all # (Optional)

  # Exit the virtual environment
@@ -166,16 +182,16 @@ Install `CAMEL` from source with conda and pip:
  # Create a conda virtual environment
  conda create --name camel python=3.10

- # Activate camel conda environment
+ # Activate CAMEL conda environment
  conda activate camel

  # Clone github repo
- git clone -b v0.1.5 https://github.com/camel-ai/camel.git
+ git clone -b v0.1.5.2 https://github.com/camel-ai/camel.git

  # Change directory into project directory
  cd camel

- # Install camel from source
+ # Install CAMEL from source
  pip install -e .

  # Or if you want to use all other extra packages
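
The metadata changes above add a `model-platforms` extra (pulling in `litellm`) next to the existing extras, plus new `tools` dependencies such as `discord.py`, `pyTelegramBotAPI`, and `duckduckgo-search`. A minimal install sketch, assuming the new extra is used with pip's standard extras syntax, just as the README does for `tools` and `all`:

```bash
# Pin to this release and pull in the new model-platforms extra (LiteLLM backend)
pip install 'camel-ai[model-platforms]==0.1.5.2'
```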

{camel_ai-0.1.5 → camel_ai-0.1.5.2}/README.md

@@ -55,6 +55,10 @@ To install the base CAMEL library:
  pip install camel-ai
  ```
  Some features require extra dependencies:
+ - To install with all dependencies:
+ ```bash
+ pip install 'camel-ai[all]'
+ ```
  - To use the HuggingFace agents:
  ```bash
  pip install 'camel-ai[huggingface-agent]'
@@ -63,10 +67,6 @@ Some features require extra dependencies:
  ```bash
  pip install 'camel-ai[tools]'
  ```
- - To install with all dependencies:
- ```bash
- pip install 'camel-ai[all]'
- ```

  ### From Source

@@ -81,14 +81,20 @@ git clone https://github.com/camel-ai/camel.git
  # Change directory into project directory
  cd camel

- # Activate camel virtual environment
+ # If you didn't install peotry before
+ pip install poetry # (Optional)
+
+ # We suggest using python 3.10
+ poetry env use python3.10 # (Optional)
+
+ # Activate CAMEL virtual environment
  poetry shell

- # Install camel from source
- # It takes about 90 seconds to resolve dependencies
+ # Install the base CAMEL library
+ # It takes about 90 seconds
  poetry install

- # Or if you want to use all other extra packages
+ # Install CAMEL with all dependencies
  poetry install -E all # (Optional)

  # Exit the virtual environment
@@ -100,16 +106,16 @@ Install `CAMEL` from source with conda and pip:
  # Create a conda virtual environment
  conda create --name camel python=3.10

- # Activate camel conda environment
+ # Activate CAMEL conda environment
  conda activate camel

  # Clone github repo
- git clone -b v0.1.5 https://github.com/camel-ai/camel.git
+ git clone -b v0.1.5.2 https://github.com/camel-ai/camel.git

  # Change directory into project directory
  cd camel

- # Install camel from source
+ # Install CAMEL from source
  pip install -e .

  # Or if you want to use all other extra packages

{camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/__init__.py

@@ -17,6 +17,7 @@ from .critic_agent import CriticAgent
  from .embodied_agent import EmbodiedAgent
  from .knowledge_graph_agent import KnowledgeGraphAgent
  from .role_assignment_agent import RoleAssignmentAgent
+ from .search_agent import SearchAgent
  from .task_agent import (
  TaskCreationAgent,
  TaskPlannerAgent,
@@ -38,5 +39,6 @@ __all__ = [
  'HuggingFaceToolAgent',
  'EmbodiedAgent',
  'RoleAssignmentAgent',
+ 'SearchAgent',
  'KnowledgeGraphAgent',
  ]

{camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/chat_agent.py

@@ -306,7 +306,7 @@ class ChatAgent(BaseAgent):
  tool_calls: List[FunctionCallingRecord] = []
  while True:
  # Format messages and get the token number
- openai_messages: Optional[List[OpenAIMessage]]
+ openai_messages: list[OpenAIMessage] | None

  try:
  openai_messages, num_tokens = self.memory.get_context()
@@ -314,18 +314,13 @@
  return self.step_token_exceed(
  e.args[1], tool_calls, "max_tokens_exceeded"
  )
-
- # Obtain the model's response
- response = self.model_backend.run(openai_messages)
-
- if isinstance(response, ChatCompletion):
- output_messages, finish_reasons, usage_dict, response_id = (
- self.handle_batch_response(response)
- )
- else:
- output_messages, finish_reasons, usage_dict, response_id = (
- self.handle_stream_response(response, num_tokens)
- )
+ (
+ response,
+ output_messages,
+ finish_reasons,
+ usage_dict,
+ response_id,
+ ) = self._step_model_response(openai_messages, num_tokens)

  if (
  self.is_tools_added()
@@ -350,38 +345,165 @@

  else:
  # Function calling disabled or not a function calling
+ info = self._step_get_info(
+ output_messages,
+ finish_reasons,
+ usage_dict,
+ response_id,
+ tool_calls,
+ num_tokens,
+ )
+ break
+
+ return ChatAgentResponse(output_messages, self.terminated, info)
+
+ async def step_async(
+ self,
+ input_message: BaseMessage,
+ ) -> ChatAgentResponse:
+ r"""Performs a single step in the chat session by generating a response
+ to the input message. This agent step can call async function calls.
+
+ Args:
+ input_message (BaseMessage): The input message to the agent.
+ Its `role` field that specifies the role at backend may be either
+ `user` or `assistant` but it will be set to `user` anyway since
+ for the self agent any incoming message is external.
+
+ Returns:
+ ChatAgentResponse: A struct containing the output messages,
+ a boolean indicating whether the chat session has terminated,
+ and information about the chat session.
+ """
+ self.update_memory(input_message, OpenAIBackendRole.USER)
+
+ output_messages: List[BaseMessage]
+ info: Dict[str, Any]
+ tool_calls: List[FunctionCallingRecord] = []
+ while True:
+ # Format messages and get the token number
+ openai_messages: list[OpenAIMessage] | None
+
+ try:
+ openai_messages, num_tokens = self.memory.get_context()
+ except RuntimeError as e:
+ return self.step_token_exceed(
+ e.args[1], tool_calls, "max_tokens_exceeded"
+ )
+ (
+ response,
+ output_messages,
+ finish_reasons,
+ usage_dict,
+ response_id,
+ ) = self._step_model_response(openai_messages, num_tokens)
+
+ if (
+ self.is_tools_added()
+ and isinstance(response, ChatCompletion)
+ and response.choices[0].message.tool_calls is not None
+ ):
+ # Tools added for function calling and not in stream mode

- # Loop over responses terminators, get list of termination
- # tuples with whether the terminator terminates the agent
- # and termination reason
- termination = [
- terminator.is_terminated(output_messages)
- for terminator in self.response_terminators
- ]
- # Terminate the agent if any of the terminator terminates
- self.terminated, termination_reason = next(
- (
- (terminated, termination_reason)
- for terminated, termination_reason in termination
- if terminated
- ),
- (False, None),
+ # Do function calling
+ (
+ func_assistant_msg,
+ func_result_msg,
+ func_record,
+ ) = await self.step_tool_call_async(response)
+
+ # Update the messages
+ self.update_memory(
+ func_assistant_msg, OpenAIBackendRole.ASSISTANT
  )
- # For now only retain the first termination reason
- if self.terminated and termination_reason is not None:
- finish_reasons = [termination_reason] * len(finish_reasons)
+ self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)

- info = self.get_info(
- response_id,
- usage_dict,
+ # Record the function calling
+ tool_calls.append(func_record)
+
+ else:
+ # Function calling disabled or not a function calling
+ info = self._step_get_info(
+ output_messages,
  finish_reasons,
- num_tokens,
+ usage_dict,
+ response_id,
  tool_calls,
+ num_tokens,
  )
  break

  return ChatAgentResponse(output_messages, self.terminated, info)

+ def _step_model_response(
+ self,
+ openai_messages: list[OpenAIMessage],
+ num_tokens: int,
+ ) -> tuple[
+ ChatCompletion | Stream[ChatCompletionChunk],
+ list[BaseMessage],
+ list[str],
+ dict[str, int],
+ str,
+ ]:
+ r"""Internal function for agent step model response."""
+ # Obtain the model's response
+ response = self.model_backend.run(openai_messages)
+
+ if isinstance(response, ChatCompletion):
+ output_messages, finish_reasons, usage_dict, response_id = (
+ self.handle_batch_response(response)
+ )
+ else:
+ output_messages, finish_reasons, usage_dict, response_id = (
+ self.handle_stream_response(response, num_tokens)
+ )
+ return (
+ response,
+ output_messages,
+ finish_reasons,
+ usage_dict,
+ response_id,
+ )
+
+ def _step_get_info(
+ self,
+ output_messages: List[BaseMessage],
+ finish_reasons: List[str],
+ usage_dict: Dict[str, int],
+ response_id: str,
+ tool_calls: List[FunctionCallingRecord],
+ num_tokens: int,
+ ) -> Dict[str, Any]:
+ # Loop over responses terminators, get list of termination
+ # tuples with whether the terminator terminates the agent
+ # and termination reason
+ termination = [
+ terminator.is_terminated(output_messages)
+ for terminator in self.response_terminators
+ ]
+ # Terminate the agent if any of the terminator terminates
+ self.terminated, termination_reason = next(
+ (
+ (terminated, termination_reason)
+ for terminated, termination_reason in termination
+ if terminated
+ ),
+ (False, None),
+ )
+ # For now only retain the first termination reason
+ if self.terminated and termination_reason is not None:
+ finish_reasons = [termination_reason] * len(finish_reasons)
+
+ info = self.get_info(
+ response_id,
+ usage_dict,
+ finish_reasons,
+ num_tokens,
+ tool_calls,
+ )
+ return info
+
  def handle_batch_response(
  self, response: ChatCompletion
  ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
@@ -516,7 +638,7 @@ class ChatAgent(BaseAgent):
  """
  choice = response.choices[0]
  if choice.message.tool_calls is None:
- raise RuntimeError("Tool calls is None")
+ raise RuntimeError("Tool call is None")
  func_name = choice.message.tool_calls[0].function.name
  func = self.func_dict[func_name]

@@ -553,6 +675,65 @@ class ChatAgent(BaseAgent):
  func_record = FunctionCallingRecord(func_name, args, result)
  return assist_msg, func_msg, func_record

+ async def step_tool_call_async(
+ self,
+ response: ChatCompletion,
+ ) -> Tuple[
+ FunctionCallingMessage, FunctionCallingMessage, FunctionCallingRecord
+ ]:
+ r"""Execute the async function with arguments following the model's
+ response.
+
+ Args:
+ response (Dict[str, Any]): The response obtained by calling the
+ model.
+
+ Returns:
+ tuple: A tuple consisting of two obj:`FunctionCallingMessage`,
+ one about the arguments and the other about the execution
+ result, and a struct for logging information about this
+ function call.
+ """
+ # Note that when function calling is enabled, `n` is set to 1.
+ choice = response.choices[0]
+ if choice.message.tool_calls is None:
+ raise RuntimeError("Tool call is None")
+ func_name = choice.message.tool_calls[0].function.name
+ func = self.func_dict[func_name]
+
+ args_str: str = choice.message.tool_calls[0].function.arguments
+ args = json.loads(args_str.replace("'", "\""))
+
+ # Pass the extracted arguments to the indicated function
+ try:
+ result = await func(**args)
+ except Exception:
+ raise ValueError(
+ f"Execution of function {func.__name__} failed with "
+ f"arguments being {args}."
+ )
+
+ assist_msg = FunctionCallingMessage(
+ role_name=self.role_name,
+ role_type=self.role_type,
+ meta_dict=None,
+ content="",
+ func_name=func_name,
+ args=args,
+ )
+ func_msg = FunctionCallingMessage(
+ role_name=self.role_name,
+ role_type=self.role_type,
+ meta_dict=None,
+ content="",
+ func_name=func_name,
+ result=result,
+ )
+
+ # Record information about this function call
+ func_record = FunctionCallingRecord(func_name, args, result)
+ return assist_msg, func_msg, func_record
+
  def get_usage_dict(
  self, output_messages: List[BaseMessage], prompt_tokens: int
  ) -> Dict[str, int]:
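
The `chat_agent.py` changes above factor the model call into `_step_model_response`/`_step_get_info` and add an async path: `step_async` mirrors `step` but awaits tools through the new `step_tool_call_async`. A minimal usage sketch of the async step; the constructor arguments and message helpers below are illustrative assumptions and may differ from the actual 0.1.5.2 API:

```python
import asyncio

from camel.agents import ChatAgent
from camel.messages import BaseMessage


async def main() -> None:
    # Illustrative setup; real configurations typically also pass a model
    # backend and, for async tool calling, a list of async-capable functions.
    agent = ChatAgent(
        system_message=BaseMessage.make_assistant_message(
            role_name="Assistant",
            content="You are a helpful assistant.",
        ),
    )

    user_msg = BaseMessage.make_user_message(
        role_name="User",
        content="Hello! What can you help me with?",
    )

    # step_async mirrors step(), but awaits async tools via
    # step_tool_call_async when the model requests a tool call.
    response = await agent.step_async(user_msg)
    print(response.msgs[0].content)


asyncio.run(main())
```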

{camel_ai-0.1.5 → camel_ai-0.1.5.2}/camel/agents/deductive_reasoner_agent.py

@@ -90,52 +90,106 @@ class DeductiveReasonerAgent(ChatAgent):
  """
  self.reset()

- deduce_prompt = """You are a deductive reasoner. You are tasked to complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the STARTING STATE A and the TARGET STATE B. You are given the CONTEXT CONTENT to help you complete the TASK.
- Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY fill in the BLANKs, and DO NOT alter or modify any other part of the template
+ deduce_prompt = """You are a deductive reasoner. You are tasked to
+ complete the TASK based on the THOUGHT OF DEDUCTIVE REASONING, the
+ STARTING STATE A and the TARGET STATE B. You are given the CONTEXT
+ CONTENT to help you complete the TASK.
+ Your answer MUST strictly adhere to the structure of ANSWER TEMPLATE, ONLY
+ fill in the BLANKs, and DO NOT alter or modify any other part of the template

  ===== MODELING OF DEDUCTIVE REASONING =====
- You are tasked with understanding a mathematical model based on the components ${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``.
+ You are tasked with understanding a mathematical model based on the components
+ ${A, B, C, Q, L}$. In this model: ``L: A ⊕ C -> q * B``.
  - $A$ represents the known starting state.
  - $B$ represents the known target state.
  - $C$ represents the conditions required to transition from $A$ to $B$.
- - $Q$ represents the quality or effectiveness of the transition from $A$ to $B$.
+ - $Q$ represents the quality or effectiveness of the transition from $A$ to
+ $B$.
  - $L$ represents the path or process from $A$ to $B$.

  ===== THOUGHT OF DEDUCTIVE REASONING =====
  1. Define the Parameters of A and B:
- - Characterization: Before delving into transitions, thoroughly understand the nature and boundaries of both $A$ and $B$. This includes the type, properties, constraints, and possible interactions between the two.
- - Contrast and Compare: Highlight the similarities and differences between $A$ and $B$. This comparative analysis will give an insight into what needs changing and what remains constant.
+ - Characterization: Before delving into transitions, thoroughly understand
+ the nature and boundaries of both $A$ and $B$. This includes the type,
+ properties, constraints, and possible interactions between the two.
+ - Contrast and Compare: Highlight the similarities and differences between
+ $A$ and $B$. This comparative analysis will give an insight into what
+ needs changing and what remains constant.
  2. Historical & Empirical Analysis:
- - Previous Transitions according to the Knowledge Base of GPT: (if applicable) Extract conditions and patterns from the historical instances where a similar transition from a state comparable to $A$ moved towards $B$.
- - Scientific Principles: (if applicable) Consider the underlying scientific principles governing or related to the states and their transition. For example, if $A$ and $B$ are physical states, laws of physics might apply.
+ - Previous Transitions according to the Knowledge Base of GPT: (if
+ applicable) Extract conditions and patterns from the historical instances
+ where a similar transition from a state comparable to $A$ moved towards
+ $B$.
+ - Scientific Principles: (if applicable) Consider the underlying
+ scientific principles governing or related to the states and their
+ transition. For example, if $A$ and $B$ are physical states, laws of
+ physics might apply.
  3. Logical Deduction of Conditions ($C$):
- - Direct Path Analysis: What are the immediate and direct conditions required to move from $A$ to $B$?
- - Intermediate States: Are there states between $A$ and $B$ that must be transversed or can be used to make the transition smoother or more efficient? If yes, what is the content?
- - Constraints & Limitations: Identify potential barriers or restrictions in moving from $A$ to $B$. These can be external (e.g., environmental factors) or internal (properties of $A$ or $B$).
- - Resource and Information Analysis: What resources and information are required for the transition? This could be time, entity, factor, code language, software platform, unknowns, etc.
- - External Influences: Consider socio-economic, political, or environmental factors (if applicable) that could influence the transition conditions.
- - Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s, no matter how unconventional they might seem. Utilize analogies, metaphors, or brainstorming techniques to envision possible conditions or paths from $A$ to $B$.
- - The conditions $C$ should be multiple but in one sentence. And each condition should be concerned with one aspect/entity.
+ - Direct Path Analysis: What are the immediate and direct conditions
+ required to move from $A$ to $B$?
+ - Intermediate States: Are there states between $A$ and $B$ that must be
+ transversed or can be used to make the transition smoother or more
+ efficient? If yes, what is the content?
+ - Constraints & Limitations: Identify potential barriers or restrictions
+ in moving from $A$ to $B$. These can be external (e.g., environmental
+ factors) or internal (properties of $A$ or $B$).
+ - Resource and Information Analysis: What resources and information are
+ required for the transition? This could be time, entity, factor, code
+ language, software platform, unknowns, etc.
+ - External Influences: Consider socio-economic, political, or
+ environmental factors (if applicable) that could influence the transition
+ conditions.
+ - Creative/Heuristic Reasoning: Open your mind to multiple possible $C$'s,
+ no matter how unconventional they might seem. Utilize analogies,
+ metaphors, or brainstorming techniques to envision possible conditions or
+ paths from $A$ to $B$.
+ - The conditions $C$ should be multiple but in one sentence. And each
+ condition should be concerned with one aspect/entity.
  4. Entity/Label Recognition of Conditions ($C$):
- - Identify and categorize entities of Conditions ($C$) such as the names, locations, dates, specific technical terms or contextual parameters that might be associated with events, innovations post-2022.
- - The output of the entities/labels will be used as tags or labels for semantic similarity searches. The entities/labels may be the words, or phrases, each of them should contain valuable, high information entropy information, and should be independent.
- - Ensure that the identified entities are formatted in a manner suitable for database indexing and retrieval. Organize the entities into categories, and combine the category with its instance into a continuous phrase, without using colons or other separators.
- - Format these entities for database indexing: output the category rather than its instance/content into a continuous phrase. For example, instead of "Jan. 02", identify it as "Event time".
+ - Identify and categorize entities of Conditions ($C$) such as the names,
+ locations, dates, specific technical terms or contextual parameters that
+ might be associated with events, innovations post-2022.
+ - The output of the entities/labels will be used as tags or labels for
+ semantic similarity searches. The entities/labels may be the words, or
+ phrases, each of them should contain valuable, high information entropy
+ information, and should be independent.
+ - Ensure that the identified entities are formatted in a manner suitable
+ for database indexing and retrieval. Organize the entities into
+ categories, and combine the category with its instance into a continuous
+ phrase, without using colons or other separators.
+ - Format these entities for database indexing: output the category rather
+ than its instance/content into a continuous phrase. For example, instead
+ of "Jan. 02", identify it as "Event time".
  5. Quality Assessment ($Q$):
- - Efficiency: How efficient is the transition from $A$ to $B$, which measures the resources used versus the desired outcome?
- - Effectiveness: Did the transition achieve the desired outcome or was the target state achieved as intended?
- - Safety & Risks: Assess any risks associated with the transition and the measures to mitigate them.
- - Feedback Mechanisms: Incorporate feedback loops to continuously monitor and adjust the quality of transition, making it more adaptive.
+ - Efficiency: How efficient is the transition from $A$ to $B$, which
+ measures the resources used versus the desired outcome?
+ - Effectiveness: Did the transition achieve the desired outcome or was the
+ target state achieved as intended?
+ - Safety & Risks: Assess any risks associated with the transition and the
+ measures to mitigate them.
+ - Feedback Mechanisms: Incorporate feedback loops to continuously monitor
+ and adjust the quality of transition, making it more adaptive.
  6. Iterative Evaluation:
- - Test & Refine: Based on the initially deduced conditions and assessed quality, iterate the process to refine and optimize the transition. This might involve tweaking conditions, employing different paths, or changing resources.
- - Feedback Integration: Use feedback to make improvements and increase the quality of the transition.
- 7. Real-world scenarios often present challenges that may not be captured by models and frameworks. While using the model, maintain an adaptive mindset:
- - Scenario Exploration: Continuously imagine various possible scenarios, both positive and negative, to prepare for unexpected events.
- - Flexibility: Be prepared to modify conditions ($C$) or alter the path/process ($L$) if unforeseen challenges arise.
- - Feedback Integration: Rapidly integrate feedback from actual implementations to adjust the model's application, ensuring relevancy and effectiveness.
+ - Test & Refine: Based on the initially deduced conditions and assessed
+ quality, iterate the process to refine and optimize the transition. This
+ might involve tweaking conditions, employing different paths, or changing
+ resources.
+ - Feedback Integration: Use feedback to make improvements and increase the
+ quality of the transition.
+ 7. Real-world scenarios often present challenges that may not be captured by
+ models and frameworks. While using the model, maintain an adaptive mindset:
+ - Scenario Exploration: Continuously imagine various possible scenarios,
+ both positive and negative, to prepare for unexpected events.
+ - Flexibility: Be prepared to modify conditions ($C$) or alter the path/
+ process ($L$) if unforeseen challenges arise.
+ - Feedback Integration: Rapidly integrate feedback from actual
+ implementations to adjust the model's application, ensuring relevancy and
+ effectiveness.

  ===== TASK =====
- Given the starting state $A$ and the target state $B$, assuming that a path $L$ always exists between $A$ and $B$, how can one deduce or identify the necessary conditions $C$ and the quality $Q$ of the transition?
+ Given the starting state $A$ and the target state $B$, assuming that a path
+ $L$ always exists between $A$ and $B$, how can one deduce or identify the
+ necessary conditions $C$ and the quality $Q$ of the transition?

  ===== STARTING STATE $A$ =====
  {starting_state}
@@ -150,7 +204,8 @@ Given the starting state $A$ and the target state $B$, assuming that a path $L$
  - Logical Deduction of Conditions ($C$) (multiple conditions can be deduced):
  condition <NUM>:
  <BLANK>.
- - Entity/Label Recognition of Conditions:\n[<BLANK>, <BLANK>, ...] (include square brackets)
+ - Entity/Label Recognition of Conditions:\n[<BLANK>, <BLANK>, ...] (include
+ square brackets)
  - Quality Assessment ($Q$) (do not use symbols):
  <BLANK>.
  - Iterative Evaluation:\n<BLANK>/None"""