camel-ai 0.1.5.2__tar.gz → 0.1.5.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai has been flagged as potentially problematic by the registry.

Files changed (152)
  1. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/PKG-INFO +60 -47
  2. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/README.md +55 -42
  3. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/chat_agent.py +21 -17
  4. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/critic_agent.py +6 -9
  5. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/deductive_reasoner_agent.py +7 -9
  6. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/embodied_agent.py +6 -9
  7. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/knowledge_graph_agent.py +12 -10
  8. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/role_assignment_agent.py +10 -11
  9. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/search_agent.py +5 -5
  10. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/task_agent.py +26 -38
  11. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/configs/openai_config.py +14 -0
  12. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/embeddings/base.py +10 -9
  13. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/embeddings/openai_embedding.py +25 -12
  14. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/embeddings/sentence_transformers_embeddings.py +28 -14
  15. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_function.py +11 -4
  16. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/slack_functions.py +14 -2
  17. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/__init__.py +4 -0
  18. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/anthropic_model.py +4 -2
  19. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/base_model.py +4 -1
  20. camel_ai-0.1.5.4/camel/models/model_factory.py +93 -0
  21. camel_ai-0.1.5.4/camel/models/nemotron_model.py +71 -0
  22. camel_ai-0.1.5.4/camel/models/ollama_model.py +121 -0
  23. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/open_source_model.py +7 -2
  24. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/openai_model.py +8 -3
  25. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/stub_model.py +3 -1
  26. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/__init__.py +4 -0
  27. camel_ai-0.1.5.4/camel/prompts/generate_text_embedding_data.py +79 -0
  28. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/task_prompt_template.py +4 -0
  29. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/auto_retriever.py +2 -2
  30. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/societies/role_playing.py +16 -19
  31. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/graph_storages/graph_element.py +9 -1
  32. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/types/__init__.py +2 -0
  33. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/types/enums.py +84 -22
  34. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/utils/commons.py +4 -0
  35. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/utils/token_counting.py +5 -3
  36. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/pyproject.toml +6 -6
  37. camel_ai-0.1.5.2/camel/bots/__init__.py +0 -20
  38. camel_ai-0.1.5.2/camel/bots/discord_bot.py +0 -103
  39. camel_ai-0.1.5.2/camel/bots/telegram_bot.py +0 -84
  40. camel_ai-0.1.5.2/camel/models/model_factory.py +0 -72
  41. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/__init__.py +0 -0
  42. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/__init__.py +0 -0
  43. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/base.py +0 -0
  44. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/tool_agents/__init__.py +0 -0
  45. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/tool_agents/base.py +0 -0
  46. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
  47. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/configs/__init__.py +0 -0
  48. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/configs/anthropic_config.py +0 -0
  49. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/configs/base_config.py +0 -0
  50. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/configs/litellm_config.py +0 -0
  51. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/embeddings/__init__.py +0 -0
  52. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/embeddings/vlm_embedding.py +0 -0
  53. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/__init__.py +0 -0
  54. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/google_maps_function.py +0 -0
  55. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/math_functions.py +0 -0
  56. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/biztoc/__init__.py +0 -0
  57. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/biztoc/ai-plugin.json +0 -0
  58. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/biztoc/openapi.yaml +0 -0
  59. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/coursera/__init__.py +0 -0
  60. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/coursera/openapi.yaml +0 -0
  61. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/create_qr_code/__init__.py +0 -0
  62. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/create_qr_code/openapi.yaml +0 -0
  63. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/klarna/__init__.py +0 -0
  64. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/klarna/openapi.yaml +0 -0
  65. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/nasa_apod/__init__.py +0 -0
  66. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/nasa_apod/openapi.yaml +0 -0
  67. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/__init__.py +0 -0
  68. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/ai-plugin.json +0 -0
  69. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/openapi.yaml +0 -0
  70. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/paths/__init__.py +0 -0
  71. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/paths/get_classes.py +0 -0
  72. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/paths/search_teachers.py +0 -0
  73. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/security_config.py +0 -0
  74. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/speak/__init__.py +0 -0
  75. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/speak/openapi.yaml +0 -0
  76. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/web_scraper/__init__.py +0 -0
  77. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/web_scraper/ai-plugin.json +0 -0
  78. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/web_scraper/openapi.yaml +0 -0
  79. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/web_scraper/paths/__init__.py +0 -0
  80. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/web_scraper/paths/scraper.py +0 -0
  81. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/openai_function.py +0 -0
  82. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/retrieval_functions.py +0 -0
  83. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/search_functions.py +0 -0
  84. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/twitter_function.py +0 -0
  85. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/weather_functions.py +0 -0
  86. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/generators.py +0 -0
  87. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/human.py +0 -0
  88. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/interpreters/__init__.py +0 -0
  89. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/interpreters/base.py +0 -0
  90. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/interpreters/internal_python_interpreter.py +0 -0
  91. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/interpreters/interpreter_error.py +0 -0
  92. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/interpreters/subprocess_interpreter.py +0 -0
  93. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/loaders/__init__.py +0 -0
  94. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/loaders/base_io.py +0 -0
  95. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/loaders/unstructured_io.py +0 -0
  96. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/__init__.py +0 -0
  97. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/agent_memories.py +0 -0
  98. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/base.py +0 -0
  99. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/blocks/__init__.py +0 -0
  100. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/blocks/chat_history_block.py +0 -0
  101. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/blocks/vectordb_block.py +0 -0
  102. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/context_creators/__init__.py +0 -0
  103. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/context_creators/score_based.py +0 -0
  104. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/records.py +0 -0
  105. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/messages/__init__.py +0 -0
  106. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/messages/base.py +0 -0
  107. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/messages/func_message.py +0 -0
  108. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/litellm_model.py +0 -0
  109. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/openai_audio_models.py +0 -0
  110. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/zhipuai_model.py +0 -0
  111. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/ai_society.py +0 -0
  112. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/base.py +0 -0
  113. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/code.py +0 -0
  114. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/descripte_video_prompt.py +0 -0
  115. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/evaluation.py +0 -0
  116. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/misalignment.py +0 -0
  117. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/object_recognition.py +0 -0
  118. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/prompt_templates.py +0 -0
  119. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/role_description_prompt_template.py +0 -0
  120. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/solution_extraction.py +0 -0
  121. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/translation.py +0 -0
  122. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/responses/__init__.py +0 -0
  123. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/responses/agent_responses.py +0 -0
  124. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/__init__.py +0 -0
  125. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/base.py +0 -0
  126. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/bm25_retriever.py +0 -0
  127. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/cohere_rerank_retriever.py +0 -0
  128. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/vector_retriever.py +0 -0
  129. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/societies/__init__.py +0 -0
  130. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/societies/babyagi_playing.py +0 -0
  131. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/__init__.py +0 -0
  132. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/graph_storages/__init__.py +0 -0
  133. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/graph_storages/base.py +0 -0
  134. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/graph_storages/neo4j_graph.py +0 -0
  135. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/key_value_storages/__init__.py +0 -0
  136. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/key_value_storages/base.py +0 -0
  137. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/key_value_storages/in_memory.py +0 -0
  138. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/key_value_storages/json.py +0 -0
  139. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/vectordb_storages/__init__.py +0 -0
  140. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/vectordb_storages/base.py +0 -0
  141. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/vectordb_storages/milvus.py +0 -0
  142. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/vectordb_storages/qdrant.py +0 -0
  143. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/terminators/__init__.py +0 -0
  144. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/terminators/base.py +0 -0
  145. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/terminators/response_terminator.py +0 -0
  146. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/terminators/token_limit_terminator.py +0 -0
  147. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/toolkits/__init__.py +0 -0
  148. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/toolkits/base.py +0 -0
  149. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/toolkits/github_toolkit.py +0 -0
  150. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/types/openai_types.py +0 -0
  151. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/utils/__init__.py +0 -0
  152. {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/utils/constants.py +0 -0
@@ -1,12 +1,12 @@
  Metadata-Version: 2.1
  Name: camel-ai
- Version: 0.1.5.2
+ Version: 0.1.5.4
  Summary: Communicative Agents for AI Society Study
  Home-page: https://www.camel-ai.org/
  License: Apache-2.0
  Keywords: communicative-ai,ai-societies,artificial-intelligence,deep-learning,multi-agent-systems,cooperative-ai,natural-language-processing,large-language-models
  Author: CAMEL-AI.org
- Requires-Python: >=3.8.1,<3.12
+ Requires-Python: >=3.9.0,<3.12
  Classifier: License :: OSI Approved :: Apache Software License
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.9
@@ -23,7 +23,7 @@ Provides-Extra: tools
  Provides-Extra: vector-databases
  Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
- Requires-Dist: anthropic (>=0.28.0,<0.29.0)
+ Requires-Dist: anthropic (>=0.29.0,<0.30.0)
  Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
  Requires-Dist: cohere (>=4.56,<5.0) ; extra == "retrievers" or extra == "all"
  Requires-Dist: colorama (>=0,<1)
@@ -35,7 +35,7 @@ Requires-Dist: docstring-parser (>=0.15,<0.16)
  Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
  Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
- Requires-Dist: imageio (>=2.34.1,<3.0.0) ; extra == "tools" or extra == "all"
+ Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: jsonschema (>=4,<5)
  Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
  Requires-Dist: mock (>=5,<6) ; extra == "test"
@@ -60,7 +60,7 @@ Requires-Dist: pytest-asyncio (>=0.23.0,<0.24.0) ; extra == "test"
  Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "vector-databases" or extra == "all"
  Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "all"
  Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
- Requires-Dist: sentence-transformers (>=2.2.2,<3.0.0) ; extra == "encoders" or extra == "all"
+ Requires-Dist: sentence-transformers (>=3.0.1,<4.0.0) ; extra == "encoders" or extra == "all"
  Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
  Requires-Dist: slack-sdk (>=3.27.2,<4.0.0) ; extra == "tools" or extra == "all"
  Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
@@ -180,13 +180,13 @@ exit
  Install `CAMEL` from source with conda and pip:
  ```sh
  # Create a conda virtual environment
- conda create --name camel python=3.10
+ conda create --name camel python=3.9

  # Activate CAMEL conda environment
  conda activate camel

  # Clone github repo
- git clone -b v0.1.5.2 https://github.com/camel-ai/camel.git
+ git clone -b v0.1.5.4 https://github.com/camel-ai/camel.git

  # Change directory into project directory
  cd camel
@@ -246,54 +246,67 @@ python examples/ai_society/role_playing.py
  Please note that the environment variable is session-specific. If you open a new terminal window or tab, you will need to set the API key again in that new session.


- ## Use Open-Source Models as Backends
+ ## Use Open-Source Models as Backends (ex. using Ollama to set Llama 3 locally)

- The basic workflow of using an open-sourced model as the backend is based on an external server running LLM inference service, e.g. during the development we chose [FastChat](https://github.com/lm-sys/FastChat) to run the service.
-
- We do not fix the choice of server to decouple the implementation of any specific LLM inference server with CAMEL (indicating the server needs to be deployed by the user himself). But the server to be deployed must satisfy that **it supports OpenAI-compatible APIs, especially the method `openai.ChatCompletion.create`**.
-
- Here are some instructions for enabling open-source backends, where we use the [FastChat](https://github.com/lm-sys/FastChat) and a LLaMA2-based model ([`meta-llama/Llama-2-7b-chat-hf`](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)) in the example. Please install FastChat in advance following their installation guidance.
-
- 1. Before running CAMEL, we should firstly launch FastChat server following the guidance on https://github.com/lm-sys/FastChat/blob/main/docs/openai_api.md. The instructions summarized below should be kept running **in separate processes**:
-
- ```sh
- # Launch the controller
- python -m fastchat.serve.controller
-
- # Launch the model worker(s)
- python3 -m fastchat.serve.model_worker --model-path meta-llama/Llama-2-7b-chat-hf
-
- # Launch the RESTful API server
- python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
- ```
+ - Download [Ollama](https://ollama.com/download).
+ - After setting up Ollama, pull the Llama3 model by typing the following command into the terminal:
+ ```bash
+ ollama pull llama3
+ ```
+ - Create a ModelFile similar the one below in your project directory.
+ ```bash
+ FROM llama3

- 2. After observing the controller successfully receiving the heart beat signal from the worker, the server should be ready for use at http://localhost:8000/v1.
+ # Set parameters
+ PARAMETER temperature 0.8
+ PARAMETER stop Result

- 3. Then we can try on running `role_playing_with_open_source_model.py`, where each agent in this example is initialized with specifying the `model_path` and `server_url`, similar to the example code below:
+ # Sets a custom system message to specify the behavior of the chat assistant

- ```python
- system_message = # ...
+ # Leaving it blank for now.

- agent_kwargs = dict(
-     model=model_type,
-     model_config=OpenSourceConfig(
-         model_path="meta-llama/Llama-2-7b-chat-hf",
-         server_url="http://localhost:8000/v1",
-     ),
- )
+ SYSTEM """ """
+ ```
+ - Create a script to get the base model (llama3) and create a custom model using the ModelFile above. Save this as a .sh file:
+ ```bash
+ #!/bin/zsh

- agent = ChatAgent(
-     system_message,
-     **agent_kwargs,
- )
- ```
+ # variables
+ model_name="llama3"
+ custom_model_name="camel-llama3"

- ### Supported Models
+ #get the base model
+ ollama pull $model_name

- - LLaMA2-based models
-   - example: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)
- - Vicuna-based models
-   - example: [lmsys/vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5)
+ #create the model file
+ ollama create $custom_model_name -f ./Llama3ModelFile
+ ```
+ - Navigate to the directory where the script and ModelFile are located and run the script. Enjoy your Llama3 model, enhanced by CAMEL's excellent agents.
+ ```python
+ from camel.agents import ChatAgent
+ from camel.messages import BaseMessage
+ from camel.models import ModelFactory
+ from camel.types import ModelPlatformType
+
+ ollama_model = ModelFactory.create(
+     model_platform=ModelPlatformType.OLLAMA,
+     model_type="llama3",
+     url="http://localhost:11434/v1",
+     model_config_dict={"temperature": 0.4},
+ )
+
+ assistant_sys_msg = BaseMessage.make_assistant_message(
+     role_name="Assistant",
+     content="You are a helpful assistant.",
+ )
+ agent = ChatAgent(assistant_sys_msg, model=ollama_model, token_limit=4096)
+
+ user_msg = BaseMessage.make_user_message(
+     role_name="User", content="Say hi to CAMEL"
+ )
+ assistant_response = agent.step(user_msg)
+ print(assistant_response.msg.content)
+ ```

  ## Data (Hosted on Hugging Face)
  | Dataset | Chat format | Instruction format | Chat format (translated) |
@@ -104,13 +104,13 @@ exit
  Install `CAMEL` from source with conda and pip:
  ```sh
  # Create a conda virtual environment
- conda create --name camel python=3.10
+ conda create --name camel python=3.9

  # Activate CAMEL conda environment
  conda activate camel

  # Clone github repo
- git clone -b v0.1.5.2 https://github.com/camel-ai/camel.git
+ git clone -b v0.1.5.4 https://github.com/camel-ai/camel.git

  # Change directory into project directory
  cd camel
@@ -170,54 +170,67 @@ python examples/ai_society/role_playing.py
  Please note that the environment variable is session-specific. If you open a new terminal window or tab, you will need to set the API key again in that new session.


- ## Use Open-Source Models as Backends
+ ## Use Open-Source Models as Backends (ex. using Ollama to set Llama 3 locally)

- The basic workflow of using an open-sourced model as the backend is based on an external server running LLM inference service, e.g. during the development we chose [FastChat](https://github.com/lm-sys/FastChat) to run the service.
-
- We do not fix the choice of server to decouple the implementation of any specific LLM inference server with CAMEL (indicating the server needs to be deployed by the user himself). But the server to be deployed must satisfy that **it supports OpenAI-compatible APIs, especially the method `openai.ChatCompletion.create`**.
-
- Here are some instructions for enabling open-source backends, where we use the [FastChat](https://github.com/lm-sys/FastChat) and a LLaMA2-based model ([`meta-llama/Llama-2-7b-chat-hf`](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)) in the example. Please install FastChat in advance following their installation guidance.
-
- 1. Before running CAMEL, we should firstly launch FastChat server following the guidance on https://github.com/lm-sys/FastChat/blob/main/docs/openai_api.md. The instructions summarized below should be kept running **in separate processes**:
-
- ```sh
- # Launch the controller
- python -m fastchat.serve.controller
-
- # Launch the model worker(s)
- python3 -m fastchat.serve.model_worker --model-path meta-llama/Llama-2-7b-chat-hf
-
- # Launch the RESTful API server
- python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
- ```
+ - Download [Ollama](https://ollama.com/download).
+ - After setting up Ollama, pull the Llama3 model by typing the following command into the terminal:
+ ```bash
+ ollama pull llama3
+ ```
+ - Create a ModelFile similar the one below in your project directory.
+ ```bash
+ FROM llama3

- 2. After observing the controller successfully receiving the heart beat signal from the worker, the server should be ready for use at http://localhost:8000/v1.
+ # Set parameters
+ PARAMETER temperature 0.8
+ PARAMETER stop Result

- 3. Then we can try on running `role_playing_with_open_source_model.py`, where each agent in this example is initialized with specifying the `model_path` and `server_url`, similar to the example code below:
+ # Sets a custom system message to specify the behavior of the chat assistant

- ```python
- system_message = # ...
+ # Leaving it blank for now.

- agent_kwargs = dict(
-     model=model_type,
-     model_config=OpenSourceConfig(
-         model_path="meta-llama/Llama-2-7b-chat-hf",
-         server_url="http://localhost:8000/v1",
-     ),
- )
+ SYSTEM """ """
+ ```
+ - Create a script to get the base model (llama3) and create a custom model using the ModelFile above. Save this as a .sh file:
+ ```bash
+ #!/bin/zsh

- agent = ChatAgent(
-     system_message,
-     **agent_kwargs,
- )
- ```
+ # variables
+ model_name="llama3"
+ custom_model_name="camel-llama3"

- ### Supported Models
+ #get the base model
+ ollama pull $model_name

- - LLaMA2-based models
-   - example: [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)
- - Vicuna-based models
-   - example: [lmsys/vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5)
+ #create the model file
+ ollama create $custom_model_name -f ./Llama3ModelFile
+ ```
+ - Navigate to the directory where the script and ModelFile are located and run the script. Enjoy your Llama3 model, enhanced by CAMEL's excellent agents.
+ ```python
+ from camel.agents import ChatAgent
+ from camel.messages import BaseMessage
+ from camel.models import ModelFactory
+ from camel.types import ModelPlatformType
+
+ ollama_model = ModelFactory.create(
+     model_platform=ModelPlatformType.OLLAMA,
+     model_type="llama3",
+     url="http://localhost:11434/v1",
+     model_config_dict={"temperature": 0.4},
+ )
+
+ assistant_sys_msg = BaseMessage.make_assistant_message(
+     role_name="Assistant",
+     content="You are a helpful assistant.",
+ )
+ agent = ChatAgent(assistant_sys_msg, model=ollama_model, token_limit=4096)
+
+ user_msg = BaseMessage.make_user_message(
+     role_name="User", content="Say hi to CAMEL"
+ )
+ assistant_response = agent.step(user_msg)
+ print(assistant_response.msg.content)
+ ```

  ## Data (Hosted on Hugging Face)
  | Dataset | Chat format | Instruction format | Chat format (translated) |
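
One note on the added README example: the shell script builds a custom `camel-llama3` model, while the Python snippet points `model_type` at the base `llama3`. To run the customized model instead, the same factory call would presumably take the custom name (hypothetical sketch; only the model name differs from the README example above):

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Hypothetical: "camel-llama3" is the custom model name created by the
# `ollama create` script above; everything else matches the README example.
custom_model = ModelFactory.create(
    model_platform=ModelPlatformType.OLLAMA,
    model_type="camel-llama3",
    url="http://localhost:11434/v1",
    model_config_dict={"temperature": 0.4},
)
```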
@@ -32,6 +32,7 @@ from camel.responses import ChatAgentResponse
  from camel.types import (
      ChatCompletion,
      ChatCompletionChunk,
+     ModelPlatformType,
      ModelType,
      OpenAIBackendRole,
      RoleType,
@@ -41,7 +42,6 @@ from camel.utils import get_model_encoding
  if TYPE_CHECKING:
      from openai import Stream

-     from camel.configs import BaseConfig
      from camel.functions import OpenAIFunction
      from camel.terminators import ResponseTerminator

@@ -80,10 +80,9 @@ class ChatAgent(BaseAgent):

      Args:
          system_message (BaseMessage): The system message for the chat agent.
-         model_type (ModelType, optional): The LLM model to use for generating
-             responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
-         model_config (BaseConfig, optional): Configuration options for the
-             LLM model. (default: :obj:`None`)
+         model (BaseModelBackend, optional): The model backend to use for
+             generating responses. (default: :obj:`OpenAIModel` with
+             `GPT_3_5_TURBO`)
          api_key (str, optional): The API key for authenticating with the
              LLM service. Only OpenAI and Anthropic model supported (default:
              :obj:`None`)
@@ -109,8 +108,7 @@ class ChatAgent(BaseAgent):
      def __init__(
          self,
          system_message: BaseMessage,
-         model_type: Optional[ModelType] = None,
-         model_config: Optional[BaseConfig] = None,
+         model: Optional[BaseModelBackend] = None,
          api_key: Optional[str] = None,
          memory: Optional[AgentMemory] = None,
          message_window_size: Optional[int] = None,
@@ -123,24 +121,30 @@ class ChatAgent(BaseAgent):
          self.system_message = system_message
          self.role_name: str = system_message.role_name
          self.role_type: RoleType = system_message.role_type
+         self._api_key = api_key
+         self.model_backend: BaseModelBackend = (
+             model
+             if model is not None
+             else ModelFactory.create(
+                 model_platform=ModelPlatformType.OPENAI,
+                 model_type=ModelType.GPT_3_5_TURBO,
+                 model_config_dict=ChatGPTConfig().__dict__,
+                 api_key=self._api_key,
+             )
+         )
          self.output_language: Optional[str] = output_language
          if self.output_language is not None:
              self.set_output_language(self.output_language)

-         self.model_type: ModelType = (
-             model_type if model_type is not None else ModelType.GPT_3_5_TURBO
-         )
+         self.model_type: ModelType = self.model_backend.model_type

          self.func_dict: Dict[str, Callable] = {}
          if tools is not None:
              for func in tools:
                  self.func_dict[func.get_function_name()] = func.func

-         self.model_config = model_config or ChatGPTConfig()
-         self._api_key = api_key
-         self.model_backend: BaseModelBackend = ModelFactory.create(
-             self.model_type, self.model_config.__dict__, self._api_key
-         )
+         self.model_config_dict = self.model_backend.model_config_dict
+
          self.model_token_limit = token_limit or self.model_backend.token_limit
          context_creator = ScoreBasedContextCreator(
              self.model_backend.token_counter,
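
Taken together, this hunk replaces the old `model_type`/`model_config` pair with a single injected model backend, defaulting to an OpenAI `GPT_3_5_TURBO` backend built through `ModelFactory`. A minimal construction sketch under the new signature, assuming only the imports and calls visible elsewhere in this diff:

```python
from camel.agents import ChatAgent
from camel.configs import ChatGPTConfig
from camel.messages import BaseMessage
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

sys_msg = BaseMessage.make_assistant_message(
    role_name="Assistant", content="You are a helpful assistant."
)

# 0.1.5.2 style (removed): ChatAgent(sys_msg, model_type=..., model_config=...)
# 0.1.5.4 style: build the backend first, then inject it.
model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_3_5_TURBO,
    model_config_dict=ChatGPTConfig().__dict__,
)
agent = ChatAgent(sys_msg, model=model)
```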
@@ -643,7 +647,7 @@ class ChatAgent(BaseAgent):
          func = self.func_dict[func_name]

          args_str: str = choice.message.tool_calls[0].function.arguments
-         args = json.loads(args_str.replace("'", "\""))
+         args = json.loads(args_str)

          # Pass the extracted arguments to the indicated function
          try:
@@ -702,7 +706,7 @@ class ChatAgent(BaseAgent):
          func = self.func_dict[func_name]

          args_str: str = choice.message.tool_calls[0].function.arguments
-         args = json.loads(args_str.replace("'", "\""))
+         args = json.loads(args_str)

          # Pass the extracted arguments to the indicated function
          try:
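
Both tool-call hunks (here and at `@@ -643`) drop the blanket single-to-double quote substitution before parsing the arguments string. That substitution corrupted any valid JSON argument containing an apostrophe; a quick sketch of the failure mode:

```python
import json

# Tool-call arguments arrive from the API as a JSON string.
args_str = '{"text": "don\'t stop"}'

print(json.loads(args_str))  # OK: {'text': "don't stop"}

# Old behavior: blanket quote replacement yields {"text": "don"t stop"},
# which is no longer valid JSON.
try:
    json.loads(args_str.replace("'", '"'))
except json.JSONDecodeError as e:
    print("old replace() breaks valid arguments:", e)
```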
@@ -20,8 +20,8 @@ from colorama import Fore
  from camel.agents.chat_agent import ChatAgent
  from camel.memories import AgentMemory
  from camel.messages import BaseMessage
+ from camel.models import BaseModelBackend
  from camel.responses import ChatAgentResponse
- from camel.types import ModelType
  from camel.utils import get_first_int, print_text_animated


@@ -31,10 +31,9 @@ class CriticAgent(ChatAgent):
      Args:
          system_message (BaseMessage): The system message for the critic
              agent.
-         model_type (ModelType, optional): The LLM model to use for generating
-             responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
-         model_config (Any, optional): Configuration options for the LLM model.
-             (default: :obj:`None`)
+         model (BaseModelBackend, optional): The model backend to use for
+             generating responses. (default: :obj:`OpenAIModel` with
+             `GPT_3_5_TURBO`)
          message_window_size (int, optional): The maximum number of previous
              messages to include in the context window. If `None`, no windowing
              is performed. (default: :obj:`6`)
@@ -48,8 +47,7 @@ class CriticAgent(ChatAgent):
      def __init__(
          self,
          system_message: BaseMessage,
-         model_type: ModelType = ModelType.GPT_3_5_TURBO,
-         model_config: Optional[Any] = None,
+         model: Optional[BaseModelBackend] = None,
          memory: Optional[AgentMemory] = None,
          message_window_size: int = 6,
          retry_attempts: int = 2,
@@ -58,8 +56,7 @@ class CriticAgent(ChatAgent):
      ) -> None:
          super().__init__(
              system_message,
-             model_type=model_type,
-             model_config=model_config,
+             model=model,
              memory=memory,
              message_window_size=message_window_size,
          )
@@ -15,10 +15,10 @@ import re
  from typing import Dict, List, Optional, Union

  from camel.agents.chat_agent import ChatAgent
- from camel.configs import BaseConfig
  from camel.messages import BaseMessage
+ from camel.models import BaseModelBackend
  from camel.prompts import TextPrompt
- from camel.types import ModelType, RoleType
+ from camel.types import RoleType


  class DeductiveReasonerAgent(ChatAgent):
@@ -33,16 +33,14 @@ class DeductiveReasonerAgent(ChatAgent):
      - L represents the path or process from A to B.

      Args:
-         model_type (ModelType, optional): The type of model to use for the
-             agent. (default: :obj: `None`)
-         model_config (BaseConfig, optional): The configuration for the model.
-             (default: :obj:`None`)
+         model (BaseModelBackend, optional): The model backend to use for
+             generating responses. (default: :obj:`OpenAIModel` with
+             `GPT_3_5_TURBO`)
      """

      def __init__(
          self,
-         model_type: Optional[ModelType] = None,
-         model_config: Optional[BaseConfig] = None,
+         model: Optional[BaseModelBackend] = None,
      ) -> None:
          system_message = BaseMessage(
              role_name="Insight Agent",
@@ -50,7 +48,7 @@ class DeductiveReasonerAgent(ChatAgent):
              meta_dict=None,
              content="You assign roles based on tasks.",
          )
-         super().__init__(system_message, model_type, model_config)
+         super().__init__(system_message, model=model)

      def deduce_conditions_and_quality(
          self,
@@ -23,8 +23,8 @@ from camel.interpreters import (
      SubprocessInterpreter,
  )
  from camel.messages import BaseMessage
+ from camel.models import BaseModelBackend
  from camel.responses import ChatAgentResponse
- from camel.types import ModelType
  from camel.utils import print_text_animated


@@ -33,10 +33,9 @@ class EmbodiedAgent(ChatAgent):

      Args:
          system_message (BaseMessage): The system message for the chat agent.
-         model_type (ModelType, optional): The LLM model to use for generating
-             responses. (default :obj:`ModelType.GPT_4`)
-         model_config (Any, optional): Configuration options for the LLM model.
-             (default: :obj:`None`)
+         model (BaseModelBackend, optional): The model backend to use for
+             generating responses. (default: :obj:`OpenAIModel` with
+             `GPT_3_5_TURBO`)
          message_window_size (int, optional): The maximum number of previous
              messages to include in the context window. If `None`, no windowing
              is performed. (default: :obj:`None`)
@@ -55,8 +54,7 @@ class EmbodiedAgent(ChatAgent):
      def __init__(
          self,
          system_message: BaseMessage,
-         model_type: ModelType = ModelType.GPT_4,
-         model_config: Optional[Any] = None,
+         model: Optional[BaseModelBackend] = None,
          message_window_size: Optional[int] = None,
          tool_agents: Optional[List[BaseToolAgent]] = None,
          code_interpreter: Optional[BaseInterpreter] = None,
@@ -78,8 +76,7 @@ class EmbodiedAgent(ChatAgent):
          self.logger_color = logger_color
          super().__init__(
              system_message=system_message,
-             model_type=model_type,
-             model_config=model_config,
+             model=model,
              message_window_size=message_window_size,
          )

@@ -11,19 +11,23 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
- from typing import Any, Optional, Union
+ from typing import Optional, Union

- from unstructured.documents.elements import Element
+ try:
+     from unstructured.documents.elements import Element
+ except ImportError:
+     Element = None

  from camel.agents import ChatAgent
  from camel.messages import BaseMessage
+ from camel.models import BaseModelBackend
  from camel.prompts import TextPrompt
  from camel.storages.graph_storages.graph_element import (
      GraphElement,
      Node,
      Relationship,
  )
- from camel.types import ModelType, RoleType
+ from camel.types import RoleType

  text_prompt = """
  You are tasked with extracting nodes and relationships from given content and
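
The guarded import makes `unstructured` an optional dependency: `Element` degrades to `None` when the package is missing instead of failing at import time. A hedged sketch (not from the package) of how a caller might check availability before passing `Element` instances in:

```python
# Hedged sketch: with the new fallback, Element is None whenever the
# optional `unstructured` package is not installed.
from camel.agents.knowledge_graph_agent import Element

if Element is None:
    raise ImportError(
        "Element input requires the optional `unstructured` package; "
        "install it with `pip install unstructured`."
    )
```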
@@ -105,16 +109,14 @@ class KnowledgeGraphAgent(ChatAgent):

      def __init__(
          self,
-         model_type: ModelType = ModelType.GPT_3_5_TURBO,
-         model_config: Optional[Any] = None,
+         model: Optional[BaseModelBackend] = None,
      ) -> None:
          r"""Initialize the `KnowledgeGraphAgent`.

          Args:
-             model_type (ModelType, optional): The type of model to use for the
-                 agent. Defaults to `ModelType.GPT_3_5_TURBO`.
-             model_config (Any, optional): The configuration for the model.
-                 Defaults to `None`.
+             model (BaseModelBackend, optional): The model backend to use for
+                 generating responses. (default: :obj:`OpenAIModel` with
+                 `GPT_3_5_TURBO`)
          """
          system_message = BaseMessage(
              role_name="Graphify",
@@ -126,7 +128,7 @@ class KnowledgeGraphAgent(ChatAgent):
              "illuminate the hidden connections within the chaos of "
              "information.",
          )
-         super().__init__(system_message, model_type, model_config)
+         super().__init__(system_message, model=model)

      def run(
          self,
@@ -12,32 +12,31 @@
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
  import re
- from typing import Any, Dict, Optional, Union
+ from typing import Dict, Optional, Union

  from camel.agents.chat_agent import ChatAgent
  from camel.messages import BaseMessage
+ from camel.models import BaseModelBackend
  from camel.prompts import TextPrompt
- from camel.types import ModelType, RoleType
+ from camel.types import RoleType


  class RoleAssignmentAgent(ChatAgent):
      r"""An agent that generates role names based on the task prompt.

+     Args:
+         model (BaseModelBackend, optional): The model backend to use for
+             generating responses. (default: :obj:`OpenAIModel` with
+             `GPT_3_5_TURBO`)
+
      Attributes:
          role_assignment_prompt (TextPrompt): A prompt for the agent to generate
              role names.
-
-     Args:
-         model_type (ModelType, optional): The type of model to use for the
-             agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-         model_config (Any, optional): The configuration for the model.
-             (default: :obj:`None`)
      """

      def __init__(
          self,
-         model_type: ModelType = ModelType.GPT_3_5_TURBO,
-         model_config: Optional[Any] = None,
+         model: Optional[BaseModelBackend] = None,
      ) -> None:
          system_message = BaseMessage(
              role_name="Role Assigner",
@@ -45,7 +44,7 @@ class RoleAssignmentAgent(ChatAgent):
              meta_dict=None,
              content="You assign roles based on tasks.",
          )
-         super().__init__(system_message, model_type, model_config)
+         super().__init__(system_message, model=model)

      def run(
          self,
@@ -11,12 +11,13 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
- from typing import Any, Optional
+ from typing import Optional

  from camel.agents.chat_agent import ChatAgent
  from camel.messages import BaseMessage
+ from camel.models import BaseModelBackend
  from camel.prompts import TextPrompt
- from camel.types import ModelType, RoleType
+ from camel.types import RoleType
  from camel.utils import create_chunks


@@ -33,8 +34,7 @@ class SearchAgent(ChatAgent):

      def __init__(
          self,
-         model_type: ModelType = ModelType.GPT_3_5_TURBO,
-         model_config: Optional[Any] = None,
+         model: Optional[BaseModelBackend] = None,
      ) -> None:
          system_message = BaseMessage(
              role_name="Assistant",
@@ -42,7 +42,7 @@ class SearchAgent(ChatAgent):
              meta_dict=None,
              content="You are a helpful assistant.",
          )
-         super().__init__(system_message, model_type, model_config)
+         super().__init__(system_message, model=model)

      def summarize_text(self, text: str, query: str) -> str:
          r"""Summarize the information from the text, base on the query.