camel-ai 0.1.5.2__tar.gz → 0.1.5.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/PKG-INFO +60 -47
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/README.md +55 -42
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/chat_agent.py +21 -17
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/critic_agent.py +6 -9
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/deductive_reasoner_agent.py +7 -9
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/embodied_agent.py +6 -9
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/knowledge_graph_agent.py +12 -10
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/role_assignment_agent.py +10 -11
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/search_agent.py +5 -5
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/task_agent.py +26 -38
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/configs/openai_config.py +14 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/embeddings/base.py +10 -9
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/embeddings/openai_embedding.py +25 -12
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/embeddings/sentence_transformers_embeddings.py +28 -14
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_function.py +11 -4
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/slack_functions.py +14 -2
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/__init__.py +4 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/anthropic_model.py +4 -2
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/base_model.py +4 -1
- camel_ai-0.1.5.4/camel/models/model_factory.py +93 -0
- camel_ai-0.1.5.4/camel/models/nemotron_model.py +71 -0
- camel_ai-0.1.5.4/camel/models/ollama_model.py +121 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/open_source_model.py +7 -2
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/openai_model.py +8 -3
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/stub_model.py +3 -1
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/__init__.py +4 -0
- camel_ai-0.1.5.4/camel/prompts/generate_text_embedding_data.py +79 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/task_prompt_template.py +4 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/auto_retriever.py +2 -2
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/societies/role_playing.py +16 -19
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/graph_storages/graph_element.py +9 -1
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/types/__init__.py +2 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/types/enums.py +84 -22
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/utils/commons.py +4 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/utils/token_counting.py +5 -3
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/pyproject.toml +6 -6
- camel_ai-0.1.5.2/camel/bots/__init__.py +0 -20
- camel_ai-0.1.5.2/camel/bots/discord_bot.py +0 -103
- camel_ai-0.1.5.2/camel/bots/telegram_bot.py +0 -84
- camel_ai-0.1.5.2/camel/models/model_factory.py +0 -72
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/tool_agents/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/tool_agents/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/configs/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/configs/anthropic_config.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/configs/base_config.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/configs/litellm_config.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/embeddings/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/embeddings/vlm_embedding.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/google_maps_function.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/math_functions.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/biztoc/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/biztoc/ai-plugin.json +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/biztoc/openapi.yaml +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/coursera/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/coursera/openapi.yaml +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/create_qr_code/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/create_qr_code/openapi.yaml +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/klarna/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/klarna/openapi.yaml +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/nasa_apod/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/nasa_apod/openapi.yaml +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/ai-plugin.json +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/openapi.yaml +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/paths/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/paths/get_classes.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/outschool/paths/search_teachers.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/security_config.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/speak/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/speak/openapi.yaml +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/web_scraper/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/web_scraper/ai-plugin.json +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/web_scraper/openapi.yaml +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/web_scraper/paths/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/open_api_specs/web_scraper/paths/scraper.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/openai_function.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/retrieval_functions.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/search_functions.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/twitter_function.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/functions/weather_functions.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/generators.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/human.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/interpreters/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/interpreters/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/interpreters/internal_python_interpreter.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/interpreters/interpreter_error.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/interpreters/subprocess_interpreter.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/loaders/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/loaders/base_io.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/loaders/unstructured_io.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/agent_memories.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/blocks/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/blocks/chat_history_block.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/blocks/vectordb_block.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/context_creators/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/context_creators/score_based.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/memories/records.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/messages/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/messages/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/messages/func_message.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/litellm_model.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/openai_audio_models.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/models/zhipuai_model.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/ai_society.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/code.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/descripte_video_prompt.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/evaluation.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/misalignment.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/object_recognition.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/prompt_templates.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/role_description_prompt_template.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/solution_extraction.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/prompts/translation.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/responses/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/responses/agent_responses.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/bm25_retriever.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/cohere_rerank_retriever.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/retrievers/vector_retriever.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/societies/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/societies/babyagi_playing.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/graph_storages/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/graph_storages/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/graph_storages/neo4j_graph.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/key_value_storages/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/key_value_storages/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/key_value_storages/in_memory.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/key_value_storages/json.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/vectordb_storages/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/vectordb_storages/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/vectordb_storages/milvus.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/storages/vectordb_storages/qdrant.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/terminators/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/terminators/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/terminators/response_terminator.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/terminators/token_limit_terminator.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/toolkits/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/toolkits/base.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/toolkits/github_toolkit.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/types/openai_types.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/utils/__init__.py +0 -0
- {camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/utils/constants.py +0 -0
{camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/PKG-INFO

@@ -1,12 +1,12 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.2
+Version: 0.1.5.4
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
 Keywords: communicative-ai,ai-societies,artificial-intelligence,deep-learning,multi-agent-systems,cooperative-ai,natural-language-processing,large-language-models
 Author: CAMEL-AI.org
-Requires-Python: >=3.
+Requires-Python: >=3.9.0,<3.12
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.9
@@ -23,7 +23,7 @@ Provides-Extra: tools
 Provides-Extra: vector-databases
 Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
-Requires-Dist: anthropic (>=0.
+Requires-Dist: anthropic (>=0.29.0,<0.30.0)
 Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
 Requires-Dist: cohere (>=4.56,<5.0) ; extra == "retrievers" or extra == "all"
 Requires-Dist: colorama (>=0,<1)
@@ -35,7 +35,7 @@ Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
-Requires-Dist: imageio (>=2.34.
+Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: jsonschema (>=4,<5)
 Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: mock (>=5,<6) ; extra == "test"
@@ -60,7 +60,7 @@ Requires-Dist: pytest-asyncio (>=0.23.0,<0.24.0) ; extra == "test"
 Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "vector-databases" or extra == "all"
 Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "all"
 Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
-Requires-Dist: sentence-transformers (>=
+Requires-Dist: sentence-transformers (>=3.0.1,<4.0.0) ; extra == "encoders" or extra == "all"
 Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: slack-sdk (>=3.27.2,<4.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
@@ -180,13 +180,13 @@ exit
 Install `CAMEL` from source with conda and pip:
 ```sh
 # Create a conda virtual environment
-conda create --name camel python=3.
+conda create --name camel python=3.9
 
 # Activate CAMEL conda environment
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.2 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.4 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -246,54 +246,67 @@ python examples/ai_society/role_playing.py
 Please note that the environment variable is session-specific. If you open a new terminal window or tab, you will need to set the API key again in that new session.
 
 
-## Use Open-Source Models as Backends
+## Use Open-Source Models as Backends (ex. using Ollama to set Llama 3 locally)
 
-
-
-
-
-
-
-
-
-```sh
-# Launch the controller
-python -m fastchat.serve.controller
-
-# Launch the model worker(s)
-python3 -m fastchat.serve.model_worker --model-path meta-llama/Llama-2-7b-chat-hf
-
-# Launch the RESTful API server
-python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
-```
+- Download [Ollama](https://ollama.com/download).
+- After setting up Ollama, pull the Llama3 model by typing the following command into the terminal:
+```bash
+ollama pull llama3
+```
+- Create a ModelFile similar the one below in your project directory.
+```bash
+FROM llama3
 
-
+# Set parameters
+PARAMETER temperature 0.8
+PARAMETER stop Result
 
-
+# Sets a custom system message to specify the behavior of the chat assistant
 
-
-system_message = # ...
+# Leaving it blank for now.
 
-
-
-
-
-
-),
-)
+SYSTEM """ """
+```
+- Create a script to get the base model (llama3) and create a custom model using the ModelFile above. Save this as a .sh file:
+```bash
+#!/bin/zsh
 
-
-
-
-)
-```
+# variables
+model_name="llama3"
+custom_model_name="camel-llama3"
 
-
+#get the base model
+ollama pull $model_name
 
-
-
-
-
+#create the model file
+ollama create $custom_model_name -f ./Llama3ModelFile
+```
+- Navigate to the directory where the script and ModelFile are located and run the script. Enjoy your Llama3 model, enhanced by CAMEL's excellent agents.
+```python
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.models import ModelFactory
+from camel.types import ModelPlatformType
+
+ollama_model = ModelFactory.create(
+    model_platform=ModelPlatformType.OLLAMA,
+    model_type="llama3",
+    url="http://localhost:11434/v1",
+    model_config_dict={"temperature": 0.4},
+)
+
+assistant_sys_msg = BaseMessage.make_assistant_message(
+    role_name="Assistant",
+    content="You are a helpful assistant.",
+)
+agent = ChatAgent(assistant_sys_msg, model=ollama_model, token_limit=4096)
+
+user_msg = BaseMessage.make_user_message(
+    role_name="User", content="Say hi to CAMEL"
+)
+assistant_response = agent.step(user_msg)
+print(assistant_response.msg.content)
+```
 
 ## Data (Hosted on Hugging Face)
 | Dataset | Chat format | Instruction format | Chat format (translated) |
{camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/README.md

@@ -104,13 +104,13 @@ exit
 Install `CAMEL` from source with conda and pip:
 ```sh
 # Create a conda virtual environment
-conda create --name camel python=3.
+conda create --name camel python=3.9
 
 # Activate CAMEL conda environment
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.2 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.4 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -170,54 +170,67 @@ python examples/ai_society/role_playing.py
 Please note that the environment variable is session-specific. If you open a new terminal window or tab, you will need to set the API key again in that new session.
 
 
-## Use Open-Source Models as Backends
+## Use Open-Source Models as Backends (ex. using Ollama to set Llama 3 locally)
 
-
-
-
-
-
-
-
-
-```sh
-# Launch the controller
-python -m fastchat.serve.controller
-
-# Launch the model worker(s)
-python3 -m fastchat.serve.model_worker --model-path meta-llama/Llama-2-7b-chat-hf
-
-# Launch the RESTful API server
-python3 -m fastchat.serve.openai_api_server --host localhost --port 8000
-```
+- Download [Ollama](https://ollama.com/download).
+- After setting up Ollama, pull the Llama3 model by typing the following command into the terminal:
+```bash
+ollama pull llama3
+```
+- Create a ModelFile similar the one below in your project directory.
+```bash
+FROM llama3
 
-
+# Set parameters
+PARAMETER temperature 0.8
+PARAMETER stop Result
 
-
+# Sets a custom system message to specify the behavior of the chat assistant
 
-
-system_message = # ...
+# Leaving it blank for now.
 
-
-
-
-
-
-),
-)
+SYSTEM """ """
+```
+- Create a script to get the base model (llama3) and create a custom model using the ModelFile above. Save this as a .sh file:
+```bash
+#!/bin/zsh
 
-
-
-
-)
-```
+# variables
+model_name="llama3"
+custom_model_name="camel-llama3"
 
-
+#get the base model
+ollama pull $model_name
 
-
-
-
-
+#create the model file
+ollama create $custom_model_name -f ./Llama3ModelFile
+```
+- Navigate to the directory where the script and ModelFile are located and run the script. Enjoy your Llama3 model, enhanced by CAMEL's excellent agents.
+```python
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.models import ModelFactory
+from camel.types import ModelPlatformType
+
+ollama_model = ModelFactory.create(
+    model_platform=ModelPlatformType.OLLAMA,
+    model_type="llama3",
+    url="http://localhost:11434/v1",
+    model_config_dict={"temperature": 0.4},
+)
+
+assistant_sys_msg = BaseMessage.make_assistant_message(
+    role_name="Assistant",
+    content="You are a helpful assistant.",
+)
+agent = ChatAgent(assistant_sys_msg, model=ollama_model, token_limit=4096)
+
+user_msg = BaseMessage.make_user_message(
+    role_name="User", content="Say hi to CAMEL"
+)
+assistant_response = agent.step(user_msg)
+print(assistant_response.msg.content)
+```
 
 ## Data (Hosted on Hugging Face)
 | Dataset | Chat format | Instruction format | Chat format (translated) |
{camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/chat_agent.py

@@ -32,6 +32,7 @@ from camel.responses import ChatAgentResponse
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
+    ModelPlatformType,
     ModelType,
     OpenAIBackendRole,
     RoleType,
@@ -41,7 +42,6 @@ from camel.utils import get_model_encoding
 if TYPE_CHECKING:
     from openai import Stream
 
-    from camel.configs import BaseConfig
     from camel.functions import OpenAIFunction
     from camel.terminators import ResponseTerminator
 
@@ -80,10 +80,9 @@ class ChatAgent(BaseAgent):
 
     Args:
         system_message (BaseMessage): The system message for the chat agent.
-        model_type (ModelType, optional): The LLM model to use for generating
-            responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (BaseConfig, optional): Configuration options for the
-            LLM model. (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         api_key (str, optional): The API key for authenticating with the
             LLM service. Only OpenAI and Anthropic model supported (default:
             :obj:`None`)
@@ -109,8 +108,7 @@ class ChatAgent(BaseAgent):
     def __init__(
         self,
         system_message: BaseMessage,
-        model_type: Optional[ModelType] = None,
-        model_config: Optional[BaseConfig] = None,
+        model: Optional[BaseModelBackend] = None,
         api_key: Optional[str] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: Optional[int] = None,
@@ -123,24 +121,30 @@ class ChatAgent(BaseAgent):
         self.system_message = system_message
         self.role_name: str = system_message.role_name
         self.role_type: RoleType = system_message.role_type
+        self._api_key = api_key
+        self.model_backend: BaseModelBackend = (
+            model
+            if model is not None
+            else ModelFactory.create(
+                model_platform=ModelPlatformType.OPENAI,
+                model_type=ModelType.GPT_3_5_TURBO,
+                model_config_dict=ChatGPTConfig().__dict__,
+                api_key=self._api_key,
+            )
+        )
         self.output_language: Optional[str] = output_language
         if self.output_language is not None:
             self.set_output_language(self.output_language)
 
-        self.model_type: ModelType = (
-            model_type if model_type is not None else ModelType.GPT_3_5_TURBO
-        )
+        self.model_type: ModelType = self.model_backend.model_type
 
         self.func_dict: Dict[str, Callable] = {}
         if tools is not None:
             for func in tools:
                 self.func_dict[func.get_function_name()] = func.func
 
-        self._api_key = api_key
-        self.model_config = model_config or ChatGPTConfig()
-        self.model_backend: BaseModelBackend = ModelFactory.create(
-            self.model_type, self.model_config.__dict__, self._api_key
-        )
+        self.model_config_dict = self.model_backend.model_config_dict
+
         self.model_token_limit = token_limit or self.model_backend.token_limit
         context_creator = ScoreBasedContextCreator(
             self.model_backend.token_counter,
@@ -643,7 +647,7 @@ class ChatAgent(BaseAgent):
             func = self.func_dict[func_name]
 
             args_str: str = choice.message.tool_calls[0].function.arguments
-            args = json.loads(args_str
+            args = json.loads(args_str)
 
             # Pass the extracted arguments to the indicated function
             try:
@@ -702,7 +706,7 @@ class ChatAgent(BaseAgent):
             func = self.func_dict[func_name]
 
             args_str: str = choice.message.tool_calls[0].function.arguments
-            args = json.loads(args_str
+            args = json.loads(args_str)
 
             # Pass the extracted arguments to the indicated function
             try:
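Taken together, the `chat_agent.py` hunks replace the `model_type`/`model_config` constructor parameters with a single `model` backend built via `ModelFactory`. A minimal before/after sketch of the new construction pattern, assuming only the imports and call signatures visible in this diff (not tested against this exact release):

```python
from camel.agents import ChatAgent
from camel.configs import ChatGPTConfig
from camel.messages import BaseMessage
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

sys_msg = BaseMessage.make_assistant_message(
    role_name="Assistant", content="You are a helpful assistant."
)

# 0.1.5.2 (removed): ChatAgent(sys_msg, model_type=..., model_config=...)
# 0.1.5.4: build the backend explicitly, then hand it to the agent.
model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_3_5_TURBO,
    model_config_dict=ChatGPTConfig().__dict__,
)
agent = ChatAgent(sys_msg, model=model)
# Omitting `model` falls back to the same OpenAI GPT_3_5_TURBO default.
```

Because the fallback now lives in `ChatAgent.__init__`, every agent subclass changed below inherits the same default behavior unchanged.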
{camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/critic_agent.py

@@ -20,8 +20,8 @@ from colorama import Fore
 from camel.agents.chat_agent import ChatAgent
 from camel.memories import AgentMemory
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.responses import ChatAgentResponse
-from camel.types import ModelType
 from camel.utils import get_first_int, print_text_animated
 
 
@@ -31,10 +31,9 @@ class CriticAgent(ChatAgent):
     Args:
         system_message (BaseMessage): The system message for the critic
            agent.
-        model_type (ModelType, optional): The LLM model to use for generating
-            responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): Configuration options for the LLM model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         message_window_size (int, optional): The maximum number of previous
             messages to include in the context window. If `None`, no windowing
             is performed. (default: :obj:`6`)
@@ -48,8 +47,7 @@ class CriticAgent(ChatAgent):
     def __init__(
         self,
         system_message: BaseMessage,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: int = 6,
         retry_attempts: int = 2,
@@ -58,8 +56,7 @@ class CriticAgent(ChatAgent):
     ) -> None:
         super().__init__(
             system_message,
-            model_type=model_type,
-            model_config=model_config,
+            model=model,
             memory=memory,
             message_window_size=message_window_size,
         )
{camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/deductive_reasoner_agent.py

@@ -15,10 +15,10 @@ import re
 from typing import Dict, List, Optional, Union
 
 from camel.agents.chat_agent import ChatAgent
-from camel.configs import BaseConfig
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
-from camel.types import ModelType, RoleType
+from camel.types import RoleType
 
 
 class DeductiveReasonerAgent(ChatAgent):
@@ -33,16 +33,14 @@ class DeductiveReasonerAgent(ChatAgent):
     - L represents the path or process from A to B.
 
     Args:
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (BaseConfig, optional): The configuration for the model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
     """
 
     def __init__(
         self,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[BaseConfig] = None,
+        model: Optional[BaseModelBackend] = None,
     ) -> None:
         system_message = BaseMessage(
             role_name="Insight Agent",
@@ -50,7 +48,7 @@ class DeductiveReasonerAgent(ChatAgent):
             meta_dict=None,
             content="You assign roles based on tasks.",
         )
-        super().__init__(system_message, model_type, model_config)
+        super().__init__(system_message, model=model)
 
     def deduce_conditions_and_quality(
         self,
{camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/embodied_agent.py

@@ -23,8 +23,8 @@ from camel.interpreters import (
     SubprocessInterpreter,
 )
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.responses import ChatAgentResponse
-from camel.types import ModelType
 from camel.utils import print_text_animated
 
 
@@ -33,10 +33,9 @@ class EmbodiedAgent(ChatAgent):
 
     Args:
         system_message (BaseMessage): The system message for the chat agent.
-        model_type (ModelType, optional): The LLM model to use for generating
-            responses. (default :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): Configuration options for the LLM model.
-            (default: :obj:`None`)
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
         message_window_size (int, optional): The maximum number of previous
             messages to include in the context window. If `None`, no windowing
             is performed. (default: :obj:`None`)
@@ -55,8 +54,7 @@ class EmbodiedAgent(ChatAgent):
     def __init__(
         self,
         system_message: BaseMessage,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
         message_window_size: Optional[int] = None,
         tool_agents: Optional[List[BaseToolAgent]] = None,
         code_interpreter: Optional[BaseInterpreter] = None,
@@ -78,8 +76,7 @@ class EmbodiedAgent(ChatAgent):
         self.logger_color = logger_color
         super().__init__(
             system_message=system_message,
-            model_type=model_type,
-            model_config=model_config,
+            model=model,
             message_window_size=message_window_size,
         )
 
{camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/knowledge_graph_agent.py

@@ -11,19 +11,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import Any, Optional, Union
+from typing import Optional, Union
 
-from unstructured.documents.elements import Element
+try:
+    from unstructured.documents.elements import Element
+except ImportError:
+    Element = None
 
 from camel.agents import ChatAgent
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
 from camel.storages.graph_storages.graph_element import (
     GraphElement,
     Node,
     Relationship,
 )
-from camel.types import ModelType, RoleType
+from camel.types import RoleType
 
 text_prompt = """
 You are tasked with extracting nodes and relationships from given content and
@@ -105,16 +109,14 @@ class KnowledgeGraphAgent(ChatAgent):
 
     def __init__(
         self,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
     ) -> None:
         r"""Initialize the `KnowledgeGraphAgent`.
 
         Args:
-            model_type (ModelType, optional): The type of model to use for
-                the agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-            model_config (Any, optional): The configuration for the model.
-                Defaults to `None`.
+            model (BaseModelBackend, optional): The model backend to use for
+                generating responses. (default: :obj:`OpenAIModel` with
+                `GPT_3_5_TURBO`)
         """
         system_message = BaseMessage(
             role_name="Graphify",
@@ -126,7 +128,7 @@ class KnowledgeGraphAgent(ChatAgent):
             "illuminate the hidden connections within the chaos of "
             "information.",
         )
-        super().__init__(system_message, model_type, model_config)
+        super().__init__(system_message, model=model)
 
     def run(
         self,
{camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/role_assignment_agent.py

@@ -12,32 +12,31 @@
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 import re
-from typing import Any, Dict, Optional, Union
+from typing import Dict, Optional, Union
 
 from camel.agents.chat_agent import ChatAgent
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
-from camel.types import ModelType, RoleType
+from camel.types import RoleType
 
 
 class RoleAssignmentAgent(ChatAgent):
     r"""An agent that generates role names based on the task prompt.
 
+    Args:
+        model (BaseModelBackend, optional): The model backend to use for
+            generating responses. (default: :obj:`OpenAIModel` with
+            `GPT_3_5_TURBO`)
+
     Attributes:
         role_assignment_prompt (TextPrompt): A prompt for the agent to generate
             role names.
-
-    Args:
-        model_type (ModelType, optional): The type of model to use for the
-            agent. (default: :obj:`ModelType.GPT_3_5_TURBO`)
-        model_config (Any, optional): The configuration for the model.
-            (default: :obj:`None`)
     """
 
     def __init__(
         self,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
     ) -> None:
         system_message = BaseMessage(
             role_name="Role Assigner",
@@ -45,7 +44,7 @@ class RoleAssignmentAgent(ChatAgent):
             meta_dict=None,
             content="You assign roles based on tasks.",
         )
-        super().__init__(system_message, model_type, model_config)
+        super().__init__(system_message, model=model)
 
     def run(
         self,
{camel_ai-0.1.5.2 → camel_ai-0.1.5.4}/camel/agents/search_agent.py

@@ -11,12 +11,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
-from typing import Any, Optional
+from typing import Optional
 
 from camel.agents.chat_agent import ChatAgent
 from camel.messages import BaseMessage
+from camel.models import BaseModelBackend
 from camel.prompts import TextPrompt
-from camel.types import ModelType, RoleType
+from camel.types import RoleType
 from camel.utils import create_chunks
 
 
@@ -33,8 +34,7 @@ class SearchAgent(ChatAgent):
 
     def __init__(
         self,
-        model_type: ModelType = ModelType.GPT_3_5_TURBO,
-        model_config: Optional[Any] = None,
+        model: Optional[BaseModelBackend] = None,
     ) -> None:
         system_message = BaseMessage(
             role_name="Assistant",
@@ -42,7 +42,7 @@ class SearchAgent(ChatAgent):
             meta_dict=None,
             content="You are a helpful assistant.",
         )
-        super().__init__(system_message, model_type, model_config)
+        super().__init__(system_message, model=model)
 
     def summarize_text(self, text: str, query: str) -> str:
         r"""Summarize the information from the text, base on the query.