camel-ai 0.1.5.6__tar.gz → 0.1.6.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/PKG-INFO +45 -3
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/README.md +36 -1
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/__init__.py +1 -1
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/chat_agent.py +249 -36
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/critic_agent.py +18 -2
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/deductive_reasoner_agent.py +16 -4
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/embodied_agent.py +20 -6
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/knowledge_graph_agent.py +24 -5
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/role_assignment_agent.py +13 -1
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/search_agent.py +16 -5
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/task_agent.py +20 -5
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/configs/__init__.py +11 -9
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/configs/anthropic_config.py +5 -6
- camel_ai-0.1.6.1/camel/configs/base_config.py +68 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/configs/gemini_config.py +69 -17
- camel_ai-0.1.6.1/camel/configs/groq_config.py +105 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/configs/litellm_config.py +2 -8
- camel_ai-0.1.6.1/camel/configs/mistral_config.py +78 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/configs/ollama_config.py +5 -7
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/configs/openai_config.py +12 -23
- camel_ai-0.1.6.1/camel/configs/vllm_config.py +102 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/configs/zhipuai_config.py +5 -11
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/embeddings/__init__.py +2 -0
- camel_ai-0.1.6.1/camel/embeddings/mistral_embedding.py +89 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/human.py +1 -1
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/interpreters/__init__.py +2 -0
- camel_ai-0.1.6.1/camel/interpreters/ipython_interpreter.py +167 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/loaders/__init__.py +2 -0
- camel_ai-0.1.6.1/camel/loaders/firecrawl_reader.py +213 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/memories/agent_memories.py +1 -4
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/memories/blocks/chat_history_block.py +6 -2
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/memories/blocks/vectordb_block.py +3 -1
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/memories/context_creators/score_based.py +6 -6
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/memories/records.py +9 -7
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/messages/base.py +1 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/__init__.py +8 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/anthropic_model.py +7 -2
- camel_ai-0.1.6.1/camel/models/azure_openai_model.py +152 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/base_model.py +9 -2
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/gemini_model.py +14 -2
- camel_ai-0.1.6.1/camel/models/groq_model.py +131 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/litellm_model.py +26 -4
- camel_ai-0.1.6.1/camel/models/mistral_model.py +169 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/model_factory.py +30 -3
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/ollama_model.py +21 -2
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/open_source_model.py +13 -5
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/openai_model.py +7 -2
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/stub_model.py +4 -4
- camel_ai-0.1.6.1/camel/models/vllm_model.py +138 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/zhipuai_model.py +7 -4
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/__init__.py +8 -1
- camel_ai-0.1.6.1/camel/prompts/image_craft.py +34 -0
- camel_ai-0.1.6.1/camel/prompts/multi_condition_image_craft.py +34 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/task_prompt_template.py +10 -4
- camel_ai-0.1.5.6/camel/prompts/descripte_video_prompt.py → camel_ai-0.1.6.1/camel/prompts/video_description_prompt.py +1 -1
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/responses/agent_responses.py +4 -3
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/retrievers/auto_retriever.py +2 -2
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/societies/babyagi_playing.py +6 -4
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/societies/role_playing.py +16 -8
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/graph_storages/graph_element.py +10 -14
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/graph_storages/neo4j_graph.py +5 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/vectordb_storages/base.py +24 -13
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/vectordb_storages/milvus.py +1 -1
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/vectordb_storages/qdrant.py +2 -3
- camel_ai-0.1.6.1/camel/tasks/__init__.py +22 -0
- camel_ai-0.1.6.1/camel/tasks/task.py +408 -0
- camel_ai-0.1.6.1/camel/tasks/task_prompt.py +65 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/__init__.py +26 -15
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/toolkits/base.py +4 -2
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/toolkits/code_execution.py +1 -1
- camel_ai-0.1.6.1/camel/toolkits/dalle_toolkit.py +146 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/toolkits/github_toolkit.py +19 -34
- camel_ai-0.1.6.1/camel/toolkits/google_maps_toolkit.py +368 -0
- camel_ai-0.1.6.1/camel/toolkits/math_toolkit.py +79 -0
- camel_ai-0.1.6.1/camel/toolkits/open_api_toolkit.py +547 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/openai_function.py +2 -7
- camel_ai-0.1.6.1/camel/toolkits/retrieval_toolkit.py +76 -0
- camel_ai-0.1.6.1/camel/toolkits/search_toolkit.py +326 -0
- camel_ai-0.1.6.1/camel/toolkits/slack_toolkit.py +308 -0
- camel_ai-0.1.6.1/camel/toolkits/twitter_toolkit.py +522 -0
- camel_ai-0.1.6.1/camel/toolkits/weather_toolkit.py +173 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/types/enums.py +154 -35
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/utils/__init__.py +14 -2
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/utils/async_func.py +1 -1
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/utils/commons.py +152 -2
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/utils/constants.py +3 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/utils/token_counting.py +148 -40
- camel_ai-0.1.6.1/camel/workforce/__init__.py +23 -0
- camel_ai-0.1.6.1/camel/workforce/base.py +50 -0
- camel_ai-0.1.6.1/camel/workforce/manager_node.py +299 -0
- camel_ai-0.1.6.1/camel/workforce/role_playing_node.py +168 -0
- camel_ai-0.1.6.1/camel/workforce/single_agent_node.py +77 -0
- camel_ai-0.1.6.1/camel/workforce/task_channel.py +173 -0
- camel_ai-0.1.6.1/camel/workforce/utils.py +97 -0
- camel_ai-0.1.6.1/camel/workforce/worker_node.py +115 -0
- camel_ai-0.1.6.1/camel/workforce/workforce.py +49 -0
- camel_ai-0.1.6.1/camel/workforce/workforce_prompt.py +125 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/pyproject.toml +35 -11
- camel_ai-0.1.5.6/camel/configs/base_config.py +0 -22
- camel_ai-0.1.5.6/camel/functions/google_maps_function.py +0 -335
- camel_ai-0.1.5.6/camel/functions/math_functions.py +0 -61
- camel_ai-0.1.5.6/camel/functions/open_api_function.py +0 -508
- camel_ai-0.1.5.6/camel/functions/retrieval_functions.py +0 -61
- camel_ai-0.1.5.6/camel/functions/search_functions.py +0 -298
- camel_ai-0.1.5.6/camel/functions/slack_functions.py +0 -286
- camel_ai-0.1.5.6/camel/functions/twitter_function.py +0 -479
- camel_ai-0.1.5.6/camel/functions/weather_functions.py +0 -144
- camel_ai-0.1.5.6/camel/toolkits/__init__.py +0 -23
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/base.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/tool_agents/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/tool_agents/base.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/embeddings/base.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/embeddings/openai_embedding.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/embeddings/vlm_embedding.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/generators.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/interpreters/base.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/interpreters/docker_interpreter.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/interpreters/internal_python_interpreter.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/interpreters/interpreter_error.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/interpreters/subprocess_interpreter.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/loaders/base_io.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/loaders/jina_url_reader.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/loaders/unstructured_io.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/memories/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/memories/base.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/memories/blocks/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/memories/context_creators/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/messages/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/messages/func_message.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/nemotron_model.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/models/openai_audio_models.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/ai_society.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/base.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/code.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/evaluation.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/generate_text_embedding_data.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/misalignment.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/object_recognition.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/prompt_templates.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/role_description_prompt_template.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/solution_extraction.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/prompts/translation.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/responses/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/retrievers/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/retrievers/base.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/retrievers/bm25_retriever.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/retrievers/cohere_rerank_retriever.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/retrievers/vector_retriever.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/societies/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/graph_storages/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/graph_storages/base.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/key_value_storages/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/key_value_storages/base.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/key_value_storages/in_memory.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/key_value_storages/json.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/key_value_storages/redis.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/storages/vectordb_storages/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/terminators/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/terminators/base.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/terminators/response_terminator.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/terminators/token_limit_terminator.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/biztoc/__init__.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/coursera/__init__.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/klarna/__init__.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/outschool/__init__.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/security_config.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/speak/__init__.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/speak/openapi.yaml +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
- {camel_ai-0.1.5.6/camel/functions → camel_ai-0.1.6.1/camel/toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/types/__init__.py +0 -0
- {camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/types/openai_types.py +0 -0
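The biggest structural change in the listing above is the rename of `camel/functions/*` to `camel/toolkits/*` (the per-function modules such as `search_functions.py` are rebuilt as `search_toolkit.py` and friends). Downstream imports change accordingly. A minimal sketch of the before/after; the `add` function here is a hypothetical illustration, but the `OpenAIFunction(...)` wrapping call appears verbatim in the `chat_agent.py` diff below:

```python
# camel-ai 0.1.5.6 (old layout):
# from camel.functions import OpenAIFunction

# camel-ai 0.1.6.1 (new layout), matching the import swap in chat_agent.py:
from camel.toolkits import OpenAIFunction


def add(a: int, b: int) -> int:
    r"""Adds two numbers. Hypothetical tool, for illustration only."""
    return a + b


# Wrap a plain callable so agents can invoke it as an OpenAI-style tool.
add_tool = OpenAIFunction(add)
```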
{camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.6
+Version: 0.1.6.1
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -24,6 +24,7 @@ Provides-Extra: tools
 Provides-Extra: vector-databases
 Requires-Dist: PyMuPDF (>=1.22.5,<2.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: accelerate (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
+Requires-Dist: agentops (>=0.3.6,<0.4.0) ; extra == "tools" or extra == "all"
 Requires-Dist: anthropic (>=0.29.0,<0.30.0)
 Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
 Requires-Dist: cohere (>=4.56,<5.0) ; extra == "retrievers" or extra == "all"
@@ -36,12 +37,18 @@ Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: eval-type-backport (==0.2.0)
+Requires-Dist: firecrawl-py (>=0.0.20,<0.0.21) ; extra == "tools" or extra == "all"
 Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: groq (>=0.5.0,<0.6.0)
 Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: ipykernel (>=6.0.0,<7.0.0)
 Requires-Dist: jsonschema (>=4,<5)
+Requires-Dist: jupyter_client (>=8.6.2,<9.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
-Requires-Dist:
+Requires-Dist: mistral-common (>=1.3.3,<2.0.0) ; extra == "model-platforms" or extra == "all"
+Requires-Dist: mistralai (>=0.4.2,<0.5.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: mock (>=5,<6) ; extra == "test"
 Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
 Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
@@ -191,7 +198,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.6 https://github.com/camel-ai/camel.git
+git clone -b v0.1.6.1 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -317,6 +324,41 @@ Please note that the environment variable is session-specific. If you open a new
 print(assistant_response.msg.content)
 ```
 
+## Use Open-Source Models as Backends (ex. using vLLM to set Phi-3 locally)
+- [Install vLLM](https://docs.vllm.ai/en/latest/getting_started/installation.html)
+- After setting up vLLM, start an OpenAI compatible server for example by
+```bash
+python -m vllm.entrypoints.openai.api_server --model microsoft/Phi-3-mini-4k-instruct --api-key vllm --dtype bfloat16
+```
+- Create and run following script (more details please refer to this [example](https://github.com/camel-ai/camel/blob/master/examples/models/vllm_model_example.py))
+```python
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.models import ModelFactory
+from camel.types import ModelPlatformType
+
+vllm_model = ModelFactory.create(
+    model_platform=ModelPlatformType.VLLM,
+    model_type="microsoft/Phi-3-mini-4k-instruct",
+    url="http://localhost:8000/v1",
+    model_config_dict={"temperature": 0.0},
+    api_key="vllm",
+)
+
+assistant_sys_msg = BaseMessage.make_assistant_message(
+    role_name="Assistant",
+    content="You are a helpful assistant.",
+)
+agent = ChatAgent(assistant_sys_msg, model=vllm_model, token_limit=4096)
+
+user_msg = BaseMessage.make_user_message(
+    role_name="User",
+    content="Say hi to CAMEL AI",
+)
+assistant_response = agent.step(user_msg)
+print(assistant_response.msg.content)
+```
+
 ## Data (Hosted on Hugging Face)
 | Dataset | Chat format | Instruction format | Chat format (translated) |
 |----------------|-----------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|
````
{camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/README.md

````diff
@@ -110,7 +110,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.6 https://github.com/camel-ai/camel.git
+git clone -b v0.1.6.1 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -236,6 +236,41 @@ Please note that the environment variable is session-specific. If you open a new
 print(assistant_response.msg.content)
 ```
 
+## Use Open-Source Models as Backends (ex. using vLLM to set Phi-3 locally)
+- [Install vLLM](https://docs.vllm.ai/en/latest/getting_started/installation.html)
+- After setting up vLLM, start an OpenAI compatible server for example by
+```bash
+python -m vllm.entrypoints.openai.api_server --model microsoft/Phi-3-mini-4k-instruct --api-key vllm --dtype bfloat16
+```
+- Create and run following script (more details please refer to this [example](https://github.com/camel-ai/camel/blob/master/examples/models/vllm_model_example.py))
+```python
+from camel.agents import ChatAgent
+from camel.messages import BaseMessage
+from camel.models import ModelFactory
+from camel.types import ModelPlatformType
+
+vllm_model = ModelFactory.create(
+    model_platform=ModelPlatformType.VLLM,
+    model_type="microsoft/Phi-3-mini-4k-instruct",
+    url="http://localhost:8000/v1",
+    model_config_dict={"temperature": 0.0},
+    api_key="vllm",
+)
+
+assistant_sys_msg = BaseMessage.make_assistant_message(
+    role_name="Assistant",
+    content="You are a helpful assistant.",
+)
+agent = ChatAgent(assistant_sys_msg, model=vllm_model, token_limit=4096)
+
+user_msg = BaseMessage.make_user_message(
+    role_name="User",
+    content="Say hi to CAMEL AI",
+)
+assistant_response = agent.step(user_msg)
+print(assistant_response.msg.content)
+```
+
 ## Data (Hosted on Hugging Face)
 | Dataset | Chat format | Instruction format | Chat format (translated) |
 |----------------|-----------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|
````
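A pattern recurs throughout the Python diffs below: positional constructor calls become keyword arguments (`ChatAgentResponse(msgs=..., terminated=..., info=...)`, `MemoryRecord(message=..., role_at_backend=...)`), and model configs are serialized with `as_dict()` rather than raw attribute access, consistent with the new pydantic-based `camel/configs/base_config.py`. A minimal sketch of the config side, assuming `as_dict()` is the pydantic-style dump that the `FunctionCallingRecord.as_dict` hunk below makes explicit (`return self.model_dump()`):

```python
from camel.configs import ChatGPTConfig

# Typed config object; the backend factory consumes a plain dict,
# as in ChatGPTConfig().as_dict() in the chat_agent.py diff below.
config = ChatGPTConfig(temperature=0.0)
model_config_dict = config.as_dict()  # e.g. {"temperature": 0.0, ...}
```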
{camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/chat_agent.py

````diff
@@ -15,8 +15,18 @@ from __future__ import annotations
 
 import json
 from collections import defaultdict
-from
-
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    List,
+    Optional,
+    Tuple,
+    Union,
+)
+
+from pydantic import BaseModel
 
 from camel.agents.base import BaseAgent
 from camel.configs import ChatGPTConfig
@@ -37,17 +47,33 @@ from camel.types import (
     OpenAIBackendRole,
     RoleType,
 )
-from camel.utils import
+from camel.utils import (
+    Constants,
+    func_string_to_callable,
+    get_model_encoding,
+    get_pydantic_object_schema,
+    json_to_function_code,
+)
 
 if TYPE_CHECKING:
     from openai import Stream
 
-    from camel.functions import OpenAIFunction
     from camel.terminators import ResponseTerminator
+    from camel.toolkits import OpenAIFunction
+
+# AgentOps decorator setting
+try:
+    from agentops import track_agent
+except ImportError:
 
+    def track_agent():
+        def noop(f):
+            return f
 
-
-class FunctionCallingRecord:
+        return noop
+
+
+class FunctionCallingRecord(BaseModel):
     r"""Historical records of functions called in the conversation.
 
     Attributes:
@@ -67,14 +93,17 @@ class FunctionCallingRecord:
         Returns:
             str: Modified string to represent the function calling.
         """
-
         return (
             f"Function Execution: {self.func_name}\n"
            f"\tArgs: {self.args}\n"
            f"\tResult: {self.result}"
         )
 
+    def as_dict(self) -> dict[str, Any]:
+        return self.model_dump()
 
+
+@track_agent(name="ChatAgent")
 class ChatAgent(BaseAgent):
     r"""Class for managing conversations of CAMEL Chat Agents.
 
@@ -82,7 +111,7 @@ class ChatAgent(BaseAgent):
         system_message (BaseMessage): The system message for the chat agent.
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
         api_key (str, optional): The API key for authenticating with the
             LLM service. Only OpenAI and Anthropic model supported (default:
             :obj:`None`)
@@ -127,8 +156,8 @@ class ChatAgent(BaseAgent):
             if model is not None
             else ModelFactory.create(
                 model_platform=ModelPlatformType.OPENAI,
-                model_type=ModelType.
-                model_config_dict=ChatGPTConfig().
+                model_type=ModelType.GPT_4O_MINI,
+                model_config_dict=ChatGPTConfig().as_dict(),
                 api_key=self._api_key,
             )
         )
@@ -209,7 +238,9 @@ class ChatAgent(BaseAgent):
             messages.
             role (OpenAIBackendRole): The backend role type.
         """
-        self.memory.write_record(
+        self.memory.write_record(
+            MemoryRecord(message=message, role_at_backend=role)
+        )
 
     def set_output_language(self, output_language: str) -> BaseMessage:
         r"""Sets the output language for the system message. This method
@@ -268,7 +299,8 @@ class ChatAgent(BaseAgent):
             message.
         """
         system_record = MemoryRecord(
-            self.system_message,
+            message=self.system_message,
+            role_at_backend=OpenAIBackendRole.SYSTEM,
         )
         self.memory.clear()
         self.memory.write_record(system_record)
@@ -287,6 +319,7 @@ class ChatAgent(BaseAgent):
     def step(
         self,
         input_message: BaseMessage,
+        output_schema: Optional[BaseModel] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message.
@@ -297,6 +330,10 @@ class ChatAgent(BaseAgent):
                 either `user` or `assistant` but it will be set to `user`
                 anyway since for the self agent any incoming message is
                 external.
+            output_schema (Optional[BaseModel]): An optional pydantic model
+                that includes value types and field descriptions used to
+                generate a structured response by LLM. This schema helps
+                in defining the expected output format.
 
         Returns:
             ChatAgentResponse: A struct containing the output messages,
@@ -310,7 +347,7 @@ class ChatAgent(BaseAgent):
         tool_calls: List[FunctionCallingRecord] = []
         while True:
             # Format messages and get the token number
-            openai_messages:
+            openai_messages: Optional[List[OpenAIMessage]]
 
             try:
                 openai_messages, num_tokens = self.memory.get_context()
@@ -318,6 +355,15 @@ class ChatAgent(BaseAgent):
                 return self.step_token_exceed(
                     e.args[1], tool_calls, "max_tokens_exceeded"
                 )
+            # use structed output response without tools
+            # If the user provides the output_schema parameter and does not
+            # specify the use of tools, then in the model config of the
+            # chatgent, call the model specified by tools with
+            # return_json_response of OpenAIFunction format, and return a
+            # structured response with the user-specified output schema.
+            if output_schema is not None and len(self.func_dict) == 0:
+                self._add_output_schema_to_tool_list(output_schema)
+
             (
                 response,
                 output_messages,
@@ -333,9 +379,8 @@ class ChatAgent(BaseAgent):
             ):
                 # Tools added for function calling and not in stream mode
 
-
-
-                self.step_tool_call(response)
+                tool_calls, func_assistant_msg, func_result_msg = (
+                    self._add_tools_for_func_call(response, tool_calls)
                 )
 
                 # Update the messages
@@ -344,11 +389,44 @@ class ChatAgent(BaseAgent):
                 )
                 self.update_memory(func_result_msg, OpenAIBackendRole.FUNCTION)
 
-                # Record the function calling
-                tool_calls.append(func_record)
-
             else:
-                #
+                # If the user specifies tools, it is necessary to wait for the
+                # model to complete all tools' calls. Finally, use the
+                # generated response as the input for the structure,
+                # simultaneously calling the return_json_response function.
+                # Call the model again with return_json_response in the format
+                # of OpenAIFunction as the last tool, returning a structured
+                # response with the user-specified output schema.
+                if output_schema is not None and all(
+                    record.func_name
+                    != Constants.FUNC_NAME_FOR_STRUCTURE_OUTPUT
+                    for record in tool_calls
+                ):
+                    self._add_output_schema_to_tool_list(output_schema)
+
+                    (
+                        response,
+                        output_messages,
+                        finish_reasons,
+                        usage_dict,
+                        response_id,
+                    ) = self._step_model_response(openai_messages, num_tokens)
+
+                    if isinstance(response, ChatCompletion):
+                        # Tools added for function calling and not in stream
+                        # mode
+                        tool_calls, func_assistant_msg, func_result_msg = (
+                            self._add_tools_for_func_call(response, tool_calls)
+                        )
+
+                        # Update the messages
+                        self.update_memory(
+                            func_assistant_msg, OpenAIBackendRole.ASSISTANT
+                        )
+                        self.update_memory(
+                            func_result_msg, OpenAIBackendRole.FUNCTION
+                        )
+
             info = self._step_get_info(
                 output_messages,
                 finish_reasons,
@@ -359,20 +437,34 @@ class ChatAgent(BaseAgent):
             )
             break
 
-
+        # if use structure response, set structure result as content of
+        # BaseMessage
+        if output_schema and self.model_type.is_openai:
+            for base_message_item in output_messages:
+                base_message_item.content = str(info['tool_calls'][-1].result)
+
+        return ChatAgentResponse(
+            msgs=output_messages, terminated=self.terminated, info=info
+        )
 
     async def step_async(
         self,
         input_message: BaseMessage,
+        output_schema: Optional[BaseModel] = None,
     ) -> ChatAgentResponse:
         r"""Performs a single step in the chat session by generating a response
         to the input message. This agent step can call async function calls.
 
         Args:
             input_message (BaseMessage): The input message to the agent.
-
-
-
+                Its `role` field that specifies the role at backend may be
+                either `user` or `assistant` but it will be set to `user`
+                anyway since for the self agent any incoming message is
+                external.
+            output_schema (Optional[BaseModel]): An optional pydantic model
+                that includes value types and field descriptions used to
+                generate a structured response by LLM. This schema helps
+                in defining the expected output format.
 
         Returns:
             ChatAgentResponse: A struct containing the output messages,
@@ -386,7 +478,7 @@ class ChatAgent(BaseAgent):
         tool_calls: List[FunctionCallingRecord] = []
         while True:
             # Format messages and get the token number
-            openai_messages:
+            openai_messages: Optional[List[OpenAIMessage]]
 
             try:
                 openai_messages, num_tokens = self.memory.get_context()
@@ -394,6 +486,9 @@ class ChatAgent(BaseAgent):
                 return self.step_token_exceed(
                     e.args[1], tool_calls, "max_tokens_exceeded"
                 )
+            if output_schema is not None:
+                self._add_output_schema_to_tool_list(output_schema)
+
             (
                 response,
                 output_messages,
@@ -426,6 +521,37 @@ class ChatAgent(BaseAgent):
                 tool_calls.append(func_record)
 
             else:
+                # use structed output response without tools
+                if output_schema is not None and all(
+                    record.func_name
+                    != Constants.FUNC_NAME_FOR_STRUCTURE_OUTPUT
+                    for record in tool_calls
+                ):
+                    self._add_output_schema_to_tool_list(output_schema)
+
+                    (
+                        response,
+                        output_messages,
+                        finish_reasons,
+                        usage_dict,
+                        response_id,
+                    ) = self._step_model_response(openai_messages, num_tokens)
+
+                    if isinstance(response, ChatCompletion):
+                        # Tools added for function calling and not in stream
+                        # mode
+                        tool_calls, func_assistant_msg, func_result_msg = (
+                            self._add_tools_for_func_call(response, tool_calls)
+                        )
+
+                        # Update the messages
+                        self.update_memory(
+                            func_assistant_msg, OpenAIBackendRole.ASSISTANT
+                        )
+                        self.update_memory(
+                            func_result_msg, OpenAIBackendRole.FUNCTION
+                        )
+
             # Function calling disabled or not a function calling
             info = self._step_get_info(
                 output_messages,
@@ -437,17 +563,100 @@ class ChatAgent(BaseAgent):
             )
             break
 
-
+        # if use structure response, set structure result as content of
+        # BaseMessage
+        if output_schema and self.model_type.is_openai:
+            for base_message_item in output_messages:
+                base_message_item.content = str(info['tool_calls'][0].result)
+
+        return ChatAgentResponse(
+            msgs=output_messages, terminated=self.terminated, info=info
+        )
+
+    def _add_tools_for_func_call(
+        self,
+        response: ChatCompletion,
+        tool_calls: List[FunctionCallingRecord],
+    ) -> tuple[
+        List[FunctionCallingRecord],
+        FunctionCallingMessage,
+        FunctionCallingMessage,
+    ]:
+        r"""
+        Handles adding tools for function calls based on the response.
+        This method processes a function call within the chat completion
+        response, and records the function call in the provided
+        list of tool calls.
+        Args:
+            response (ChatCompletion): The response object from the chat
+                completion.
+            tool_calls (List[FunctionCallingRecord]): The list to record
+                function calls.
+        Returns:
+            tuple: A tuple containing:
+                - List[FunctionCallingRecord]: The updated list of function
+                  call records.
+                - FunctionCallingMessage: The assistant's message regarding the
+                  function call.
+                - FunctionCallingMessage: The result message of the function
+                  call.
+        """
+
+        # Perform function calling
+        func_assistant_msg, func_result_msg, func_record = self.step_tool_call(
+            response
+        )
+
+        # Record the function call in the list of tool calls
+        tool_calls.append(func_record)
+
+        # Return updated tool calls list, assistant's message, and function
+        # result message
+        return tool_calls, func_assistant_msg, func_result_msg
+
+    def _add_output_schema_to_tool_list(self, output_schema: BaseModel):
+        r"""Handles the structured output response for OpenAI.
+        This method processes the given output schema and integrates the
+        resulting function into the tools for the OpenAI model configuration.
+        Args:
+            output_schema (BaseModel): The schema representing the expected
+                output structure.
+        """
+        from camel.toolkits import OpenAIFunction
+
+        # step 1 extract the output_schema info as json.
+        schema_json = get_pydantic_object_schema(output_schema)
+
+        # step 2 convert output schema json as callable string
+        func_str = json_to_function_code(schema_json)
+
+        # step 3 get callable function from string
+        func_callable = func_string_to_callable(func_str)
+
+        # step 4 add return_json_func into tools
+        func = OpenAIFunction(func_callable)
+        tools = [func]
+        self.func_dict[func.get_function_name()] = func.func
+        if self.model_type.is_openai:
+            self.model_backend.model_config_dict = ChatGPTConfig(
+                tools=tools
+            ).as_dict()
+        elif self.model_type.is_gemini:
+            from camel.configs.gemini_config import GeminiConfig
+
+            self.model_backend.model_config_dict = GeminiConfig(
+                tools=tools
+            ).as_dict()
 
     def _step_model_response(
         self,
-        openai_messages:
+        openai_messages: List[OpenAIMessage],
         num_tokens: int,
     ) -> tuple[
-        ChatCompletion
-
-
-
+        Union[ChatCompletion, Stream],
+        List[BaseMessage],
+        List[str],
+        Dict[str, int],
         str,
     ]:
         r"""Internal function for agent step model response."""
@@ -617,9 +826,9 @@ class ChatAgent(BaseAgent):
         )
 
         return ChatAgentResponse(
-            output_messages,
-            self.terminated,
-            info,
+            msgs=output_messages,
+            terminated=self.terminated,
+            info=info,
         )
 
     def step_tool_call(
@@ -676,7 +885,9 @@ class ChatAgent(BaseAgent):
         )
 
         # Record information about this function call
-        func_record = FunctionCallingRecord(
+        func_record = FunctionCallingRecord(
+            func_name=func_name, args=args, result=result
+        )
         return assist_msg, func_msg, func_record
 
     async def step_tool_call_async(
@@ -735,7 +946,9 @@ class ChatAgent(BaseAgent):
        )
 
         # Record information about this function call
-        func_record = FunctionCallingRecord(
+        func_record = FunctionCallingRecord(
+            func_name=func_name, args=args, result=result
+        )
         return assist_msg, func_msg, func_record
 
     def get_usage_dict(
````
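The net effect of the `output_schema` hunks above: `step()` and `step_async()` now accept a pydantic model, register a JSON-returning function generated from its schema as a tool, and write the structured result back into the response message content. A minimal usage sketch; the `CityInfo` schema is hypothetical, but the call shape follows the new `step()` signature and docstring above:

```python
from pydantic import BaseModel

from camel.agents import ChatAgent
from camel.messages import BaseMessage


class CityInfo(BaseModel):  # hypothetical schema, for illustration only
    city: str
    population: int


agent = ChatAgent(
    BaseMessage.make_assistant_message(
        role_name="Assistant", content="You are a helpful assistant."
    )
)
user_msg = BaseMessage.make_user_message(
    role_name="User", content="Name one large city and its population."
)
# New in 0.1.6.1: pass the schema; per the hunks above, the structured
# result is written into response.msg.content for OpenAI-type models.
response = agent.step(user_msg, output_schema=CityInfo)
print(response.msg.content)
```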
{camel_ai-0.1.5.6 → camel_ai-0.1.6.1}/camel/agents/critic_agent.py

````diff
@@ -24,7 +24,19 @@ from camel.models import BaseModelBackend
 from camel.responses import ChatAgentResponse
 from camel.utils import get_first_int, print_text_animated
 
+# AgentOps decorator setting
+try:
+    from agentops import track_agent
+except ImportError:
 
+    def track_agent():
+        def noop(f):
+            return f
+
+        return noop
+
+
+@track_agent(name="CriticAgent")
 class CriticAgent(ChatAgent):
     r"""A class for the critic agent that assists in selecting an option.
 
@@ -33,7 +45,7 @@ class CriticAgent(ChatAgent):
             agent.
         model (BaseModelBackend, optional): The model backend to use for
             generating responses. (default: :obj:`OpenAIModel` with
-            `
+            `GPT_4O_MINI`)
         message_window_size (int, optional): The maximum number of previous
             messages to include in the context window. If `None`, no windowing
             is performed. (default: :obj:`6`)
@@ -184,4 +196,8 @@ class CriticAgent(ChatAgent):
         output_msg = meta_chat_message.create_new_instance(option)
 
         # TODO: The return `info` can be improved.
-        return ChatAgentResponse(
+        return ChatAgentResponse(
+            msgs=[output_msg],
+            terminated=False,
+            info={},
+        )
````
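Both agent files guard the AgentOps integration with the same optional-dependency shim: import `track_agent` when `agentops` is installed, otherwise substitute a decorator factory that returns an identity decorator. A standalone sketch of the pattern follows; note that this sketch's fallback accepts arbitrary arguments (my generalization, so the `@track_agent(name=...)` call sites work without `agentops`), whereas the diffed fallback takes none:

```python
# Optional-dependency decorator shim, mirroring the pattern added to
# chat_agent.py and critic_agent.py above.
try:
    from agentops import track_agent
except ImportError:

    def track_agent(*args, **kwargs):
        # Swallow whatever the real decorator factory takes (e.g. name="...")
        # and hand back an identity decorator.
        def noop(obj):
            return obj

        return noop


@track_agent(name="MyAgent")  # works whether or not agentops is installed
class MyAgent:
    pass
```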