camel-ai 0.1.6.7__tar.gz → 0.1.6.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic. Click here for more details.
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/PKG-INFO +19 -10
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/README.md +15 -6
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/__init__.py +1 -1
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/chat_agent.py +44 -3
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/critic_agent.py +0 -1
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/__init__.py +3 -0
- camel_ai-0.1.6.9/camel/configs/reka_config.py +74 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/__init__.py +2 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/model_factory.py +3 -0
- camel_ai-0.1.6.9/camel/models/reka_model.py +232 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/retrievers/auto_retriever.py +4 -1
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/retrievers/vector_retriever.py +6 -1
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/societies/babyagi_playing.py +0 -3
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/societies/role_playing.py +18 -2
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/object_storages/amazon_s3.py +12 -10
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/types/enums.py +33 -1
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/pyproject.toml +9 -6
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/deductive_reasoner_agent.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/embodied_agent.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/knowledge_graph_agent.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/role_assignment_agent.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/search_agent.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/task_agent.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/tool_agents/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/tool_agents/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/anthropic_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/base_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/gemini_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/groq_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/litellm_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/mistral_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/ollama_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/openai_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/samba_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/togetherai_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/vllm_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/configs/zhipuai_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/embeddings/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/embeddings/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/embeddings/mistral_embedding.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/embeddings/openai_embedding.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/embeddings/vlm_embedding.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/generators.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/human.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/interpreters/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/interpreters/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/interpreters/docker_interpreter.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/interpreters/internal_python_interpreter.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/interpreters/interpreter_error.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/interpreters/ipython_interpreter.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/interpreters/subprocess_interpreter.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/loaders/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/loaders/base_io.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/loaders/firecrawl_reader.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/loaders/jina_url_reader.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/loaders/unstructured_io.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/memories/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/memories/agent_memories.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/memories/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/memories/blocks/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/memories/blocks/chat_history_block.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/memories/blocks/vectordb_block.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/memories/context_creators/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/memories/context_creators/score_based.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/memories/records.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/messages/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/messages/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/messages/func_message.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/anthropic_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/azure_openai_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/base_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/gemini_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/groq_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/litellm_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/mistral_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/nemotron_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/ollama_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/open_source_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/openai_audio_models.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/openai_compatibility_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/openai_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/samba_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/stub_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/togetherai_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/vllm_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/models/zhipuai_model.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/ai_society.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/code.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/evaluation.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/generate_text_embedding_data.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/image_craft.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/misalignment.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/multi_condition_image_craft.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/object_recognition.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/prompt_templates.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/role_description_prompt_template.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/solution_extraction.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/task_prompt_template.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/translation.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/prompts/video_description_prompt.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/responses/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/responses/agent_responses.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/retrievers/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/retrievers/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/retrievers/bm25_retriever.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/retrievers/cohere_rerank_retriever.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/societies/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/graph_storages/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/graph_storages/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/graph_storages/graph_element.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/graph_storages/neo4j_graph.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/key_value_storages/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/key_value_storages/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/key_value_storages/in_memory.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/key_value_storages/json.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/key_value_storages/redis.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/object_storages/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/object_storages/azure_blob.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/object_storages/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/object_storages/google_cloud.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/vectordb_storages/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/vectordb_storages/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/vectordb_storages/milvus.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/storages/vectordb_storages/qdrant.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/tasks/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/tasks/task.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/tasks/task_prompt.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/terminators/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/terminators/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/terminators/response_terminator.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/terminators/token_limit_terminator.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/code_execution.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/dalle_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/github_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/google_maps_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/linkedin_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/math_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/biztoc/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/biztoc/ai-plugin.json +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/biztoc/openapi.yaml +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/coursera/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/coursera/openapi.yaml +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/create_qr_code/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/create_qr_code/openapi.yaml +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/klarna/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/klarna/openapi.yaml +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/nasa_apod/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/nasa_apod/openapi.yaml +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/outschool/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/outschool/ai-plugin.json +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/outschool/openapi.yaml +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/outschool/paths/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/outschool/paths/get_classes.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/outschool/paths/search_teachers.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/security_config.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/speak/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/speak/openapi.yaml +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/web_scraper/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/web_scraper/ai-plugin.json +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/web_scraper/openapi.yaml +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/web_scraper/paths/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_specs/web_scraper/paths/scraper.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/open_api_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/openai_function.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/retrieval_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/search_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/slack_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/twitter_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/toolkits/weather_toolkit.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/types/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/types/openai_types.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/utils/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/utils/async_func.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/utils/commons.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/utils/constants.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/utils/token_counting.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/workforce/__init__.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/workforce/base.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/workforce/manager_node.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/workforce/role_playing_node.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/workforce/single_agent_node.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/workforce/task_channel.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/workforce/utils.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/workforce/worker_node.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/workforce/workforce.py +0 -0
- {camel_ai-0.1.6.7 → camel_ai-0.1.6.9}/camel/workforce/workforce_prompt.py +0 -0
|
@@ -1,15 +1,14 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: camel-ai
|
|
3
|
-
Version: 0.1.6.
|
|
3
|
+
Version: 0.1.6.9
|
|
4
4
|
Summary: Communicative Agents for AI Society Study
|
|
5
5
|
Home-page: https://www.camel-ai.org/
|
|
6
6
|
License: Apache-2.0
|
|
7
7
|
Keywords: communicative-ai,ai-societies,artificial-intelligence,deep-learning,multi-agent-systems,cooperative-ai,natural-language-processing,large-language-models
|
|
8
8
|
Author: CAMEL-AI.org
|
|
9
|
-
Requires-Python: >=3.
|
|
9
|
+
Requires-Python: >=3.10.0,<3.12
|
|
10
10
|
Classifier: License :: OSI Approved :: Apache Software License
|
|
11
11
|
Classifier: Programming Language :: Python :: 3
|
|
12
|
-
Classifier: Programming Language :: Python :: 3.9
|
|
13
12
|
Classifier: Programming Language :: Python :: 3.10
|
|
14
13
|
Classifier: Programming Language :: Python :: 3.11
|
|
15
14
|
Provides-Extra: all
|
|
@@ -29,7 +28,7 @@ Requires-Dist: agentops (>=0.3.6,<0.4.0) ; extra == "tools" or extra == "all"
|
|
|
29
28
|
Requires-Dist: anthropic (>=0.29.0,<0.30.0)
|
|
30
29
|
Requires-Dist: azure-storage-blob (>=12.21.0,<13.0.0) ; extra == "object-storages" or extra == "all"
|
|
31
30
|
Requires-Dist: beautifulsoup4 (>=4,<5) ; extra == "tools" or extra == "all"
|
|
32
|
-
Requires-Dist:
|
|
31
|
+
Requires-Dist: botocore (>=1.35.3,<2.0.0) ; extra == "object-storages" or extra == "all"
|
|
33
32
|
Requires-Dist: cohere (>=4.56,<5.0) ; extra == "retrievers" or extra == "all"
|
|
34
33
|
Requires-Dist: colorama (>=0,<1)
|
|
35
34
|
Requires-Dist: curl_cffi (==0.6.2)
|
|
@@ -75,6 +74,7 @@ Requires-Dist: pytest-asyncio (>=0.23.0,<0.24.0) ; extra == "test"
|
|
|
75
74
|
Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "vector-databases" or extra == "all"
|
|
76
75
|
Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "all"
|
|
77
76
|
Requires-Dist: redis (>=5.0.6,<6.0.0) ; extra == "kv-stroages" or extra == "all"
|
|
77
|
+
Requires-Dist: reka-api (>=3.0.8,<4.0.0) ; extra == "model-platforms" or extra == "all"
|
|
78
78
|
Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
|
|
79
79
|
Requires-Dist: sentence-transformers (>=3.0.1,<4.0.0) ; extra == "encoders" or extra == "all"
|
|
80
80
|
Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
|
|
@@ -164,7 +164,7 @@ Some features require extra dependencies:
|
|
|
164
164
|
|
|
165
165
|
Install `CAMEL` from source with poetry (Recommended):
|
|
166
166
|
```sh
|
|
167
|
-
# Make sure your python version is later than 3.
|
|
167
|
+
# Make sure your python version is later than 3.10
|
|
168
168
|
# You can use pyenv to manage multiple python verisons in your sytstem
|
|
169
169
|
|
|
170
170
|
# Clone github repo
|
|
@@ -187,22 +187,31 @@ poetry shell
|
|
|
187
187
|
poetry install
|
|
188
188
|
|
|
189
189
|
# Install CAMEL with all dependencies
|
|
190
|
-
poetry install -E all
|
|
190
|
+
poetry install -E all # (Optional)
|
|
191
191
|
|
|
192
192
|
# Exit the virtual environment
|
|
193
193
|
exit
|
|
194
194
|
```
|
|
195
195
|
|
|
196
|
+
> [!TIP]
|
|
197
|
+
> If you encounter errors when running `poetry install`, it may be due to a cache-related problem. You can try running:
|
|
198
|
+
> ```sh
|
|
199
|
+
> poetry install --no-cache
|
|
200
|
+
> ```
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
|
|
196
205
|
Install `CAMEL` from source with conda and pip:
|
|
197
206
|
```sh
|
|
198
207
|
# Create a conda virtual environment
|
|
199
|
-
conda create --name camel python=3.
|
|
208
|
+
conda create --name camel python=3.10
|
|
200
209
|
|
|
201
210
|
# Activate CAMEL conda environment
|
|
202
211
|
conda activate camel
|
|
203
212
|
|
|
204
213
|
# Clone github repo
|
|
205
|
-
git clone -b v0.1.6.
|
|
214
|
+
git clone -b v0.1.6.9 https://github.com/camel-ai/camel.git
|
|
206
215
|
|
|
207
216
|
# Change directory into project directory
|
|
208
217
|
cd camel
|
|
@@ -415,8 +424,8 @@ We appreciate your interest in contributing to our open-source initiative. We pr
|
|
|
415
424
|
## Contact
|
|
416
425
|
For more information please contact camel.ai.team@gmail.com.
|
|
417
426
|
|
|
418
|
-
[python-image]: https://img.shields.io/badge/Python-3.
|
|
419
|
-
[python-url]: https://docs.python.org/3.
|
|
427
|
+
[python-image]: https://img.shields.io/badge/Python-3.10%2B-brightgreen.svg
|
|
428
|
+
[python-url]: https://docs.python.org/3.10/
|
|
420
429
|
[pytest-image]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml/badge.svg
|
|
421
430
|
[pytest-url]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml
|
|
422
431
|
[docs-image]: https://img.shields.io/badge/Documentation-grey.svg?logo=github
|
|
@@ -72,7 +72,7 @@ Some features require extra dependencies:
|
|
|
72
72
|
|
|
73
73
|
Install `CAMEL` from source with poetry (Recommended):
|
|
74
74
|
```sh
|
|
75
|
-
# Make sure your python version is later than 3.
|
|
75
|
+
# Make sure your python version is later than 3.10
|
|
76
76
|
# You can use pyenv to manage multiple python verisons in your sytstem
|
|
77
77
|
|
|
78
78
|
# Clone github repo
|
|
@@ -95,22 +95,31 @@ poetry shell
|
|
|
95
95
|
poetry install
|
|
96
96
|
|
|
97
97
|
# Install CAMEL with all dependencies
|
|
98
|
-
poetry install -E all
|
|
98
|
+
poetry install -E all # (Optional)
|
|
99
99
|
|
|
100
100
|
# Exit the virtual environment
|
|
101
101
|
exit
|
|
102
102
|
```
|
|
103
103
|
|
|
104
|
+
> [!TIP]
|
|
105
|
+
> If you encounter errors when running `poetry install`, it may be due to a cache-related problem. You can try running:
|
|
106
|
+
> ```sh
|
|
107
|
+
> poetry install --no-cache
|
|
108
|
+
> ```
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
|
|
112
|
+
|
|
104
113
|
Install `CAMEL` from source with conda and pip:
|
|
105
114
|
```sh
|
|
106
115
|
# Create a conda virtual environment
|
|
107
|
-
conda create --name camel python=3.
|
|
116
|
+
conda create --name camel python=3.10
|
|
108
117
|
|
|
109
118
|
# Activate CAMEL conda environment
|
|
110
119
|
conda activate camel
|
|
111
120
|
|
|
112
121
|
# Clone github repo
|
|
113
|
-
git clone -b v0.1.6.
|
|
122
|
+
git clone -b v0.1.6.9 https://github.com/camel-ai/camel.git
|
|
114
123
|
|
|
115
124
|
# Change directory into project directory
|
|
116
125
|
cd camel
|
|
@@ -323,8 +332,8 @@ We appreciate your interest in contributing to our open-source initiative. We pr
|
|
|
323
332
|
## Contact
|
|
324
333
|
For more information please contact camel.ai.team@gmail.com.
|
|
325
334
|
|
|
326
|
-
[python-image]: https://img.shields.io/badge/Python-3.
|
|
327
|
-
[python-url]: https://docs.python.org/3.
|
|
335
|
+
[python-image]: https://img.shields.io/badge/Python-3.10%2B-brightgreen.svg
|
|
336
|
+
[python-url]: https://docs.python.org/3.10/
|
|
328
337
|
[pytest-image]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml/badge.svg
|
|
329
338
|
[pytest-url]: https://github.com/camel-ai/camel/actions/workflows/pytest_package.yml
|
|
330
339
|
[docs-image]: https://img.shields.io/badge/Documentation-grey.svg?logo=github
|
|
@@ -14,6 +14,7 @@
|
|
|
14
14
|
from __future__ import annotations
|
|
15
15
|
|
|
16
16
|
import json
|
|
17
|
+
import logging
|
|
17
18
|
from collections import defaultdict
|
|
18
19
|
from typing import (
|
|
19
20
|
TYPE_CHECKING,
|
|
@@ -61,6 +62,9 @@ if TYPE_CHECKING:
|
|
|
61
62
|
from camel.terminators import ResponseTerminator
|
|
62
63
|
from camel.toolkits import OpenAIFunction
|
|
63
64
|
|
|
65
|
+
|
|
66
|
+
logger = logging.getLogger(__name__)
|
|
67
|
+
|
|
64
68
|
# AgentOps decorator setting
|
|
65
69
|
try:
|
|
66
70
|
import os
|
|
@@ -437,10 +441,22 @@ class ChatAgent(BaseAgent):
|
|
|
437
441
|
for base_message_item in output_messages:
|
|
438
442
|
base_message_item.content = str(info['tool_calls'][-1].result)
|
|
439
443
|
|
|
440
|
-
|
|
444
|
+
chat_agent_response = ChatAgentResponse(
|
|
441
445
|
msgs=output_messages, terminated=self.terminated, info=info
|
|
442
446
|
)
|
|
443
447
|
|
|
448
|
+
# If the output result is single message, it will be
|
|
449
|
+
# automatically added to the memory.
|
|
450
|
+
if len(chat_agent_response.msgs) == 1:
|
|
451
|
+
self.record_message(chat_agent_response.msg)
|
|
452
|
+
else:
|
|
453
|
+
logger.warning(
|
|
454
|
+
"Multiple messages are available in `ChatAgentResponse`. "
|
|
455
|
+
"Please manually run the `record_message` function to "
|
|
456
|
+
"record the selected message."
|
|
457
|
+
)
|
|
458
|
+
return chat_agent_response
|
|
459
|
+
|
|
444
460
|
async def step_async(
|
|
445
461
|
self,
|
|
446
462
|
input_message: BaseMessage,
|
|
@@ -563,10 +579,23 @@ class ChatAgent(BaseAgent):
|
|
|
563
579
|
for base_message_item in output_messages:
|
|
564
580
|
base_message_item.content = str(info['tool_calls'][0].result)
|
|
565
581
|
|
|
566
|
-
|
|
582
|
+
chat_agent_response = ChatAgentResponse(
|
|
567
583
|
msgs=output_messages, terminated=self.terminated, info=info
|
|
568
584
|
)
|
|
569
585
|
|
|
586
|
+
# If the output result is single message, it will be
|
|
587
|
+
# automatically added to the memory.
|
|
588
|
+
if len(chat_agent_response.msgs) == 1:
|
|
589
|
+
self.record_message(chat_agent_response.msg)
|
|
590
|
+
else:
|
|
591
|
+
logger.warning(
|
|
592
|
+
"Multiple messages are presented in `chat_agent_response`. "
|
|
593
|
+
"Please manually call the `record_message` function to "
|
|
594
|
+
"record the chosen message."
|
|
595
|
+
)
|
|
596
|
+
|
|
597
|
+
return chat_agent_response
|
|
598
|
+
|
|
570
599
|
def _add_tools_for_func_call(
|
|
571
600
|
self,
|
|
572
601
|
response: ChatCompletion,
|
|
@@ -736,7 +765,9 @@ class ChatAgent(BaseAgent):
|
|
|
736
765
|
str(choice.finish_reason) for choice in response.choices
|
|
737
766
|
]
|
|
738
767
|
usage = (
|
|
739
|
-
|
|
768
|
+
self._safe_model_dump(response.usage)
|
|
769
|
+
if response.usage is not None
|
|
770
|
+
else {}
|
|
740
771
|
)
|
|
741
772
|
return (
|
|
742
773
|
output_messages,
|
|
@@ -745,6 +776,16 @@ class ChatAgent(BaseAgent):
|
|
|
745
776
|
response.id,
|
|
746
777
|
)
|
|
747
778
|
|
|
779
|
+
def _safe_model_dump(self, obj):
|
|
780
|
+
# Check if the `model_dump` method exists (Pydantic v2)
|
|
781
|
+
if hasattr(obj, 'model_dump'):
|
|
782
|
+
return obj.model_dump()
|
|
783
|
+
# Fallback to `dict()` method (Pydantic v1)
|
|
784
|
+
elif hasattr(obj, 'dict'):
|
|
785
|
+
return obj.dict()
|
|
786
|
+
else:
|
|
787
|
+
raise TypeError("The object is not a Pydantic model")
|
|
788
|
+
|
|
748
789
|
def handle_stream_response(
|
|
749
790
|
self,
|
|
750
791
|
response: Stream[ChatCompletionChunk],
|
|
@@ -19,6 +19,7 @@ from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
|
|
|
19
19
|
from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
|
|
20
20
|
from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
|
|
21
21
|
from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig, OpenSourceConfig
|
|
22
|
+
from .reka_config import REKA_API_PARAMS, RekaConfig
|
|
22
23
|
from .samba_config import SAMBA_API_PARAMS, SambaConfig
|
|
23
24
|
from .togetherai_config import TOGETHERAI_API_PARAMS, TogetherAIConfig
|
|
24
25
|
from .vllm_config import VLLM_API_PARAMS, VLLMConfig
|
|
@@ -45,6 +46,8 @@ __all__ = [
|
|
|
45
46
|
'VLLM_API_PARAMS',
|
|
46
47
|
'MistralConfig',
|
|
47
48
|
'MISTRAL_API_PARAMS',
|
|
49
|
+
'RekaConfig',
|
|
50
|
+
'REKA_API_PARAMS',
|
|
48
51
|
'SambaConfig',
|
|
49
52
|
'SAMBA_API_PARAMS',
|
|
50
53
|
'TogetherAIConfig',
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the “License”);
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an “AS IS” BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
from typing import Any, Optional, Union
|
|
17
|
+
|
|
18
|
+
from camel.configs.base_config import BaseConfig
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class RekaConfig(BaseConfig):
|
|
22
|
+
r"""Defines the parameters for generating chat completions using the
|
|
23
|
+
Reka API.
|
|
24
|
+
|
|
25
|
+
Reference: https://docs.reka.ai/api-reference/chat/create
|
|
26
|
+
|
|
27
|
+
Args:
|
|
28
|
+
temperature (Optional[float], optional): temperature the temperature
|
|
29
|
+
to use for sampling, e.g. 0.5.
|
|
30
|
+
top_p (Optional[float], optional): the cumulative probability of
|
|
31
|
+
tokens to generate, e.g. 0.9. Defaults to None.
|
|
32
|
+
top_k (Optional[int], optional): Parameter which forces the model to
|
|
33
|
+
only consider the tokens with the `top_k` highest probabilities at
|
|
34
|
+
the next step. Defaults to 1024.
|
|
35
|
+
max_tokens (Optional[int], optional): the maximum number of tokens to
|
|
36
|
+
generate, e.g. 100. Defaults to None.
|
|
37
|
+
stop (Optional[Union[str,list[str]]]): Stop generation if this token
|
|
38
|
+
is detected. Or if one of these tokens is detected when providing
|
|
39
|
+
a string list.
|
|
40
|
+
seed (Optional[int], optional): the random seed to use for sampling, e.
|
|
41
|
+
g. 42. Defaults to None.
|
|
42
|
+
presence_penalty (float, optional): Number between :obj:`-2.0` and
|
|
43
|
+
:obj:`2.0`. Positive values penalize new tokens based on whether
|
|
44
|
+
they appear in the text so far, increasing the model's likelihood
|
|
45
|
+
to talk about new topics. See more information about frequency and
|
|
46
|
+
presence penalties. (default: :obj:`0.0`)
|
|
47
|
+
frequency_penalty (float, optional): Number between :obj:`-2.0` and
|
|
48
|
+
:obj:`2.0`. Positive values penalize new tokens based on their
|
|
49
|
+
existing frequency in the text so far, decreasing the model's
|
|
50
|
+
likelihood to repeat the same line verbatim. See more information
|
|
51
|
+
about frequency and presence penalties. (default: :obj:`0.0`)
|
|
52
|
+
use_search_engine (Optional[bool]): Whether to consider using search
|
|
53
|
+
engine to complete the request. Note that even if this is set to
|
|
54
|
+
`True`, the model might decide to not use search.
|
|
55
|
+
"""
|
|
56
|
+
|
|
57
|
+
temperature: Optional[float] = None
|
|
58
|
+
top_p: Optional[float] = None
|
|
59
|
+
top_k: Optional[int] = None
|
|
60
|
+
max_tokens: Optional[int] = None
|
|
61
|
+
stop: Optional[Union[str, list[str]]] = None
|
|
62
|
+
seed: Optional[int] = None
|
|
63
|
+
frequency_penalty: float = 0.0
|
|
64
|
+
presence_penalty: float = 0.0
|
|
65
|
+
use_search_engine: Optional[bool] = False
|
|
66
|
+
|
|
67
|
+
def as_dict(self) -> dict[str, Any]:
|
|
68
|
+
config_dict = super().as_dict()
|
|
69
|
+
if "tools" in config_dict:
|
|
70
|
+
del config_dict["tools"] # Reka does not support tool calling
|
|
71
|
+
return config_dict
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
REKA_API_PARAMS = {param for param in RekaConfig().model_fields.keys()}
|
|
@@ -25,6 +25,7 @@ from .open_source_model import OpenSourceModel
|
|
|
25
25
|
from .openai_audio_models import OpenAIAudioModels
|
|
26
26
|
from .openai_compatibility_model import OpenAICompatibilityModel
|
|
27
27
|
from .openai_model import OpenAIModel
|
|
28
|
+
from .reka_model import RekaModel
|
|
28
29
|
from .samba_model import SambaModel
|
|
29
30
|
from .stub_model import StubModel
|
|
30
31
|
from .togetherai_model import TogetherAIModel
|
|
@@ -49,6 +50,7 @@ __all__ = [
|
|
|
49
50
|
'VLLMModel',
|
|
50
51
|
'GeminiModel',
|
|
51
52
|
'OpenAICompatibilityModel',
|
|
53
|
+
'RekaModel',
|
|
52
54
|
'SambaModel',
|
|
53
55
|
'TogetherAIModel',
|
|
54
56
|
]
|
|
@@ -24,6 +24,7 @@ from camel.models.ollama_model import OllamaModel
|
|
|
24
24
|
from camel.models.open_source_model import OpenSourceModel
|
|
25
25
|
from camel.models.openai_compatibility_model import OpenAICompatibilityModel
|
|
26
26
|
from camel.models.openai_model import OpenAIModel
|
|
27
|
+
from camel.models.reka_model import RekaModel
|
|
27
28
|
from camel.models.samba_model import SambaModel
|
|
28
29
|
from camel.models.stub_model import StubModel
|
|
29
30
|
from camel.models.togetherai_model import TogetherAIModel
|
|
@@ -93,6 +94,8 @@ class ModelFactory:
|
|
|
93
94
|
model_class = GeminiModel
|
|
94
95
|
elif model_platform.is_mistral and model_type.is_mistral:
|
|
95
96
|
model_class = MistralModel
|
|
97
|
+
elif model_platform.is_reka and model_type.is_reka:
|
|
98
|
+
model_class = RekaModel
|
|
96
99
|
elif model_platform.is_samba and model_type.is_samba:
|
|
97
100
|
model_class = SambaModel
|
|
98
101
|
elif model_type == ModelType.STUB:
|
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
|
+
import os
|
|
15
|
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional
|
|
16
|
+
|
|
17
|
+
from camel.configs import REKA_API_PARAMS
|
|
18
|
+
from camel.messages import OpenAIMessage
|
|
19
|
+
from camel.models import BaseModelBackend
|
|
20
|
+
from camel.types import ChatCompletion, ModelType
|
|
21
|
+
from camel.utils import (
|
|
22
|
+
BaseTokenCounter,
|
|
23
|
+
OpenAITokenCounter,
|
|
24
|
+
api_keys_required,
|
|
25
|
+
)
|
|
26
|
+
|
|
27
|
+
if TYPE_CHECKING:
|
|
28
|
+
from reka.types import ChatMessage, ChatResponse
|
|
29
|
+
|
|
30
|
+
# AgentOps LLM-event tracking is optional: it is enabled only when the
# AGENTOPS_API_KEY environment variable is set AND the `agentops` package is
# importable. In every other case `LLMEvent` is None and `RekaModel.run`
# skips event recording. (`os` is already imported at module top level.)
try:
    if os.getenv("AGENTOPS_API_KEY") is not None:
        from agentops import LLMEvent, record
    else:
        # No API key configured: behave exactly as if agentops were missing.
        raise ImportError
except (ImportError, AttributeError):
    LLMEvent = None
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class RekaModel(BaseModelBackend):
    r"""Reka API in a unified BaseModelBackend interface."""

    def __init__(
        self,
        model_type: ModelType,
        model_config_dict: Dict[str, Any],
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
    ) -> None:
        r"""Constructor for Reka backend.

        Args:
            model_type (ModelType): Model for which a backend is created,
                one of REKA_* series.
            model_config_dict (Dict[str, Any]): A dictionary that will
                be fed into `Reka.chat.create`.
            api_key (Optional[str]): The API key for authenticating with the
                Reka service. Falls back to the `REKA_API_KEY` environment
                variable when not given. (default: :obj:`None`)
            url (Optional[str]): The url to the Reka service. Falls back to
                the `REKA_SERVER_URL` environment variable when not given.
            token_counter (Optional[BaseTokenCounter]): Token counter to use
                for the model. If not provided, `OpenAITokenCounter` will be
                used.
        """
        super().__init__(
            model_type, model_config_dict, api_key, url, token_counter
        )
        self._api_key = api_key or os.environ.get("REKA_API_KEY")
        self._url = url or os.environ.get("REKA_SERVER_URL")

        # Imported lazily so the module can be imported without the optional
        # `reka` SDK installed.
        from reka.client import Reka

        self._client = Reka(api_key=self._api_key, base_url=self._url)
        # Lazily initialized by the `token_counter` property.
        self._token_counter: Optional[BaseTokenCounter] = None

    def _convert_reka_to_openai_response(
        self, response: 'ChatResponse'
    ) -> ChatCompletion:
        r"""Converts a Reka `ChatResponse` to an OpenAI-style `ChatCompletion`
        response.

        Only the first entry of `response.responses` is converted; the Reka
        usage object is passed through as-is (its field names differ from
        OpenAI's, e.g. `input_tokens`/`output_tokens`).

        Args:
            response (ChatResponse): The response object from the Reka API.

        Returns:
            ChatCompletion: An OpenAI-compatible chat completion response.
        """
        # `construct` skips pydantic validation, which lets us pass Reka's
        # usage object straight through without remapping its fields.
        openai_response = ChatCompletion.construct(
            id=response.id,
            choices=[
                dict(
                    message={
                        "role": response.responses[0].message.role,
                        "content": response.responses[0].message.content,
                    },
                    finish_reason=response.responses[0].finish_reason
                    if response.responses[0].finish_reason
                    else None,
                )
            ],
            created=None,
            model=response.model,
            object="chat.completion",
            usage=response.usage,
        )

        return openai_response

    def _convert_openai_to_reka_messages(
        self,
        messages: List[OpenAIMessage],
    ) -> List["ChatMessage"]:
        r"""Converts OpenAI API messages to Reka API messages.

        System messages are mapped to `user` messages (Reka has no system
        role here), immediately followed by an empty `assistant` message to
        keep the required user/assistant alternation intact.

        Args:
            messages (List[OpenAIMessage]): A list of messages in OpenAI
                format.

        Returns:
            List[ChatMessage]: A list of messages converted to Reka's format.

        Raises:
            ValueError: If a message has a role other than `user`,
                `assistant`, or `system`.
        """
        from reka.types import ChatMessage

        reka_messages = []
        for msg in messages:
            role = msg.get("role")
            # Non-string content (e.g. None) is coerced to its string form.
            content = str(msg.get("content"))

            if role == "user":
                reka_messages.append(ChatMessage(role="user", content=content))
            elif role == "assistant":
                reka_messages.append(
                    ChatMessage(role="assistant", content=content)
                )
            elif role == "system":
                reka_messages.append(ChatMessage(role="user", content=content))

                # Add one more assistant msg since Reka requires conversation
                # history must alternate between 'user' and 'assistant',
                # starting and ending with 'user'.
                reka_messages.append(
                    ChatMessage(
                        role="assistant",
                        content="",
                    )
                )
            else:
                raise ValueError(f"Unsupported message role: {role}")

        return reka_messages

    @property
    def token_counter(self) -> BaseTokenCounter:
        r"""Initialize the token counter for the model backend.

        # NOTE: Temporarily using `OpenAITokenCounter` (GPT_4O_MINI
        # tokenization), so counts are an approximation of Reka's actual
        # tokenizer.

        Returns:
            BaseTokenCounter: The token counter following the model's
                tokenization style.
        """
        if not self._token_counter:
            self._token_counter = OpenAITokenCounter(
                model=ModelType.GPT_4O_MINI
            )
        return self._token_counter

    @api_keys_required("REKA_API_KEY")
    def run(
        self,
        messages: List[OpenAIMessage],
    ) -> ChatCompletion:
        r"""Runs inference of Reka chat completion.

        Args:
            messages (List[OpenAIMessage]): Message list with the chat history
                in OpenAI API format.

        Returns:
            ChatCompletion.
        """
        reka_messages = self._convert_openai_to_reka_messages(messages)

        response = self._client.chat.create(
            messages=reka_messages,
            model=self.model_type.value,
            **self.model_config_dict,
        )

        openai_response = self._convert_reka_to_openai_response(response)

        # Add AgentOps LLM Event tracking (only when the optional agentops
        # integration was enabled at import time).
        if LLMEvent:
            llm_event = LLMEvent(
                thread_id=openai_response.id,
                prompt=" ".join(
                    [message.get("content") for message in messages]  # type: ignore[misc]
                ),
                prompt_tokens=openai_response.usage.input_tokens,  # type: ignore[union-attr]
                completion=openai_response.choices[0].message.content,
                completion_tokens=openai_response.usage.output_tokens,  # type: ignore[union-attr]
                model=self.model_type.value,
            )
            record(llm_event)

        return openai_response

    def check_model_config(self) -> None:
        r"""Check whether the model configuration contains any
        unexpected arguments to Reka API.

        Raises:
            ValueError: If the model configuration dictionary contains any
                unexpected arguments to Reka API.
        """
        for param in self.model_config_dict:
            if param not in REKA_API_PARAMS:
                raise ValueError(
                    f"Unexpected argument `{param}` is "
                    "input into Reka model backend."
                )

    @property
    def stream(self) -> bool:
        r"""Returns whether the model is in stream mode, which sends partial
        results each time.

        Returns:
            bool: Whether the model is in stream mode.
        """
        return self.model_config_dict.get('stream', False)
|
|
@@ -181,6 +181,7 @@ class AutoRetriever:
|
|
|
181
181
|
top_k: int = DEFAULT_TOP_K_RESULTS,
|
|
182
182
|
similarity_threshold: float = DEFAULT_SIMILARITY_THRESHOLD,
|
|
183
183
|
return_detailed_info: bool = False,
|
|
184
|
+
max_characters: int = 500,
|
|
184
185
|
) -> dict[str, Sequence[Collection[str]]]:
|
|
185
186
|
r"""Executes the automatic vector retriever process using vector
|
|
186
187
|
storage.
|
|
@@ -198,6 +199,8 @@ class AutoRetriever:
|
|
|
198
199
|
return_detailed_info (bool, optional): Whether to return detailed
|
|
199
200
|
information including similarity score, content path and
|
|
200
201
|
metadata. Defaults to `False`.
|
|
202
|
+
max_characters (int): Max number of characters in each chunk.
|
|
203
|
+
Defaults to `500`.
|
|
201
204
|
|
|
202
205
|
Returns:
|
|
203
206
|
dict[str, Sequence[Collection[str]]]: By default, returns
|
|
@@ -262,7 +265,7 @@ class AutoRetriever:
|
|
|
262
265
|
storage=vector_storage_instance,
|
|
263
266
|
embedding_model=self.embedding_model,
|
|
264
267
|
)
|
|
265
|
-
vr.process(content)
|
|
268
|
+
vr.process(content=content, max_characters=max_characters)
|
|
266
269
|
else:
|
|
267
270
|
vr = VectorRetriever(
|
|
268
271
|
storage=vector_storage_instance,
|
|
@@ -76,6 +76,7 @@ class VectorRetriever(BaseRetriever):
|
|
|
76
76
|
self,
|
|
77
77
|
content: Union[str, Element],
|
|
78
78
|
chunk_type: str = "chunk_by_title",
|
|
79
|
+
max_characters: int = 500,
|
|
79
80
|
**kwargs: Any,
|
|
80
81
|
) -> None:
|
|
81
82
|
r"""Processes content from a file or URL, divides it into chunks by
|
|
@@ -87,6 +88,8 @@ class VectorRetriever(BaseRetriever):
|
|
|
87
88
|
string content or Element object.
|
|
88
89
|
chunk_type (str): Type of chunking going to apply. Defaults to
|
|
89
90
|
"chunk_by_title".
|
|
91
|
+
max_characters (int): Max number of characters in each chunk.
|
|
92
|
+
Defaults to `500`.
|
|
90
93
|
**kwargs (Any): Additional keyword arguments for content parsing.
|
|
91
94
|
"""
|
|
92
95
|
if isinstance(content, Element):
|
|
@@ -101,7 +104,9 @@ class VectorRetriever(BaseRetriever):
|
|
|
101
104
|
elements = [self.uio.create_element_from_text(text=content)]
|
|
102
105
|
if elements:
|
|
103
106
|
chunks = self.uio.chunk_elements(
|
|
104
|
-
chunk_type=chunk_type,
|
|
107
|
+
chunk_type=chunk_type,
|
|
108
|
+
elements=elements,
|
|
109
|
+
max_characters=max_characters,
|
|
105
110
|
)
|
|
106
111
|
if not elements:
|
|
107
112
|
warnings.warn(
|
|
@@ -243,9 +243,6 @@ class BabyAGI:
|
|
|
243
243
|
|
|
244
244
|
assistant_response = self.assistant_agent.step(assistant_msg_msg)
|
|
245
245
|
assistant_msg = assistant_response.msgs[0]
|
|
246
|
-
self.assistant_agent.record_message(assistant_msg)
|
|
247
|
-
self.task_creation_agent.record_message(assistant_msg)
|
|
248
|
-
self.task_prioritization_agent.record_message(assistant_msg)
|
|
249
246
|
|
|
250
247
|
self.solved_subtasks.append(task_name)
|
|
251
248
|
past_tasks = self.solved_subtasks + list(self.subtasks)
|