camel-ai 0.1.5.5__tar.gz → 0.1.5.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/PKG-INFO +3 -2
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/README.md +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/__init__.py +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/chat_agent.py +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/knowledge_graph_agent.py +11 -15
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/task_agent.py +0 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/configs/__init__.py +6 -0
- camel_ai-0.1.5.7/camel/configs/gemini_config.py +98 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/configs/litellm_config.py +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/configs/openai_config.py +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/configs/zhipuai_config.py +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/__init__.py +2 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/base_model.py +4 -1
- camel_ai-0.1.5.7/camel/models/gemini_model.py +203 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/litellm_model.py +16 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/model_factory.py +3 -2
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/ollama_model.py +16 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/zhipuai_model.py +0 -1
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/__init__.py +23 -15
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/toolkits/base.py +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/toolkits/code_execution.py +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/toolkits/github_toolkit.py +3 -2
- camel_ai-0.1.5.7/camel/toolkits/google_maps_toolkit.py +367 -0
- camel_ai-0.1.5.7/camel/toolkits/math_toolkit.py +79 -0
- camel_ai-0.1.5.7/camel/toolkits/open_api_toolkit.py +548 -0
- camel_ai-0.1.5.7/camel/toolkits/retrieval_toolkit.py +76 -0
- camel_ai-0.1.5.7/camel/toolkits/search_toolkit.py +326 -0
- camel_ai-0.1.5.7/camel/toolkits/slack_toolkit.py +308 -0
- camel_ai-0.1.5.7/camel/toolkits/twitter_toolkit.py +522 -0
- camel_ai-0.1.5.7/camel/toolkits/weather_toolkit.py +173 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/types/enums.py +18 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/utils/__init__.py +2 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/utils/async_func.py +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/utils/token_counting.py +34 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/pyproject.toml +6 -1
- camel_ai-0.1.5.5/camel/functions/google_maps_function.py +0 -335
- camel_ai-0.1.5.5/camel/functions/math_functions.py +0 -61
- camel_ai-0.1.5.5/camel/functions/open_api_function.py +0 -508
- camel_ai-0.1.5.5/camel/functions/retrieval_functions.py +0 -61
- camel_ai-0.1.5.5/camel/functions/search_functions.py +0 -298
- camel_ai-0.1.5.5/camel/functions/slack_functions.py +0 -286
- camel_ai-0.1.5.5/camel/functions/twitter_function.py +0 -479
- camel_ai-0.1.5.5/camel/functions/weather_functions.py +0 -144
- camel_ai-0.1.5.5/camel/toolkits/__init__.py +0 -23
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/critic_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/deductive_reasoner_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/embodied_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/role_assignment_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/search_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/tool_agents/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/tool_agents/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/configs/anthropic_config.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/configs/base_config.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/configs/ollama_config.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/embeddings/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/embeddings/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/embeddings/openai_embedding.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/embeddings/vlm_embedding.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/generators.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/human.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/interpreters/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/interpreters/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/interpreters/docker_interpreter.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/interpreters/internal_python_interpreter.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/interpreters/interpreter_error.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/interpreters/subprocess_interpreter.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/loaders/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/loaders/base_io.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/loaders/jina_url_reader.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/loaders/unstructured_io.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/memories/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/memories/agent_memories.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/memories/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/memories/blocks/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/memories/blocks/chat_history_block.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/memories/blocks/vectordb_block.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/memories/context_creators/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/memories/context_creators/score_based.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/memories/records.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/messages/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/messages/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/messages/func_message.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/anthropic_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/nemotron_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/open_source_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/openai_audio_models.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/openai_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/stub_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/ai_society.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/code.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/descripte_video_prompt.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/evaluation.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/generate_text_embedding_data.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/misalignment.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/object_recognition.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/prompt_templates.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/role_description_prompt_template.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/solution_extraction.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/task_prompt_template.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/prompts/translation.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/responses/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/responses/agent_responses.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/retrievers/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/retrievers/auto_retriever.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/retrievers/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/retrievers/bm25_retriever.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/retrievers/cohere_rerank_retriever.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/retrievers/vector_retriever.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/societies/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/societies/babyagi_playing.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/societies/role_playing.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/graph_storages/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/graph_storages/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/graph_storages/graph_element.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/graph_storages/neo4j_graph.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/key_value_storages/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/key_value_storages/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/key_value_storages/in_memory.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/key_value_storages/json.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/key_value_storages/redis.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/vectordb_storages/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/vectordb_storages/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/vectordb_storages/milvus.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/storages/vectordb_storages/qdrant.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/terminators/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/terminators/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/terminators/response_terminator.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/terminators/token_limit_terminator.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/biztoc/__init__.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/biztoc/ai-plugin.json +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/biztoc/openapi.yaml +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/coursera/__init__.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/coursera/openapi.yaml +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/create_qr_code/__init__.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/create_qr_code/openapi.yaml +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/klarna/__init__.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/klarna/openapi.yaml +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/nasa_apod/__init__.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/nasa_apod/openapi.yaml +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/outschool/__init__.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/outschool/ai-plugin.json +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/outschool/openapi.yaml +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/outschool/paths/__init__.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/outschool/paths/get_classes.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/outschool/paths/search_teachers.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/security_config.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/speak/__init__.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/speak/openapi.yaml +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/web_scraper/__init__.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/web_scraper/ai-plugin.json +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/web_scraper/openapi.yaml +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/web_scraper/paths/__init__.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/open_api_specs/web_scraper/paths/scraper.py +0 -0
- {camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/openai_function.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/types/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/types/openai_types.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/utils/commons.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/utils/constants.py +0 -0
{camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.5
+Version: 0.1.5.7
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -36,6 +36,7 @@ Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: jsonschema (>=4,<5)
@@ -190,7 +191,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.5 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.7 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
{camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/README.md
@@ -110,7 +110,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.5 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.7 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
{camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/chat_agent.py
@@ -42,8 +42,8 @@ from camel.utils import get_model_encoding
 if TYPE_CHECKING:
     from openai import Stream
 
-    from camel.functions import OpenAIFunction
     from camel.terminators import ResponseTerminator
+    from camel.toolkits import OpenAIFunction
 
 
 @dataclass(frozen=True)
{camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/agents/knowledge_graph_agent.py
@@ -78,17 +78,16 @@ Expected Output:
 
 Nodes:
 
-Node(id='John', type='Person'
-Node(id='XYZ Corporation', type='Organization'
-Node(id='New York City', type='Location'
+Node(id='John', type='Person')
+Node(id='XYZ Corporation', type='Organization')
+Node(id='New York City', type='Location')
 
 Relationships:
 
 Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
-Corporation', type='Organization'), type='WorksAt'
-{'agent_generated'})
+Corporation', type='Organization'), type='WorksAt')
 Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
-type='Location'), type='ResidesIn'
+type='Location'), type='ResidesIn')
 
 ===== TASK =====
 Please extracts nodes and relationships from given content and structures them
@@ -211,11 +210,10 @@ class KnowledgeGraphAgent(ChatAgent):
         import re
 
         # Regular expressions to extract nodes and relationships
-        node_pattern = r"Node\(id='(.*?)', type='(.*?)'
+        node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
         rel_pattern = (
             r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
-            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'
-            r"properties=\{(.*?)\}\)"
+            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
         )
 
         nodes = {}
@@ -223,8 +221,8 @@ class KnowledgeGraphAgent(ChatAgent):
 
         # Extract nodes
         for match in re.finditer(node_pattern, input_string):
-            id, type
-            properties =
+            id, type = match.groups()
+            properties = {'source': 'agent_created'}
             if id not in nodes:
                 node = Node(id, type, properties)
                 if self._validate_node(node):
@@ -232,10 +230,8 @@ class KnowledgeGraphAgent(ChatAgent):
 
         # Extract relationships
         for match in re.finditer(rel_pattern, input_string):
-            subj_id, subj_type, obj_id, obj_type, rel_type
-
-            )
-            properties = eval(properties_str)
+            subj_id, subj_type, obj_id, obj_type, rel_type = match.groups()
+            properties = {'source': 'agent_created'}
             if subj_id in nodes and obj_id in nodes:
                 subj = nodes[subj_id]
                 obj = nodes[obj_id]
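The net effect of the last three hunks: the agent no longer `eval()`s a properties capture taken from LLM output (removing an arbitrary-code-execution hazard) and instead stamps every parsed node and relationship with a fixed `{'source': 'agent_created'}`. A standalone sketch of the new parsing, runnable outside the package; the sample strings come from the prompt shown above:

```python
import re

# The 0.1.5.7 patterns, copied from the hunk above.
node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
rel_pattern = (
    r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
    r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
)

for m in re.finditer(node_pattern, "Node(id='John', type='Person')"):
    node_id, node_type = m.groups()
    properties = {'source': 'agent_created'}  # fixed value, no eval()
    print(node_id, node_type, properties)

rel = ("Relationship(subj=Node(id='John', type='Person'), "
       "obj=Node(id='XYZ Corporation', type='Organization'), "
       "type='WorksAt')")
print(re.search(rel_pattern, rel).groups())
# -> ('John', 'Person', 'XYZ Corporation', 'Organization', 'WorksAt')
```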
{camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/configs/__init__.py
@@ -13,6 +13,10 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
+from .gemini_config import (
+    Gemini_API_PARAMS,
+    GeminiConfig,
+)
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import (
@@ -35,4 +39,6 @@ __all__ = [
     'OLLAMA_API_PARAMS',
     'ZhipuAIConfig',
     'ZHIPUAI_API_PARAMS',
+    'GeminiConfig',
+    'Gemini_API_PARAMS',
 ]
camel_ai-0.1.5.7/camel/configs/gemini_config.py (new file)
@@ -0,0 +1,98 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+
+from collections.abc import Iterable
+from dataclasses import asdict, dataclass
+from typing import TYPE_CHECKING, Optional
+
+from camel.configs.base_config import BaseConfig
+
+if TYPE_CHECKING:
+    from google.generativeai.protos import Schema
+    from google.generativeai.types.content_types import (
+        FunctionLibraryType,
+        ToolConfigType,
+    )
+    from google.generativeai.types.helper_types import RequestOptionsType
+    from google.generativeai.types.safety_types import SafetySettingOptions
+
+
+@dataclass(frozen=True)
+class GeminiConfig(BaseConfig):
+    r"""A simple dataclass used to configure the generation parameters of
+    `GenerativeModel.generate_content`.
+
+    Args:
+        candidate_count (int, optional): Number of responses to return.
+        stop_sequences (Iterable[str], optional): The set of character
+            sequences (up to 5) that will stop output generation. If specified
+            the API will stop at the first appearance of a stop sequence.
+            The stop sequence will not be included as part of the response.
+        max_output_tokens (int, optional): The maximum number of tokens to
+            include in a candidate. If unset, this will default to
+            output_token_limit specified in the model's specification.
+        temperature (float, optional): Controls the randomness of the output.
+            Note: The default value varies by model, see the
+            `Model.temperature` attribute of the `Model` returned
+            the `genai.get_model` function. Values can range from [0.0,1.0],
+            inclusive. A value closer to 1.0 will produce responses that are
+            more varied and creative, while a value closer to 0.0 will
+            typically result in more straightforward responses from the model.
+        top_p (int, optional): The maximum cumulative probability of tokens to
+            consider when sampling. The model uses combined Top-k and nucleus
+            sampling. Tokens are sorted based on their assigned probabilities
+            so that only the most likely tokens are considered. Top-k sampling
+            directly limits the maximum number of tokens to consider, while
+            Nucleus sampling limits number of tokens
+            based on the cumulative probability. Note: The default value varies
+            by model, see the `Model.top_p` attribute of the `Model` returned
+            the `genai.get_model` function.
+        top_k (int, optional): The maximum number of tokens to consider when
+            sampling. The model uses combined Top-k and nucleus sampling.Top-k
+            sampling considers the set of `top_k` most probable tokens.
+            Defaults to 40. Note: The default value varies by model, see the
+            `Model.top_k` attribute of the `Model` returned the
+            `genai.get_model` function.
+        response_mime_type (str, optional): Output response mimetype of the
+            generated candidate text. Supported mimetype:
+            `text/plain`: (default) Text output.
+            `application/json`: JSON response in the candidates.
+        response_schema (Schema, optional): Specifies the format of the
+            JSON requested if response_mime_type is `application/json`.
+        safety_settings (SafetySettingOptions, optional):
+            Overrides for the model's safety settings.
+        tools (FunctionLibraryType, optional):
+            `protos.Tools` more info coming soon.
+        tool_config (ToolConfigType, optional):
+            more info coming soon.
+        request_options (RequestOptionsType, optional):
+            Options for the request.
+    """
+
+    candidate_count: Optional[int] = None
+    stop_sequences: Optional[Iterable[str]] = None
+    max_output_tokens: Optional[int] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    response_mime_type: Optional[str] = None
+    response_schema: Optional['Schema'] = None
+    safety_settings: Optional['SafetySettingOptions'] = None
+    tools: Optional['FunctionLibraryType'] = None
+    tool_config: Optional['ToolConfigType'] = None
+    request_options: Optional['RequestOptionsType'] = None
+
+
+Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}
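Because `GeminiConfig` is a frozen dataclass, `Gemini_API_PARAMS` is simply the set of its field names, which the backend later uses to validate `model_config_dict`. A minimal sketch of constructing a config, assuming camel-ai 0.1.5.7 is installed; the parameter values here are illustrative:

```python
from dataclasses import asdict

from camel.configs import Gemini_API_PARAMS, GeminiConfig

config = GeminiConfig(temperature=0.4, max_output_tokens=256)

# Every field name is, by construction, a recognized API parameter.
assert set(asdict(config)) == Gemini_API_PARAMS
print(sorted(Gemini_API_PARAMS))
```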
{camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/__init__.py
@@ -13,6 +13,7 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_model import AnthropicModel
 from .base_model import BaseModelBackend
+from .gemini_model import GeminiModel
 from .litellm_model import LiteLLMModel
 from .model_factory import ModelFactory
 from .nemotron_model import NemotronModel
@@ -35,4 +36,5 @@ __all__ = [
     'OpenAIAudioModels',
     'NemotronModel',
     'OllamaModel',
+    'GeminiModel',
 ]
{camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/base_model.py
@@ -109,7 +109,10 @@ class BaseModelBackend(ABC):
         Returns:
             int: The maximum token limit for the given model.
         """
-        return
+        return (
+            self.model_config_dict.get("max_tokens")
+            or self.model_type.token_limit
+        )
 
     @property
     def stream(self) -> bool:
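With this change, a `max_tokens` entry in `model_config_dict` takes precedence over the model type's built-in limit. A small sketch of the new expression's semantics, with plain values standing in for the backend's attributes; note that Python's `or` also falls through on falsy values such as `max_tokens=0` or `None`, not just on a missing key:

```python
# Stand-in for self.model_type.token_limit.
model_type_token_limit = 4096

print({"max_tokens": 2048}.get("max_tokens") or model_type_token_limit)  # 2048
print({}.get("max_tokens") or model_type_token_limit)                    # 4096
print({"max_tokens": 0}.get("max_tokens") or model_type_token_limit)     # 4096, not 0
```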
camel_ai-0.1.5.7/camel/models/gemini_model.py (new file)
@@ -0,0 +1,203 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from camel.configs import Gemini_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionMessage,
+    Choice,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    GeminiTokenCounter,
+    api_keys_required,
+)
+
+if TYPE_CHECKING:
+    from google.generativeai.types import ContentsType, GenerateContentResponse
+
+
+class GeminiModel(BaseModelBackend):
+    r"""Gemini API in a unified BaseModelBackend interface."""
+
+    # NOTE: Currently "stream": True is not supported with Gemini due to the
+    # limitation of the current camel design.
+
+    def __init__(
+        self,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+    ) -> None:
+        r"""Constructor for Gemini backend.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into generate_content().
+            api_key (Optional[str]): The API key for authenticating with the
+                gemini service. (default: :obj:`None`)
+            url (Optional[str]): The url to the gemini service.
+        """
+        import os
+
+        import google.generativeai as genai
+        from google.generativeai.types.generation_types import GenerationConfig
+
+        super().__init__(model_type, model_config_dict, api_key, url)
+        self._api_key = api_key or os.environ.get("GOOGLE_API_KEY")
+        genai.configure(api_key=self._api_key)
+        self._client = genai.GenerativeModel(self.model_type.value)
+        self._token_counter: Optional[BaseTokenCounter] = None
+        keys = list(self.model_config_dict.keys())
+        generation_config_dict = {
+            k: self.model_config_dict.pop(k)
+            for k in keys
+            if hasattr(GenerationConfig, k)
+        }
+        generation_config = genai.types.GenerationConfig(
+            **generation_config_dict
+        )
+        self.model_config_dict["generation_config"] = generation_config
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        if not self._token_counter:
+            self._token_counter = GeminiTokenCounter(self.model_type)
+        return self._token_counter
+
+    @api_keys_required("GOOGLE_API_KEY")
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> ChatCompletion:
+        r"""Runs inference of Gemini model.
+        This method can handle multimodal input
+
+        Args:
+            messages: Message list or Message with the chat history
+                in OpenAi format.
+
+        Returns:
+            response: A ChatCompletion object formatted for the OpenAI API.
+        """
+        response = self._client.generate_content(
+            contents=self.to_gemini_req(messages),
+            **self.model_config_dict,
+        )
+        response.resolve()
+        return self.to_openai_response(response)
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to Gemini API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to OpenAI API.
+        """
+        if self.model_config_dict is not None:
+            for param in self.model_config_dict:
+                if param not in Gemini_API_PARAMS:
+                    raise ValueError(
+                        f"Unexpected argument `{param}` is "
+                        "input into Gemini model backend."
+                    )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode,
+        which sends partial results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
+
+    def to_gemini_req(self, messages: List[OpenAIMessage]) -> 'ContentsType':
+        r"""Converts the request from the OpenAI API format to
+        the Gemini API request format.
+
+        Args:
+            messages: The request object from the OpenAI API.
+
+        Returns:
+            converted_messages: A list of messages formatted for Gemini API.
+        """
+        # role reference
+        # https://ai.google.dev/api/python/google/generativeai/protos/Content
+        converted_messages = []
+        for message in messages:
+            role = message.get('role')
+            if role == 'assistant':
+                role_to_gemini = 'model'
+            else:
+                role_to_gemini = 'user'
+            converted_message = {
+                "role": role_to_gemini,
+                "parts": message.get("content"),
+            }
+            converted_messages.append(converted_message)
+        return converted_messages
+
+    def to_openai_response(
+        self,
+        response: 'GenerateContentResponse',
+    ) -> ChatCompletion:
+        r"""Converts the response from the Gemini API to the OpenAI API
+        response format.
+
+        Args:
+            response: The response object returned by the Gemini API
+
+        Returns:
+            openai_response: A ChatCompletion object formatted for
+                the OpenAI API.
+        """
+        import time
+        import uuid
+
+        openai_response = ChatCompletion(
+            id=f"chatcmpl-{uuid.uuid4().hex!s}",
+            object="chat.completion",
+            created=int(time.time()),
+            model=self.model_type.value,
+            choices=[],
+        )
+        for i, candidate in enumerate(response.candidates):
+            content = ""
+            if candidate.content and len(candidate.content.parts) > 0:
+                content = candidate.content.parts[0].text
+            finish_reason = candidate.finish_reason
+            finish_reason_mapping = {
+                "FinishReason.STOP": "stop",
+                "FinishReason.SAFETY": "content_filter",
+                "FinishReason.RECITATION": "content_filter",
+                "FinishReason.MAX_TOKENS": "length",
+            }
+            finish_reason = finish_reason_mapping.get(finish_reason, "stop")
+            choice = Choice(
+                index=i,
+                message=ChatCompletionMessage(
+                    role="assistant", content=content
+                ),
+                finish_reason=finish_reason,
+            )
+            openai_response.choices.append(choice)
+        return openai_response
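Two conversion helpers do the protocol bridging here. `to_gemini_req` collapses OpenAI's roles onto Gemini's two: `assistant` becomes `model` and everything else, including `system`, is sent as `user`. A standalone sketch of that mapping on plain dicts, mirroring the loop above outside the class:

```python
messages = [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello"},
]

converted = [
    {
        # Gemini contents only use the 'user' and 'model' roles.
        "role": "model" if m.get("role") == "assistant" else "user",
        "parts": m.get("content"),
    }
    for m in messages
]
print([c["role"] for c in converted])  # ['user', 'user', 'model']
```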
{camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/litellm_model.py
@@ -138,3 +138,19 @@ class LiteLLMModel:
                     f"Unexpected argument `{param}` is "
                     "input into LiteLLM model backend."
                 )
+
+    @property
+    def token_limit(self) -> int:
+        """Returns the maximum token limit for the given model.
+
+        Returns:
+            int: The maximum token limit for the given model.
+        """
+        max_tokens = self.model_config_dict.get("max_tokens")
+        if isinstance(max_tokens, int):
+            return max_tokens
+        print(
+            "Must set `max_tokens` as an integer in `model_config_dict` when"
+            " setting up the model. Using 4096 as default value."
+        )
+        return 4096
{camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/model_factory.py
@@ -15,6 +15,7 @@ from typing import Any, Dict, Optional, Union
 
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.base_model import BaseModelBackend
+from camel.models.gemini_model import GeminiModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.open_source_model import OpenSourceModel
@@ -59,7 +60,6 @@ class ModelFactory:
             BaseModelBackend: The initialized backend.
         """
         model_class: Any
-
         if isinstance(model_type, ModelType):
             if model_platform.is_open_source and model_type.is_open_source:
                 model_class = OpenSourceModel
@@ -70,6 +70,8 @@ class ModelFactory:
                 model_class = AnthropicModel
             elif model_platform.is_zhipuai and model_type.is_zhipuai:
                 model_class = ZhipuAIModel
+            elif model_platform.is_gemini and model_type.is_gemini:
+                model_class = GeminiModel
             elif model_type == ModelType.STUB:
                 model_class = StubModel
             else:
@@ -90,5 +92,4 @@ class ModelFactory:
             )
         else:
             raise ValueError(f"Invalid model type `{model_type}` provided.")
-
         return model_class(model_type, model_config_dict, api_key, url)
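With the new `elif`, the factory dispatches to `GeminiModel` whenever both the platform and the model type report `is_gemini`. A hypothetical sketch of the resulting call path; the enum members named here (`ModelPlatformType.GEMINI`, `ModelType.GEMINI_1_5_FLASH`) would come from the `camel/types/enums.py` changes (+18 lines) that this rendering does not include, so treat both names as assumptions:

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType  # assumed members below

# Assumed enum names; only the dispatch logic is taken from the hunk above.
model = ModelFactory.create(
    model_platform=ModelPlatformType.GEMINI,
    model_type=ModelType.GEMINI_1_5_FLASH,
    model_config_dict={"temperature": 0.2},
)
print(type(model).__name__)  # GeminiModel
```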
{camel_ai-0.1.5.5 → camel_ai-0.1.5.7}/camel/models/ollama_model.py
@@ -104,6 +104,22 @@ class OllamaModel:
         )
         return response
 
+    @property
+    def token_limit(self) -> int:
+        """Returns the maximum token limit for the given model.
+
+        Returns:
+            int: The maximum token limit for the given model.
+        """
+        max_tokens = self.model_config_dict.get("max_tokens")
+        if isinstance(max_tokens, int):
+            return max_tokens
+        print(
+            "Must set `max_tokens` as an integer in `model_config_dict` when"
+            " setting up the model. Using 4096 as default value."
+        )
+        return 4096
+
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
{camel_ai-0.1.5.5/camel/functions → camel_ai-0.1.5.7/camel/toolkits}/__init__.py
@@ -19,33 +19,41 @@ from .openai_function import (
 )
 from .open_api_specs.security_config import openapi_security_config
 
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
+from .google_maps_toolkit import MAP_FUNCS, GoogleMapsToolkit
+from .math_toolkit import MATH_FUNCS, MathToolkit
+from .open_api_toolkit import OPENAPI_FUNCS, OpenAPIToolkit
+from .retrieval_toolkit import RETRIEVAL_FUNCS, RetrievalToolkit
+from .search_toolkit import SEARCH_FUNCS, SearchToolkit
+from .twitter_toolkit import TWITTER_FUNCS, TwitterToolkit
+from .weather_toolkit import WEATHER_FUNCS, WeatherToolkit
+from .slack_toolkit import SLACK_FUNCS, SlackToolkit
 
-from .
-
-
-)
+from .base import BaseToolkit
+from .code_execution import CodeExecutionToolkit
+from .github_toolkit import GithubToolkit
 
 __all__ = [
     'OpenAIFunction',
     'get_openai_function_schema',
     'get_openai_tool_schema',
     'openapi_security_config',
-    'apinames_filepaths_to_funs_schemas',
-    'generate_apinames_filepaths',
-    'MAP_FUNCS',
     'MATH_FUNCS',
+    'MAP_FUNCS',
     'OPENAPI_FUNCS',
     'RETRIEVAL_FUNCS',
     'SEARCH_FUNCS',
     'TWITTER_FUNCS',
     'WEATHER_FUNCS',
     'SLACK_FUNCS',
+    'BaseToolkit',
+    'GithubToolkit',
+    'MathToolkit',
+    'GoogleMapsToolkit',
+    'SearchToolkit',
+    'SlackToolkit',
+    'TwitterToolkit',
+    'WeatherToolkit',
+    'RetrievalToolkit',
+    'OpenAPIToolkit',
+    'CodeExecutionToolkit',
 ]
|
|
@@ -13,8 +13,8 @@
|
|
|
13
13
|
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
|
|
14
14
|
from typing import List, Literal
|
|
15
15
|
|
|
16
|
-
from camel.functions import OpenAIFunction
|
|
17
16
|
from camel.interpreters import InternalPythonInterpreter
|
|
17
|
+
from camel.toolkits import OpenAIFunction
|
|
18
18
|
|
|
19
19
|
from .base import BaseToolkit
|
|
20
20
|
|
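Taken together, the last two hunks show the shape of the migration for downstream code: `camel.functions` is gone, and `OpenAIFunction` plus the `*_FUNCS` collections now live in `camel.toolkits` alongside per-service toolkit classes. A before/after sketch, assuming (as its name and the `__all__` list above suggest) that `OpenAIFunction` wraps a plain callable:

```python
# camel-ai 0.1.5.5:
#   from camel.functions import OpenAIFunction, MATH_FUNCS

# camel-ai 0.1.5.7 -- same names, new home:
from camel.toolkits import MATH_FUNCS, OpenAIFunction


def add(a: int, b: int) -> int:
    r"""Adds two numbers."""
    return a + b


# Wrap a plain callable into an OpenAI-style tool definition.
tool = OpenAIFunction(add)
print(len(MATH_FUNCS))
```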