camel-ai 0.1.5.5__tar.gz → 0.1.5.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of camel-ai might be problematic.
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/PKG-INFO +3 -2
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/README.md +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/__init__.py +1 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/knowledge_graph_agent.py +11 -15
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/task_agent.py +0 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/configs/__init__.py +6 -0
- camel_ai-0.1.5.6/camel/configs/gemini_config.py +97 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/__init__.py +2 -0
- camel_ai-0.1.5.6/camel/models/gemini_model.py +203 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/model_factory.py +3 -2
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/types/enums.py +18 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/utils/__init__.py +2 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/utils/token_counting.py +34 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/pyproject.toml +6 -1
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/chat_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/critic_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/deductive_reasoner_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/embodied_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/role_assignment_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/search_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/tool_agents/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/tool_agents/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/configs/anthropic_config.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/configs/base_config.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/configs/litellm_config.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/configs/ollama_config.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/configs/openai_config.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/configs/zhipuai_config.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/embeddings/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/embeddings/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/embeddings/openai_embedding.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/embeddings/vlm_embedding.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/google_maps_function.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/math_functions.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_function.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/biztoc/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/biztoc/ai-plugin.json +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/biztoc/openapi.yaml +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/coursera/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/coursera/openapi.yaml +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/create_qr_code/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/create_qr_code/openapi.yaml +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/klarna/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/klarna/openapi.yaml +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/nasa_apod/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/nasa_apod/openapi.yaml +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/ai-plugin.json +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/openapi.yaml +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/paths/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/paths/get_classes.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/paths/search_teachers.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/security_config.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/speak/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/speak/openapi.yaml +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/web_scraper/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/web_scraper/ai-plugin.json +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/web_scraper/openapi.yaml +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/web_scraper/paths/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/web_scraper/paths/scraper.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/openai_function.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/retrieval_functions.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/search_functions.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/slack_functions.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/twitter_function.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/functions/weather_functions.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/generators.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/human.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/interpreters/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/interpreters/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/interpreters/docker_interpreter.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/interpreters/internal_python_interpreter.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/interpreters/interpreter_error.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/interpreters/subprocess_interpreter.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/loaders/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/loaders/base_io.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/loaders/jina_url_reader.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/loaders/unstructured_io.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/memories/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/memories/agent_memories.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/memories/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/memories/blocks/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/memories/blocks/chat_history_block.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/memories/blocks/vectordb_block.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/memories/context_creators/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/memories/context_creators/score_based.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/memories/records.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/messages/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/messages/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/messages/func_message.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/anthropic_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/base_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/litellm_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/nemotron_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/ollama_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/open_source_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/openai_audio_models.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/openai_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/stub_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/zhipuai_model.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/ai_society.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/code.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/descripte_video_prompt.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/evaluation.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/generate_text_embedding_data.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/misalignment.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/object_recognition.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/prompt_templates.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/role_description_prompt_template.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/solution_extraction.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/task_prompt_template.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/prompts/translation.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/responses/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/responses/agent_responses.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/retrievers/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/retrievers/auto_retriever.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/retrievers/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/retrievers/bm25_retriever.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/retrievers/cohere_rerank_retriever.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/retrievers/vector_retriever.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/societies/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/societies/babyagi_playing.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/societies/role_playing.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/graph_storages/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/graph_storages/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/graph_storages/graph_element.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/graph_storages/neo4j_graph.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/key_value_storages/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/key_value_storages/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/key_value_storages/in_memory.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/key_value_storages/json.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/key_value_storages/redis.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/vectordb_storages/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/vectordb_storages/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/vectordb_storages/milvus.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/storages/vectordb_storages/qdrant.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/terminators/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/terminators/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/terminators/response_terminator.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/terminators/token_limit_terminator.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/toolkits/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/toolkits/base.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/toolkits/code_execution.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/toolkits/github_toolkit.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/types/__init__.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/types/openai_types.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/utils/async_func.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/utils/commons.py +0 -0
- {camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/utils/constants.py +0 -0

{camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.5
+Version: 0.1.5.6
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -36,6 +36,7 @@ Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: jsonschema (>=4,<5)
@@ -190,7 +191,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.5 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.6 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel

{camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/README.md
@@ -110,7 +110,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.5 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.6 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel

{camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/agents/knowledge_graph_agent.py
@@ -78,17 +78,16 @@ Expected Output:
 
 Nodes:
 
-Node(id='John', type='Person'
-Node(id='XYZ Corporation', type='Organization'
-Node(id='New York City', type='Location'
+Node(id='John', type='Person')
+Node(id='XYZ Corporation', type='Organization')
+Node(id='New York City', type='Location')
 
 Relationships:
 
 Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
-Corporation', type='Organization'), type='WorksAt'
-{'agent_generated'})
+Corporation', type='Organization'), type='WorksAt')
 Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
-type='Location'), type='ResidesIn'
+type='Location'), type='ResidesIn')
 
 ===== TASK =====
 Please extracts nodes and relationships from given content and structures them
@@ -211,11 +210,10 @@ class KnowledgeGraphAgent(ChatAgent):
         import re
 
         # Regular expressions to extract nodes and relationships
-        node_pattern = r"Node\(id='(.*?)', type='(.*?)'
+        node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
         rel_pattern = (
             r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
-            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'
-            r"properties=\{(.*?)\}\)"
+            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
         )
 
         nodes = {}
@@ -223,8 +221,8 @@ class KnowledgeGraphAgent(ChatAgent):
 
         # Extract nodes
         for match in re.finditer(node_pattern, input_string):
-            id, type
-            properties =
+            id, type = match.groups()
+            properties = {'source': 'agent_created'}
             if id not in nodes:
                 node = Node(id, type, properties)
                 if self._validate_node(node):
@@ -232,10 +230,8 @@ class KnowledgeGraphAgent(ChatAgent):
 
         # Extract relationships
        for match in re.finditer(rel_pattern, input_string):
-            subj_id, subj_type, obj_id, obj_type, rel_type
-
-            )
-            properties = eval(properties_str)
+            subj_id, subj_type, obj_id, obj_type, rel_type = match.groups()
+            properties = {'source': 'agent_created'}
             if subj_id in nodes and obj_id in nodes:
                 subj = nodes[subj_id]
                 obj = nodes[obj_id]
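
The rewritten parser no longer captures a properties group from the model output and, notably, drops the unsafe `eval(properties_str)` call in favor of a fixed `{'source': 'agent_created'}` dict. A small standalone sketch of the new patterns (the sample string is hypothetical, not taken from the package's tests):

    import re

    # Patterns as introduced in 0.1.5.6 (no properties group, no eval()).
    node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
    rel_pattern = (
        r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
        r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
    )

    text = (
        "Node(id='John', type='Person') "
        "Relationship(subj=Node(id='John', type='Person'), "
        "obj=Node(id='XYZ Corporation', type='Organization'), type='WorksAt')"
    )

    print(re.findall(node_pattern, text))
    # [('John', 'Person'), ('John', 'Person'), ('XYZ Corporation', 'Organization')]
    print(re.findall(rel_pattern, text))
    # [('John', 'Person', 'XYZ Corporation', 'Organization', 'WorksAt')]

Note that `node_pattern` also matches the `Node(...)` substrings embedded in relationship lines; the agent code deduplicates these via its `if id not in nodes` check.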

{camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/configs/__init__.py
@@ -13,6 +13,10 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
+from .gemini_config import (
+    Gemini_API_PARAMS,
+    GeminiConfig,
+)
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import (
@@ -35,4 +39,6 @@ __all__ = [
     'OLLAMA_API_PARAMS',
     'ZhipuAIConfig',
     'ZHIPUAI_API_PARAMS',
+    'GeminiConfig',
+    'Gemini_API_PARAMS',
 ]

camel_ai-0.1.5.6/camel/configs/gemini_config.py (new file)
@@ -0,0 +1,97 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+
+from collections.abc import Iterable
+from dataclasses import asdict, dataclass
+from typing import Optional
+
+from camel.configs.base_config import BaseConfig
+
+
+@dataclass(frozen=True)
+class GeminiConfig(BaseConfig):
+    r"""A simple dataclass used to configure the generation parameters of
+    `GenerativeModel.generate_content`.
+
+    Args:
+        candidate_count (int, optional): Number of responses to return.
+        stop_sequences (Iterable[str], optional): The set of character
+            sequences (up to 5) that will stop output generation. If
+            specified, the API will stop at the first appearance of a stop
+            sequence. The stop sequence will not be included as part of the
+            response.
+        max_output_tokens (int, optional): The maximum number of tokens to
+            include in a candidate. If unset, this will default to
+            output_token_limit specified in the model's specification.
+        temperature (float, optional): Controls the randomness of the output.
+            Note: The default value varies by model, see the
+            `Model.temperature` attribute of the `Model` returned by the
+            `genai.get_model` function. Values can range from [0.0, 1.0],
+            inclusive. A value closer to 1.0 will produce responses that are
+            more varied and creative, while a value closer to 0.0 will
+            typically result in more straightforward responses from the model.
+        top_p (float, optional): The maximum cumulative probability of tokens
+            to consider when sampling. The model uses combined Top-k and
+            nucleus sampling. Tokens are sorted based on their assigned
+            probabilities so that only the most likely tokens are considered.
+            Top-k sampling directly limits the maximum number of tokens to
+            consider, while nucleus sampling limits the number of tokens
+            based on the cumulative probability. Note: The default value
+            varies by model, see the `Model.top_p` attribute of the `Model`
+            returned by the `genai.get_model` function.
+        top_k (int, optional): The maximum number of tokens to consider when
+            sampling. The model uses combined Top-k and nucleus sampling.
+            Top-k sampling considers the set of `top_k` most probable tokens.
+            Defaults to 40. Note: The default value varies by model, see the
+            `Model.top_k` attribute of the `Model` returned by the
+            `genai.get_model` function.
+        response_mime_type (str, optional): Output response mimetype of the
+            generated candidate text. Supported mimetype:
+            `text/plain`: (default) Text output.
+            `application/json`: JSON response in the candidates.
+        response_schema (Schema, optional): Specifies the format of the
+            JSON requested if response_mime_type is `application/json`.
+        safety_settings (SafetySettingOptions, optional):
+            Overrides for the model's safety settings.
+        tools (FunctionLibraryType, optional):
+            `protos.Tools` more info coming soon.
+        tool_config (ToolConfigType, optional):
+            more info coming soon.
+        request_options (RequestOptionsType, optional):
+            Options for the request.
+    """
+
+    from google.generativeai.protos import Schema
+    from google.generativeai.types.content_types import (
+        FunctionLibraryType,
+        ToolConfigType,
+    )
+    from google.generativeai.types.helper_types import RequestOptionsType
+    from google.generativeai.types.safety_types import SafetySettingOptions
+
+    candidate_count: Optional[int] = None
+    stop_sequences: Optional[Iterable[str]] = None
+    max_output_tokens: Optional[int] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    response_mime_type: Optional[str] = None
+    response_schema: Optional[Schema] = None
+    safety_settings: Optional[SafetySettingOptions] = None
+    tools: Optional[FunctionLibraryType] = None
+    tool_config: Optional[ToolConfigType] = None
+    request_options: Optional[RequestOptionsType] = None
+
+
+Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}
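
Because `Gemini_API_PARAMS` is derived by calling `asdict()` on a default `GeminiConfig`, the set of accepted parameter names stays in sync with the dataclass fields by construction. A minimal usage sketch (assumes camel-ai 0.1.5.6 with the google-generativeai extra installed; note the class body performs its `google.generativeai` imports, so importing `camel.configs` needs that optional dependency present):

    from dataclasses import asdict

    from camel.configs import Gemini_API_PARAMS, GeminiConfig

    # Unset fields default to None; the Gemini backend filters the dict later.
    config = GeminiConfig(temperature=0.2, max_output_tokens=256)

    # The accepted-key set is generated from the dataclass itself.
    assert set(asdict(config).keys()) == Gemini_API_PARAMS
    assert "temperature" in Gemini_API_PARAMS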

{camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/__init__.py
@@ -13,6 +13,7 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_model import AnthropicModel
 from .base_model import BaseModelBackend
+from .gemini_model import GeminiModel
 from .litellm_model import LiteLLMModel
 from .model_factory import ModelFactory
 from .nemotron_model import NemotronModel
@@ -35,4 +36,5 @@ __all__ = [
     'OpenAIAudioModels',
     'NemotronModel',
     'OllamaModel',
+    'GeminiModel',
 ]

camel_ai-0.1.5.6/camel/models/gemini_model.py (new file)
@@ -0,0 +1,203 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from camel.configs import Gemini_API_PARAMS
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionMessage,
+    Choice,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    GeminiTokenCounter,
+    api_keys_required,
+)
+
+if TYPE_CHECKING:
+    from google.generativeai.types import ContentsType, GenerateContentResponse
+
+
+class GeminiModel(BaseModelBackend):
+    r"""Gemini API in a unified BaseModelBackend interface."""
+
+    # NOTE: Currently "stream": True is not supported with Gemini due to the
+    # limitation of the current camel design.
+
+    def __init__(
+        self,
+        model_type: ModelType,
+        model_config_dict: Dict[str, Any],
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+    ) -> None:
+        r"""Constructor for Gemini backend.
+
+        Args:
+            model_type (ModelType): Model for which a backend is created.
+            model_config_dict (Dict[str, Any]): A dictionary that will
+                be fed into generate_content().
+            api_key (Optional[str]): The API key for authenticating with the
+                gemini service. (default: :obj:`None`)
+            url (Optional[str]): The url to the gemini service.
+        """
+        import os
+
+        import google.generativeai as genai
+        from google.generativeai.types.generation_types import GenerationConfig
+
+        super().__init__(model_type, model_config_dict, api_key, url)
+        self._api_key = api_key or os.environ.get("GOOGLE_API_KEY")
+        genai.configure(api_key=self._api_key)
+        self._client = genai.GenerativeModel(self.model_type.value)
+        self._token_counter: Optional[BaseTokenCounter] = None
+        keys = list(self.model_config_dict.keys())
+        generation_config_dict = {
+            k: self.model_config_dict.pop(k)
+            for k in keys
+            if hasattr(GenerationConfig, k)
+        }
+        generation_config = genai.types.GenerationConfig(
+            **generation_config_dict
+        )
+        self.model_config_dict["generation_config"] = generation_config
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        if not self._token_counter:
+            self._token_counter = GeminiTokenCounter(self.model_type)
+        return self._token_counter
+
+    @api_keys_required("GOOGLE_API_KEY")
+    def run(
+        self,
+        messages: List[OpenAIMessage],
+    ) -> ChatCompletion:
+        r"""Runs inference of Gemini model.
+        This method can handle multimodal input
+
+        Args:
+            messages: Message list or Message with the chat history
+                in OpenAI format.
+
+        Returns:
+            response: A ChatCompletion object formatted for the OpenAI API.
+        """
+        response = self._client.generate_content(
+            contents=self.to_gemini_req(messages),
+            **self.model_config_dict,
+        )
+        response.resolve()
+        return self.to_openai_response(response)
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to Gemini API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to OpenAI API.
+        """
+        if self.model_config_dict is not None:
+            for param in self.model_config_dict:
+                if param not in Gemini_API_PARAMS:
+                    raise ValueError(
+                        f"Unexpected argument `{param}` is "
+                        "input into Gemini model backend."
+                    )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode,
+        which sends partial results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
+
+    def to_gemini_req(self, messages: List[OpenAIMessage]) -> 'ContentsType':
+        r"""Converts the request from the OpenAI API format to
+        the Gemini API request format.
+
+        Args:
+            messages: The request object from the OpenAI API.
+
+        Returns:
+            converted_messages: A list of messages formatted for Gemini API.
+        """
+        # role reference
+        # https://ai.google.dev/api/python/google/generativeai/protos/Content
+        converted_messages = []
+        for message in messages:
+            role = message.get('role')
+            if role == 'assistant':
+                role_to_gemini = 'model'
+            else:
+                role_to_gemini = 'user'
+            converted_message = {
+                "role": role_to_gemini,
+                "parts": message.get("content"),
+            }
+            converted_messages.append(converted_message)
+        return converted_messages
+
+    def to_openai_response(
+        self,
+        response: 'GenerateContentResponse',
+    ) -> ChatCompletion:
+        r"""Converts the response from the Gemini API to the OpenAI API
+        response format.
+
+        Args:
+            response: The response object returned by the Gemini API
+
+        Returns:
+            openai_response: A ChatCompletion object formatted for
+                the OpenAI API.
+        """
+        import time
+        import uuid
+
+        openai_response = ChatCompletion(
+            id=f"chatcmpl-{uuid.uuid4().hex!s}",
+            object="chat.completion",
+            created=int(time.time()),
+            model=self.model_type.value,
+            choices=[],
+        )
+        for i, candidate in enumerate(response.candidates):
+            content = ""
+            if candidate.content and len(candidate.content.parts) > 0:
+                content = candidate.content.parts[0].text
+            finish_reason = candidate.finish_reason
+            finish_reason_mapping = {
+                "FinishReason.STOP": "stop",
+                "FinishReason.SAFETY": "content_filter",
+                "FinishReason.RECITATION": "content_filter",
+                "FinishReason.MAX_TOKENS": "length",
+            }
+            finish_reason = finish_reason_mapping.get(finish_reason, "stop")
+            choice = Choice(
+                index=i,
+                message=ChatCompletionMessage(
+                    role="assistant", content=content
+                ),
+                finish_reason=finish_reason,
+            )
+            openai_response.choices.append(choice)
+        return openai_response
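
The OpenAI-to-Gemini message conversion in `to_gemini_req` is simple enough to illustrate standalone: `assistant` becomes `model`, and every other role (including `system`) collapses to `user`. A self-contained sketch of the same mapping on hypothetical messages:

    # Mirror of the to_gemini_req() role mapping.
    messages = [
        {"role": "system", "content": "You are concise."},
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
    ]
    converted = [
        {
            "role": "model" if m.get("role") == "assistant" else "user",
            "parts": m.get("content"),
        }
        for m in messages
    ]
    print([c["role"] for c in converted])  # ['user', 'user', 'model']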

{camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/models/model_factory.py
@@ -15,6 +15,7 @@ from typing import Any, Dict, Optional, Union
 
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.base_model import BaseModelBackend
+from camel.models.gemini_model import GeminiModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.open_source_model import OpenSourceModel
@@ -59,7 +60,6 @@ class ModelFactory:
             BaseModelBackend: The initialized backend.
         """
         model_class: Any
-
         if isinstance(model_type, ModelType):
             if model_platform.is_open_source and model_type.is_open_source:
                 model_class = OpenSourceModel
@@ -70,6 +70,8 @@ class ModelFactory:
                 model_class = AnthropicModel
             elif model_platform.is_zhipuai and model_type.is_zhipuai:
                 model_class = ZhipuAIModel
+            elif model_platform.is_gemini and model_type.is_gemini:
+                model_class = GeminiModel
             elif model_type == ModelType.STUB:
                 model_class = StubModel
             else:
@@ -90,5 +92,4 @@ class ModelFactory:
             )
         else:
             raise ValueError(f"Invalid model type `{model_type}` provided.")
-
         return model_class(model_type, model_config_dict, api_key, url)
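
With the new dispatch branch in place, a Gemini backend can be requested through the factory like any other platform. A minimal creation sketch; this is hedged: it assumes `GOOGLE_API_KEY` is set in the environment and that `ModelFactory.create` accepts the platform, model type, and config dict as suggested by the factory code above (the full signature is not shown in this diff), and `model.run(...)` performs a live API call:

    from dataclasses import asdict

    from camel.configs import GeminiConfig
    from camel.models import ModelFactory
    from camel.types import ModelPlatformType, ModelType

    # Both checks added in this release must pass for GeminiModel to be
    # chosen: model_platform.is_gemini and model_type.is_gemini.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.GEMINI,
        model_type=ModelType.GEMINI_1_5_FLASH,
        model_config_dict=asdict(GeminiConfig(temperature=0.2)),
    )
    response = model.run([{"role": "user", "content": "Say hi."}])
    print(response.choices[0].message.content)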

{camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/types/enums.py
@@ -58,6 +58,10 @@ class ModelType(Enum):
     # Nvidia models
     NEMOTRON_4_REWARD = "nvidia/nemotron-4-340b-reward"
 
+    # Gemini models
+    GEMINI_1_5_FLASH = "gemini-1.5-flash"
+    GEMINI_1_5_PRO = "gemini-1.5-pro"
+
     @property
     def value_for_tiktoken(self) -> str:
         return (
@@ -126,6 +130,10 @@ class ModelType(Enum):
             ModelType.NEMOTRON_4_REWARD,
         }
 
+    @property
+    def is_gemini(self) -> bool:
+        return self in {ModelType.GEMINI_1_5_FLASH, ModelType.GEMINI_1_5_PRO}
+
     @property
     def token_limit(self) -> int:
         r"""Returns the maximum token limit for a given model.
@@ -142,6 +150,10 @@ class ModelType(Enum):
             return 128000
         elif self is ModelType.GPT_4O:
             return 128000
+        elif self == ModelType.GEMINI_1_5_FLASH:
+            return 1048576
+        elif self == ModelType.GEMINI_1_5_PRO:
+            return 1048576
         elif self == ModelType.GLM_4_OPEN_SOURCE:
             return 8192
         elif self == ModelType.GLM_3_TURBO:
@@ -331,6 +343,7 @@ class ModelPlatformType(Enum):
     LITELLM = "litellm"
     ZHIPU = "zhipuai"
     DEFAULT = "default"
+    GEMINI = "gemini"
 
     @property
     def is_openai(self) -> bool:
@@ -367,6 +380,11 @@ class ModelPlatformType(Enum):
         r"""Returns whether this platform is opensource."""
         return self is ModelPlatformType.OPENSOURCE
 
+    @property
+    def is_gemini(self) -> bool:
+        r"""Returns whether this platform is Gemini."""
+        return self is ModelPlatformType.GEMINI
+
 
 class AudioModelType(Enum):
     TTS_1 = "tts-1"
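
Both new model entries advertise a 1,048,576-token (1024 × 1024) context window, and the paired `is_gemini` properties on `ModelType` and `ModelPlatformType` are exactly what the factory dispatch above keys on. A quick check of the new enum members:

    from camel.types import ModelPlatformType, ModelType

    assert ModelType.GEMINI_1_5_FLASH.is_gemini
    assert ModelType.GEMINI_1_5_PRO.token_limit == 1048576  # 1024 * 1024
    assert ModelPlatformType.GEMINI.is_gemini
    assert ModelType.GEMINI_1_5_FLASH.value == "gemini-1.5-flash"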

{camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/utils/__init__.py
@@ -32,6 +32,7 @@ from .constants import Constants
 from .token_counting import (
     AnthropicTokenCounter,
     BaseTokenCounter,
+    GeminiTokenCounter,
     LiteLLMTokenCounter,
     OpenAITokenCounter,
     OpenSourceTokenCounter,
@@ -60,4 +61,5 @@ __all__ = [
     'dependencies_required',
     'api_keys_required',
     'is_docker_running',
+    'GeminiTokenCounter',
 ]

{camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/camel/utils/token_counting.py
@@ -342,6 +342,40 @@ class AnthropicTokenCounter(BaseTokenCounter):
         return num_tokens
 
 
+class GeminiTokenCounter(BaseTokenCounter):
+    def __init__(self, model_type: ModelType):
+        r"""Constructor for the token counter for Gemini models."""
+        import google.generativeai as genai
+
+        self.model_type = model_type
+        self._client = genai.GenerativeModel(self.model_type.value)
+
+    def count_tokens_from_messages(self, messages: List[OpenAIMessage]) -> int:
+        r"""Count number of tokens in the provided message list using
+        loaded tokenizer specific for this type of model.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            int: Number of tokens in the messages.
+        """
+        converted_messages = []
+        for message in messages:
+            role = message.get('role')
+            if role == 'assistant':
+                role_to_gemini = 'model'
+            else:
+                role_to_gemini = 'user'
+            converted_message = {
+                "role": role_to_gemini,
+                "parts": message.get("content"),
+            }
+            converted_messages.append(converted_message)
+        return self._client.count_tokens(converted_messages).total_tokens
+
+
 class LiteLLMTokenCounter:
     def __init__(self, model_type: str):
         r"""Constructor for the token counter for LiteLLM models.
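
Unlike the tiktoken-based counters elsewhere in this module, this one delegates to `GenerativeModel.count_tokens`, so counting is a remote call that needs credentials. A minimal sketch (the API key value is a placeholder; in practice `genai.configure` may already have been called by constructing a `GeminiModel` first):

    import google.generativeai as genai

    from camel.types import ModelType
    from camel.utils import GeminiTokenCounter

    genai.configure(api_key="...")  # or rely on prior GeminiModel setup

    counter = GeminiTokenCounter(ModelType.GEMINI_1_5_FLASH)
    n_tokens = counter.count_tokens_from_messages(
        [{"role": "user", "content": "Hello, Gemini!"}]
    )
    print(n_tokens)  # a small positive integer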

{camel_ai-0.1.5.5 → camel_ai-0.1.5.6}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "camel-ai"
-version = "0.1.5.5"
+version = "0.1.5.6"
 authors = ["CAMEL-AI.org"]
 description = "Communicative Agents for AI Society Study"
 readme = "README.md"
@@ -43,6 +43,7 @@ pillow ="^10.2.0"
 
 # model-platforms
 litellm = { version = "^1.38.1", optional = true }
+google-generativeai = { version = "^0.6.0", optional = true }
 
 # huggingface-agent
 transformers = { version = "^4", optional = true }
@@ -104,6 +105,7 @@ test = ["pytest", "mock", "pytest-asyncio"]
 
 model-platforms = [
     "litellm",
+    "google-generativeai",
 ]
 
 huggingface-agent = [
@@ -210,6 +212,7 @@ all = [
     "litellm",
     # kv-storages
     "redis",
+    "google-generativeai",
 ]
 
 [tool.poetry.group.dev]
@@ -335,5 +338,7 @@ module = [
     "pyTelegramBotAPI",
     "discord.py",
     "docker.*",
+    "google.*",
+    "google-generativeai",
 ]
 ignore_missing_imports = true
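
The packaging change mirrors the metadata change in PKG-INFO above: `google-generativeai` is an optional dependency, pulled in only through the `model-platforms` or `all` extras (for example, `pip install "camel-ai[model-platforms]"`), so base installs of camel-ai 0.1.5.6 are unaffected.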