camel-ai 0.1.5.4__tar.gz → 0.1.5.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai might be problematic.
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/PKG-INFO +12 -3
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/README.md +5 -1
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/__init__.py +1 -1
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/knowledge_graph_agent.py +11 -15
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/task_agent.py +0 -1
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/configs/__init__.py +12 -0
- camel_ai-0.1.5.6/camel/configs/gemini_config.py +97 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/configs/litellm_config.py +8 -18
- camel_ai-0.1.5.6/camel/configs/ollama_config.py +85 -0
- camel_ai-0.1.5.6/camel/configs/zhipuai_config.py +78 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/embeddings/openai_embedding.py +2 -2
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/search_functions.py +5 -14
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/slack_functions.py +5 -7
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/twitter_function.py +3 -8
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/weather_functions.py +3 -8
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/interpreters/__init__.py +2 -0
- camel_ai-0.1.5.6/camel/interpreters/docker_interpreter.py +235 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/loaders/__init__.py +2 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/loaders/base_io.py +5 -9
- camel_ai-0.1.5.6/camel/loaders/jina_url_reader.py +99 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/loaders/unstructured_io.py +4 -6
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/__init__.py +2 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/anthropic_model.py +6 -4
- camel_ai-0.1.5.6/camel/models/gemini_model.py +203 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/litellm_model.py +49 -21
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/model_factory.py +4 -2
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/nemotron_model.py +14 -6
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/ollama_model.py +11 -17
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/openai_audio_models.py +10 -2
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/openai_model.py +4 -3
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/zhipuai_model.py +12 -6
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/retrievers/bm25_retriever.py +3 -8
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/retrievers/cohere_rerank_retriever.py +3 -5
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/__init__.py +2 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/graph_storages/neo4j_graph.py +3 -7
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/key_value_storages/__init__.py +2 -0
- camel_ai-0.1.5.6/camel/storages/key_value_storages/redis.py +169 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/vectordb_storages/milvus.py +3 -7
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/vectordb_storages/qdrant.py +3 -7
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/toolkits/__init__.py +2 -0
- camel_ai-0.1.5.6/camel/toolkits/code_execution.py +69 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/toolkits/github_toolkit.py +5 -9
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/types/enums.py +53 -1
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/utils/__init__.py +4 -2
- camel_ai-0.1.5.6/camel/utils/async_func.py +42 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/utils/commons.py +31 -49
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/utils/token_counting.py +74 -1
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/pyproject.toml +23 -4
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/chat_agent.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/critic_agent.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/deductive_reasoner_agent.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/embodied_agent.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/role_assignment_agent.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/search_agent.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/tool_agents/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/tool_agents/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/tool_agents/hugging_face_tool_agent.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/configs/anthropic_config.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/configs/base_config.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/configs/openai_config.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/embeddings/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/embeddings/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/embeddings/sentence_transformers_embeddings.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/embeddings/vlm_embedding.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/google_maps_function.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/math_functions.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_function.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/biztoc/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/biztoc/ai-plugin.json +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/biztoc/openapi.yaml +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/coursera/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/coursera/openapi.yaml +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/create_qr_code/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/create_qr_code/openapi.yaml +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/klarna/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/klarna/openapi.yaml +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/nasa_apod/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/nasa_apod/openapi.yaml +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/ai-plugin.json +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/openapi.yaml +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/paths/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/paths/get_classes.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/outschool/paths/search_teachers.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/security_config.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/speak/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/speak/openapi.yaml +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/web_scraper/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/web_scraper/ai-plugin.json +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/web_scraper/openapi.yaml +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/web_scraper/paths/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/open_api_specs/web_scraper/paths/scraper.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/openai_function.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/retrieval_functions.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/generators.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/human.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/interpreters/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/interpreters/internal_python_interpreter.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/interpreters/interpreter_error.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/interpreters/subprocess_interpreter.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/memories/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/memories/agent_memories.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/memories/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/memories/blocks/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/memories/blocks/chat_history_block.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/memories/blocks/vectordb_block.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/memories/context_creators/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/memories/context_creators/score_based.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/memories/records.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/messages/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/messages/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/messages/func_message.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/base_model.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/open_source_model.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/models/stub_model.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/ai_society.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/code.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/descripte_video_prompt.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/evaluation.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/generate_text_embedding_data.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/misalignment.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/object_recognition.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/prompt_templates.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/role_description_prompt_template.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/solution_extraction.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/task_prompt_template.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/prompts/translation.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/responses/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/responses/agent_responses.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/retrievers/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/retrievers/auto_retriever.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/retrievers/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/retrievers/vector_retriever.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/societies/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/societies/babyagi_playing.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/societies/role_playing.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/graph_storages/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/graph_storages/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/graph_storages/graph_element.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/key_value_storages/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/key_value_storages/in_memory.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/key_value_storages/json.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/vectordb_storages/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/storages/vectordb_storages/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/terminators/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/terminators/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/terminators/response_terminator.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/terminators/token_limit_terminator.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/toolkits/base.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/types/__init__.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/types/openai_types.py +0 -0
- {camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/utils/constants.py +0 -0
{camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: camel-ai
-Version: 0.1.5.4
+Version: 0.1.5.6
 Summary: Communicative Agents for AI Society Study
 Home-page: https://www.camel-ai.org/
 License: Apache-2.0
@@ -16,6 +16,7 @@ Provides-Extra: all
 Provides-Extra: encoders
 Provides-Extra: graph-storages
 Provides-Extra: huggingface-agent
+Provides-Extra: kv-stroages
 Provides-Extra: model-platforms
 Provides-Extra: retrievers
 Provides-Extra: test
@@ -31,13 +32,16 @@ Requires-Dist: curl_cffi (==0.6.2)
 Requires-Dist: datasets (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: diffusers (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: discord.py (>=2.3.2,<3.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: docx2txt (>=0.8,<0.9) ; extra == "tools" or extra == "all"
 Requires-Dist: duckduckgo-search (>=6.1.0,<7.0.0) ; extra == "tools" or extra == "all"
+Requires-Dist: google-generativeai (>=0.6.0,<0.7.0) ; extra == "model-platforms" or extra == "all"
 Requires-Dist: googlemaps (>=4.10.0,<5.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: imageio[pyav] (>=2.34.2,<3.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: jsonschema (>=4,<5)
 Requires-Dist: litellm (>=1.38.1,<2.0.0) ; extra == "model-platforms" or extra == "all"
+Requires-Dist: milvus-lite (>=2.4.0,<=2.4.7)
 Requires-Dist: mock (>=5,<6) ; extra == "test"
 Requires-Dist: neo4j (>=5.18.0,<6.0.0) ; extra == "graph-storages" or extra == "all"
 Requires-Dist: newspaper3k (>=0.2.8,<0.3.0) ; extra == "tools" or extra == "all"
@@ -59,13 +63,14 @@ Requires-Dist: pytest (>=7,<8) ; extra == "test"
 Requires-Dist: pytest-asyncio (>=0.23.0,<0.24.0) ; extra == "test"
 Requires-Dist: qdrant-client (>=1.9.0,<2.0.0) ; extra == "vector-databases" or extra == "all"
 Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0) ; extra == "retrievers" or extra == "all"
+Requires-Dist: redis (>=5.0.6,<6.0.0) ; extra == "kv-stroages" or extra == "all"
 Requires-Dist: requests_oauthlib (>=1.3.1,<2.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: sentence-transformers (>=3.0.1,<4.0.0) ; extra == "encoders" or extra == "all"
 Requires-Dist: sentencepiece (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: slack-sdk (>=3.27.2,<4.0.0) ; extra == "tools" or extra == "all"
 Requires-Dist: soundfile (>=0,<1) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
-Requires-Dist: torch (>=
+Requires-Dist: torch (>=2,<3) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: transformers (>=4,<5) ; extra == "huggingface-agent" or extra == "all"
 Requires-Dist: unstructured[all-docs] (>=0.10.30,<0.11.0) ; extra == "tools" or extra == "all"
 Requires-Dist: wikipedia (>=1,<2) ; extra == "tools" or extra == "all"
@@ -186,7 +191,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.4 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.6 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -198,6 +203,10 @@ pip install -e .
 pip install -e .[all] # (Optional)
 ```
 
+### From Docker
+
+Detailed guidance can be find [here](https://github.com/camel-ai/camel/blob/master/.container/README.md)
+
 ## Documentation
 
 [CAMEL package documentation pages](https://camel-ai.github.io/camel/).
{camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/README.md

@@ -110,7 +110,7 @@ conda create --name camel python=3.9
 conda activate camel
 
 # Clone github repo
-git clone -b v0.1.5.4 https://github.com/camel-ai/camel.git
+git clone -b v0.1.5.6 https://github.com/camel-ai/camel.git
 
 # Change directory into project directory
 cd camel
@@ -122,6 +122,10 @@ pip install -e .
 pip install -e .[all] # (Optional)
 ```
 
+### From Docker
+
+Detailed guidance can be find [here](https://github.com/camel-ai/camel/blob/master/.container/README.md)
+
 ## Documentation
 
 [CAMEL package documentation pages](https://camel-ai.github.io/camel/).
{camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/agents/knowledge_graph_agent.py

@@ -78,17 +78,16 @@ Expected Output:
 
 Nodes:
 
-Node(id='John', type='Person'
-Node(id='XYZ Corporation', type='Organization'
-Node(id='New York City', type='Location'
+Node(id='John', type='Person')
+Node(id='XYZ Corporation', type='Organization')
+Node(id='New York City', type='Location')
 
 Relationships:
 
 Relationship(subj=Node(id='John', type='Person'), obj=Node(id='XYZ
-Corporation', type='Organization'), type='WorksAt'
-{'agent_generated'})
+Corporation', type='Organization'), type='WorksAt')
 Relationship(subj=Node(id='John', type='Person'), obj=Node(id='New York City',
-type='Location'), type='ResidesIn'
+type='Location'), type='ResidesIn')
 
 ===== TASK =====
 Please extracts nodes and relationships from given content and structures them
@@ -211,11 +210,10 @@ class KnowledgeGraphAgent(ChatAgent):
         import re
 
         # Regular expressions to extract nodes and relationships
-        node_pattern = r"Node\(id='(.*?)', type='(.*?)'
+        node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
         rel_pattern = (
             r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
-            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'
-            r"properties=\{(.*?)\}\)"
+            r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
         )
 
         nodes = {}
@@ -223,8 +221,8 @@ class KnowledgeGraphAgent(ChatAgent):
 
         # Extract nodes
         for match in re.finditer(node_pattern, input_string):
-            id, type
-            properties =
+            id, type = match.groups()
+            properties = {'source': 'agent_created'}
             if id not in nodes:
                 node = Node(id, type, properties)
                 if self._validate_node(node):
@@ -232,10 +230,8 @@ class KnowledgeGraphAgent(ChatAgent):
 
         # Extract relationships
         for match in re.finditer(rel_pattern, input_string):
-            subj_id, subj_type, obj_id, obj_type, rel_type
-
-            )
-            properties = eval(properties_str)
+            subj_id, subj_type, obj_id, obj_type, rel_type = match.groups()
+            properties = {'source': 'agent_created'}
             if subj_id in nodes and obj_id in nodes:
                 subj = nodes[subj_id]
                 obj = nodes[obj_id]
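Aside on the parsing change above: 0.1.5.6 drops the `properties=\{(.*?)\}` capture groups and the `eval()` of model-produced text, stamping a fixed `{'source': 'agent_created'}` dict locally instead. A quick standalone check of the new patterns against the docstring example (the test string below is illustrative, not from the package):

import re

node_pattern = r"Node\(id='(.*?)', type='(.*?)'\)"
rel_pattern = (
    r"Relationship\(subj=Node\(id='(.*?)', type='(.*?)'\), "
    r"obj=Node\(id='(.*?)', type='(.*?)'\), type='(.*?)'\)"
)

text = (
    "Node(id='John', type='Person')\n"
    "Relationship(subj=Node(id='John', type='Person'), "
    "obj=Node(id='XYZ Corporation', type='Organization'), type='WorksAt')"
)

for match in re.finditer(node_pattern, text):
    node_id, node_type = match.groups()
    print(node_id, node_type, {'source': 'agent_created'})

for match in re.finditer(rel_pattern, text):
    subj_id, subj_type, obj_id, obj_type, rel_type = match.groups()
    print(subj_id, rel_type, obj_id)  # John WorksAt XYZ Corporation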
{camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/configs/__init__.py

@@ -13,12 +13,18 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
+from .gemini_config import (
+    Gemini_API_PARAMS,
+    GeminiConfig,
+)
 from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
+from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import (
     OPENAI_API_PARAMS,
     ChatGPTConfig,
     OpenSourceConfig,
 )
+from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
 
 __all__ = [
     'BaseConfig',
@@ -29,4 +35,10 @@ __all__ = [
     'OpenSourceConfig',
     'LiteLLMConfig',
     'LITELLM_API_PARAMS',
+    'OllamaConfig',
+    'OLLAMA_API_PARAMS',
+    'ZhipuAIConfig',
+    'ZHIPUAI_API_PARAMS',
+    'GeminiConfig',
+    'Gemini_API_PARAMS',
 ]
camel_ai-0.1.5.6/camel/configs/gemini_config.py (new file)

@@ -0,0 +1,97 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+
+
+from collections.abc import Iterable
+from dataclasses import asdict, dataclass
+from typing import Optional
+
+from camel.configs.base_config import BaseConfig
+
+
+@dataclass(frozen=True)
+class GeminiConfig(BaseConfig):
+    r"""A simple dataclass used to configure the generation parameters of
+    `GenerativeModel.generate_content`.
+
+    Args:
+        candidate_count (int, optional): Number of responses to return.
+        stop_sequences (Iterable[str], optional): The set of character
+            sequences (up to 5) that will stop output generation. If specified
+            the API will stop at the first appearance of a stop sequence.
+            The stop sequence will not be included as part of the response.
+        max_output_tokens (int, optional): The maximum number of tokens to
+            include in a candidate. If unset, this will default to
+            output_token_limit specified in the model's specification.
+        temperature (float, optional): Controls the randomness of the output.
+            Note: The default value varies by model, see the
+            `Model.temperature` attribute of the `Model` returned
+            the `genai.get_model` function. Values can range from [0.0,1.0],
+            inclusive. A value closer to 1.0 will produce responses that are
+            more varied and creative, while a value closer to 0.0 will
+            typically result in more straightforward responses from the model.
+        top_p (int, optional): The maximum cumulative probability of tokens to
+            consider when sampling. The model uses combined Top-k and nucleus
+            sampling. Tokens are sorted based on their assigned probabilities
+            so that only the most likely tokens are considered. Top-k sampling
+            directly limits the maximum number of tokens to consider, while
+            Nucleus sampling limits number of tokens
+            based on the cumulative probability. Note: The default value varies
+            by model, see the `Model.top_p` attribute of the `Model` returned
+            the `genai.get_model` function.
+        top_k (int, optional): The maximum number of tokens to consider when
+            sampling. The model uses combined Top-k and nucleus sampling.Top-k
+            sampling considers the set of `top_k` most probable tokens.
+            Defaults to 40. Note: The default value varies by model, see the
+            `Model.top_k` attribute of the `Model` returned the
+            `genai.get_model` function.
+        response_mime_type (str, optional): Output response mimetype of the
+            generated candidate text. Supported mimetype:
+            `text/plain`: (default) Text output.
+            `application/json`: JSON response in the candidates.
+        response_schema (Schema, optional): Specifies the format of the
+            JSON requested if response_mime_type is `application/json`.
+        safety_settings (SafetySettingOptions, optional):
+            Overrides for the model's safety settings.
+        tools (FunctionLibraryType, optional):
+            `protos.Tools` more info coming soon.
+        tool_config (ToolConfigType, optional):
+            more info coming soon.
+        request_options (RequestOptionsType, optional):
+            Options for the request.
+    """
+
+    from google.generativeai.protos import Schema
+    from google.generativeai.types.content_types import (
+        FunctionLibraryType,
+        ToolConfigType,
+    )
+    from google.generativeai.types.helper_types import RequestOptionsType
+    from google.generativeai.types.safety_types import SafetySettingOptions
+
+    candidate_count: Optional[int] = None
+    stop_sequences: Optional[Iterable[str]] = None
+    max_output_tokens: Optional[int] = None
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    top_k: Optional[int] = None
+    response_mime_type: Optional[str] = None
+    response_schema: Optional[Schema] = None
+    safety_settings: Optional[SafetySettingOptions] = None
+    tools: Optional[FunctionLibraryType] = None
+    tool_config: Optional[ToolConfigType] = None
+    request_options: Optional[RequestOptionsType] = None
+
+
+Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}
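The closing `Gemini_API_PARAMS = {param for param in asdict(GeminiConfig()).keys()}` line is the convention each new config module in this release follows: freeze the dataclass, then derive the set of legal request parameters from its field names. A hedged usage sketch (the filtering step is illustrative, not code from the package; importing `GeminiConfig` assumes `google-generativeai` is installed):

from dataclasses import asdict

from camel.configs import GeminiConfig, Gemini_API_PARAMS

config = GeminiConfig(temperature=0.7, max_output_tokens=256)

# Keep only recognized, explicitly-set parameters before calling the API.
request_kwargs = {
    key: value
    for key, value in asdict(config).items()
    if key in Gemini_API_PARAMS and value is not None
}
# request_kwargs == {'temperature': 0.7, 'max_output_tokens': 256}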
{camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/configs/litellm_config.py

@@ -13,11 +13,14 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from __future__ import annotations
 
-from dataclasses import asdict, dataclass, field
-from typing import List, Optional, Union
+from dataclasses import asdict, dataclass
+from typing import TYPE_CHECKING, List, Optional, Union
 
 from camel.configs.base_config import BaseConfig
 
+if TYPE_CHECKING:
+    from camel.functions import OpenAIFunction
+
 
 @dataclass(frozen=True)
 class LiteLLMConfig(BaseConfig):
@@ -25,9 +28,6 @@ class LiteLLMConfig(BaseConfig):
     LiteLLM API.
 
     Args:
-        model (str): The name of the language model to use for text completion.
-        messages (List): A list of message objects representing the
-            conversation context. (default: [])
         timeout (Optional[Union[float, str]], optional): Request timeout.
             (default: None)
         temperature (Optional[float], optional): Temperature parameter for
@@ -65,12 +65,7 @@ class LiteLLMConfig(BaseConfig):
         deployment_id (Optional[str], optional): Deployment ID. (default: None)
         extra_headers (Optional[dict], optional): Additional headers for the
             request. (default: None)
-        base_url (Optional[str], optional): Base URL for the API. (default:
-            None)
         api_version (Optional[str], optional): API version. (default: None)
-        api_key (Optional[str], optional): API key. (default: None)
-        model_list (Optional[list], optional): List of API base, version,
-            keys. (default: None)
         mock_response (Optional[str], optional): Mock completion response for
             testing or debugging. (default: None)
         custom_llm_provider (Optional[str], optional): Non-OpenAI LLM
@@ -79,8 +74,6 @@ class LiteLLMConfig(BaseConfig):
             (default: None)
     """
 
-    model: str = "gpt-3.5-turbo"
-    messages: List = field(default_factory=list)
     timeout: Optional[Union[float, str]] = None
     temperature: Optional[float] = None
     top_p: Optional[float] = None
@@ -91,20 +84,17 @@ class LiteLLMConfig(BaseConfig):
     max_tokens: Optional[int] = None
     presence_penalty: Optional[float] = None
     frequency_penalty: Optional[float] = None
-    logit_bias: Optional[dict] =
+    logit_bias: Optional[dict] = None
     user: Optional[str] = None
     response_format: Optional[dict] = None
     seed: Optional[int] = None
-    tools: Optional[
+    tools: Optional[list[OpenAIFunction]] = None
     tool_choice: Optional[Union[str, dict]] = None
     logprobs: Optional[bool] = None
     top_logprobs: Optional[int] = None
     deployment_id: Optional[str] = None
-    extra_headers: Optional[dict] =
-    base_url: Optional[str] = None
+    extra_headers: Optional[dict] = None
     api_version: Optional[str] = None
-    api_key: Optional[str] = None
-    model_list: Optional[list] = field(default_factory=list)
     mock_response: Optional[str] = None
     custom_llm_provider: Optional[str] = None
     max_retries: Optional[int] = None
camel_ai-0.1.5.6/camel/configs/ollama_config.py (new file)

@@ -0,0 +1,85 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+from typing import Sequence
+
+from openai._types import NOT_GIVEN, NotGiven
+
+from camel.configs.base_config import BaseConfig
+
+
+@dataclass(frozen=True)
+class OllamaConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility
+
+    Reference: https://github.com/ollama/ollama/blob/main/docs/openai.md
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.2`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`1.0`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`0.0`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`0.0`)
+    """
+
+    temperature: float = 0.2
+    top_p: float = 1.0
+    stream: bool = False
+    stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+    max_tokens: int | NotGiven = NOT_GIVEN
+    presence_penalty: float = 0.0
+    response_format: dict | NotGiven = NOT_GIVEN
+    frequency_penalty: float = 0.0
+
+
+OLLAMA_API_PARAMS = {param for param in asdict(OllamaConfig()).keys()}
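Note that this config (and the ZhipuAI one below) reuses OpenAI's `NOT_GIVEN` sentinel rather than `None`, so "omit this field from the request" stays distinguishable from "explicitly send null". A hedged sketch of the filtering a caller might do before issuing a request (the helper is illustrative, not from the package):

from dataclasses import fields

from openai._types import NOT_GIVEN

from camel.configs import OllamaConfig

def set_request_fields(config: OllamaConfig) -> dict:
    # Hypothetical helper: NOT_GIVEN marks "leave out of the payload",
    # which is why these fields don't simply default to None.
    return {
        f.name: getattr(config, f.name)
        for f in fields(config)
        if getattr(config, f.name) is not NOT_GIVEN
    }

print(set_request_fields(OllamaConfig(temperature=0.0)))
# -> temperature, top_p, stream, presence_penalty, frequency_penalty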
camel_ai-0.1.5.6/camel/configs/zhipuai_config.py (new file)

@@ -0,0 +1,78 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+from typing import TYPE_CHECKING, Optional, Sequence
+
+from openai._types import NOT_GIVEN, NotGiven
+
+from camel.configs.base_config import BaseConfig
+
+if TYPE_CHECKING:
+    from camel.functions import OpenAIFunction
+
+
+@dataclass(frozen=True)
+class ZhipuAIConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility
+
+    Reference: https://open.bigmodel.cn/dev/api#glm-4v
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`0.2`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`0.6`)
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`False`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        tools (list[OpenAIFunction], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: float = 0.2
+    top_p: float = 0.6
+    stream: bool = False
+    stop: str | Sequence[str] | NotGiven = NOT_GIVEN
+    max_tokens: int | NotGiven = NOT_GIVEN
+    tools: Optional[list[OpenAIFunction]] = None
+    tool_choice: Optional[dict[str, str] | str] = None
+
+
+ZHIPUAI_API_PARAMS = {param for param in asdict(ZhipuAIConfig()).keys()}
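Unlike the Ollama config, this one carries `tools`/`tool_choice`, typed against camel's `OpenAIFunction` wrapper (imported only under `TYPE_CHECKING`, keeping the dataclass import-light). A hedged usage sketch, assuming `OpenAIFunction` wraps a plain callable as it does elsewhere in camel:

from camel.configs import ZhipuAIConfig
from camel.functions import OpenAIFunction

def add(a: int, b: int) -> int:
    r"""Adds two numbers.

    Args:
        a (int): First addend.
        b (int): Second addend.

    Returns:
        int: The sum of a and b.
    """
    return a + b

config = ZhipuAIConfig(
    temperature=0.0,
    tools=[OpenAIFunction(add)],  # assumption: OpenAIFunction takes a callable
    tool_choice="auto",
)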
{camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/embeddings/openai_embedding.py

@@ -20,7 +20,7 @@ from openai import NOT_GIVEN, NotGiven, OpenAI
 
 from camel.embeddings.base import BaseEmbedding
 from camel.types import EmbeddingModelType
-from camel.utils import
+from camel.utils import api_keys_required
 
 
 class OpenAIEmbedding(BaseEmbedding[str]):
@@ -58,7 +58,7 @@ class OpenAIEmbedding(BaseEmbedding[str]):
         self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
         self.client = OpenAI(timeout=60, max_retries=3, api_key=self._api_key)
 
-    @
+    @api_keys_required("OPENAI_API_KEY")
     def embed_list(
         self,
         objs: list[str],
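The body of `api_keys_required` isn't shown in this diff (it lives in `camel/utils/commons.py`, which also changed in this release), but the call site pins down its shape: a decorator parameterized by environment-variable names. A hypothetical minimal version, purely for orientation:

import os
from functools import wraps

def api_keys_required(*env_vars: str):
    # Hypothetical sketch: fail fast with a clear error when a required
    # API key is missing from the environment.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            missing = [v for v in env_vars if not os.environ.get(v)]
            if missing:
                raise ValueError(
                    f"Missing API key(s) in environment: {', '.join(missing)}"
                )
            return func(*args, **kwargs)
        return wrapper
    return decorator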
{camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/search_functions.py

@@ -15,8 +15,10 @@ import os
 from typing import Any, Dict, List
 
 from camel.functions.openai_function import OpenAIFunction
+from camel.utils import dependencies_required
 
 
+@dependencies_required('wikipedia')
 def search_wiki(entity: str) -> str:
     r"""Search the entity in WikiPedia and return the summary of the required
     page, containing factual information about the given entity.
@@ -28,13 +30,7 @@ def search_wiki(entity: str) -> str:
         str: The search result. If the page corresponding to the entity
             exists, return the summary of this entity in a string.
     """
-    try:
-        import wikipedia
-    except ImportError:
-        raise ImportError(
-            "Please install `wikipedia` first. You can install it by running "
-            "`pip install wikipedia`."
-        )
+    import wikipedia
 
     result: str
 
@@ -241,6 +237,7 @@ def search_google(
     return responses
 
 
+@dependencies_required('wolframalpha')
 def query_wolfram_alpha(query: str, is_detailed: bool) -> str:
     r"""Queries Wolfram|Alpha and returns the result. Wolfram|Alpha is an
     answer engine developed by Wolfram Research. It is offered as an online
@@ -255,13 +252,7 @@ def query_wolfram_alpha(query: str, is_detailed: bool) -> str:
     Returns:
         str: The result from Wolfram Alpha, formatted as a string.
     """
-    try:
-        import wolframalpha
-    except ImportError:
-        raise ImportError(
-            "Please install `wolframalpha` first. You can install it by"
-            " running `pip install wolframalpha`."
-        )
+    import wolframalpha
 
     WOLFRAMALPHA_APP_ID = os.environ.get('WOLFRAMALPHA_APP_ID')
     if not WOLFRAMALPHA_APP_ID:
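The recurring change across the functions modules is mechanical: each inline try/except-ImportError block becomes a plain import guarded by a `@dependencies_required(...)` decorator. Its implementation also isn't shown here (part of the `camel/utils/commons.py` rewrite); a plausible minimal sketch:

import importlib.util
from functools import wraps

def dependencies_required(*packages: str):
    # Hypothetical sketch: verify each package is importable before the
    # wrapped function runs, replacing per-function try/except ImportError.
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            missing = [
                p for p in packages if importlib.util.find_spec(p) is None
            ]
            if missing:
                raise ImportError(
                    f"Missing required package(s): {', '.join(missing)}. "
                    f"Install with `pip install {' '.join(missing)}`."
                )
            return func(*args, **kwargs)
        return wrapper
    return decorator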
{camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/slack_functions.py

@@ -19,6 +19,8 @@ import logging
 import os
 from typing import TYPE_CHECKING, List, Optional
 
+from camel.utils import dependencies_required
+
 if TYPE_CHECKING:
     from ssl import SSLContext
 
@@ -29,6 +31,7 @@ from camel.functions import OpenAIFunction
 logger = logging.getLogger(__name__)
 
 
+@dependencies_required('slack_sdk')
 def _login_slack(
     slack_token: Optional[str] = None,
     ssl: Optional[SSLContext] = None,
@@ -50,13 +53,8 @@ def _login_slack(
     KeyError: If SLACK_BOT_TOKEN or SLACK_USER_TOKEN environment variables
         are not set.
     """
-    try:
-        from slack_sdk import WebClient
-    except ImportError as e:
-        raise ImportError(
-            "Cannot import slack_sdk. Please install the package with \
-            `pip install slack_sdk`."
-        ) from e
+    from slack_sdk import WebClient
+
     if not slack_token:
         slack_token = os.environ.get("SLACK_BOT_TOKEN") or os.environ.get(
             "SLACK_USER_TOKEN"
{camel_ai-0.1.5.4 → camel_ai-0.1.5.6}/camel/functions/twitter_function.py

@@ -20,6 +20,7 @@ from typing import List, Optional, Tuple, Union
 import requests
 
 from camel.functions import OpenAIFunction
+from camel.utils import dependencies_required
 
 TWEET_TEXT_LIMIT = 280
 
@@ -55,6 +56,7 @@ def get_twitter_api_key() -> Tuple[str, str]:
     return TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET
 
 
+@dependencies_required('requests_oauthlib')
 def get_oauth_session() -> requests.Session:
     r'''Initiates an OAuth1Session with Twitter's API and returns it.
 
@@ -75,14 +77,7 @@ def get_oauth_session() -> requests.Session:
     https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/Manage-Tweets/create_tweet.py
     https://github.com/twitterdev/Twitter-API-v2-sample-code/blob/main/User-Lookup/get_users_me_user_context.py
     '''
-    try:
-        from requests_oauthlib import OAuth1Session
-    except ImportError:
-        raise ImportError(
-            "Please install `requests_oauthlib` first. You can "
-            "install it by running `pip install "
-            "requests_oauthlib`."
-        )
+    from requests_oauthlib import OAuth1Session
 
     consumer_key, consumer_secret = get_twitter_api_key()
 