agentica 0.2.5__tar.gz → 1.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {agentica-0.2.5/agentica.egg-info → agentica-1.0.1}/PKG-INFO +14 -27
- {agentica-0.2.5 → agentica-1.0.1}/README.md +9 -24
- {agentica-0.2.5 → agentica-1.0.1}/agentica/agent.py +2 -5
- {agentica-0.2.5 → agentica-1.0.1}/agentica/config.py +1 -0
- agentica-1.0.1/agentica/mcp/__init__.py +20 -0
- agentica-1.0.1/agentica/mcp/client.py +160 -0
- agentica-1.0.1/agentica/mcp/server.py +293 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/media.py +3 -1
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/base.py +9 -4
- agentica-1.0.1/agentica/tools/mcp_tool.py +375 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/weather_tool.py +14 -13
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/json_util.py +3 -3
- agentica-1.0.1/agentica/version.py +1 -0
- {agentica-0.2.5 → agentica-1.0.1/agentica.egg-info}/PKG-INFO +14 -27
- {agentica-0.2.5 → agentica-1.0.1}/agentica.egg-info/SOURCES.txt +4 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica.egg-info/requires.txt +1 -0
- {agentica-0.2.5 → agentica-1.0.1}/setup.py +2 -1
- agentica-0.2.5/agentica/version.py +0 -1
- {agentica-0.2.5 → agentica-1.0.1}/LICENSE +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/MANIFEST.in +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/agent_session.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/cli.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/document.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/azure_openai_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/base.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/fireworks_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/genimi_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/hash_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/huggingface_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/ollama_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/openai_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/sentence_transformer_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/text2vec_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/together_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/word2vec_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/emb/zhipuai_emb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/file/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/file/base.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/file/csv.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/file/txt.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/knowledge/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/knowledge/base.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/knowledge/langchain_knowledge.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/knowledge/llamaindex_knowledge.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/memory.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/memorydb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/anthropic/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/anthropic/claude.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/aws/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/aws/api_client.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/aws/bedrock.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/aws/claude.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/azure/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/azure/openai_chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/base.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/cohere/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/cohere/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/content.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/deepseek/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/deepseek/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/doubao/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/doubao/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/fireworks/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/fireworks/fireworks.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/google/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/google/gemini.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/google/gemini_openai.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/groq/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/groq/groq.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/huggingface/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/huggingface/hf.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/internlm/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/internlm/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/message.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/mistral/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/mistral/mistral.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/moonshot/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/moonshot/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/nvidia/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/nvidia/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/ollama/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/ollama/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/ollama/hermes.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/ollama/tools.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/openai/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/openai/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/openai/like.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/openrouter/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/openrouter/openrouter.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/qwen/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/qwen/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/response.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/sambanova/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/sambanova/sambanova.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/together/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/together/together.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/vertexai/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/vertexai/gemini.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/xai/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/xai/grok.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/yi/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/yi/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/zhipuai/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/model/zhipuai/chat.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/python_agent.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/react_agent.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/reasoning.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/reranker/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/reranker/base.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/reranker/bge.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/reranker/cohere.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/run_response.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/agent/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/agent/base.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/agent/json_file.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/agent/postgres.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/agent/sqlite.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/agent/yaml_file.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/workflow/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/workflow/base.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/workflow/postgres.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/storage/workflow/sqlite.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/template.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/airflow_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/analyze_image_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/apify_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/arxiv_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/baidusearch_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/calculator_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/cogvideo_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/cogview_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/dalle_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/dblp_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/duckduckgo_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/file_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/hackernews_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/jina_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/newspaper_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/ocr_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/resend_tools.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/run_nb_code_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/run_python_code_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/search_exa_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/search_serper_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/shell_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/sql_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/string_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/text_analysis_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/url_crawler_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/web_search_pro_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/wikipedia_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/tools/yfinance_tool.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/console.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/file_parser.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/io.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/log.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/message.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/misc.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/shell.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/string.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/utils/timer.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/vectordb/__init__.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/vectordb/base.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/vectordb/chromadb_vectordb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/vectordb/lancedb_vectordb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/vectordb/memory_vectordb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/vectordb/pgvectordb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/vectordb/pineconedb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/vectordb/qdrantdb.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/workflow.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica/workflow_session.py +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica.egg-info/dependency_links.txt +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica.egg-info/entry_points.txt +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica.egg-info/not-zip-safe +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/agentica.egg-info/top_level.txt +0 -0
- {agentica-0.2.5 → agentica-1.0.1}/setup.cfg +0 -0
{agentica-0.2.5/agentica.egg-info → agentica-1.0.1}/PKG-INFO

@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: agentica
-Version: 0.2.5
+Version: 1.0.1
 Summary: LLM agents
 Home-page: https://github.com/shibing624/agentica
 Author: XuMing
@@ -15,7 +15,7 @@ Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
-Requires-Python: >=3.
+Requires-Python: >=3.10.0
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: httpx
@@ -31,6 +31,7 @@ Requires-Dist: markdownify
 Requires-Dist: tqdm
 Requires-Dist: rich
 Requires-Dist: pyyaml
+Requires-Dist: mcp
 Dynamic: author
 Dynamic: author-email
 Dynamic: classifier
@@ -39,6 +40,7 @@ Dynamic: description-content-type
 Dynamic: home-page
 Dynamic: keywords
 Dynamic: license
+Dynamic: license-file
 Dynamic: requires-dist
 Dynamic: requires-python
 Dynamic: summary
@@ -70,26 +72,9 @@ Dynamic: summary

 **Agentica** 可以构建AI Agent,包括规划、记忆和工具使用、执行等组件。

-#### Agent Components
-<img src="https://github.com/shibing624/agentica/blob/main/docs/llm_agentv2.png" width="800" />
-
-- **规划(Planning)**:任务拆解、生成计划、反思
-- **记忆(Memory)**:短期记忆(prompt实现)、长期记忆(RAG实现)
-- **工具使用(Tool use)**:function call能力,调用外部API,以获取外部信息,包括当前日期、日历、代码执行能力、对专用信息源的访问等
-
-#### Agentica Workflow
-
-**Agentica** can also build multi-agent systems and workflows.
-
-**Agentica** 还可以构建多Agent系统和工作流。
-
-<img src="https://github.com/shibing624/agentica/blob/main/docs/agent_arch.png" width="800" />
-
-- **Planner**:负责让LLM生成一个多步计划来完成复杂任务,生成相互依赖的“链式计划”,定义每一步所依赖的上一步的输出
-- **Worker**:接受“链式计划”,循环遍历计划中的每个子任务,并调用工具完成任务,可以自动反思纠错以完成任务
-- **Solver**:求解器将所有这些输出整合为最终答案
-
 ## 🔥 News
+[2025/04/21] v1.0.0版本:支持了`MCP`的工具调用,兼容SSE和Stdio两种MCP Server,详见[Release-v1.0.0](https://github.com/shibing624/agentica/releases/tag/1.0.0)
+
 [2024/12/29] v0.2.3版本: 支持了`ZhipuAI`的api调用,包括免费模型和工具使用,详见[Release-v0.2.3](https://github.com/shibing624/agentica/releases/tag/0.2.3)

 [2024/12/25] v0.2.0版本: 支持了多模态模型,输入可以是文本、图片、音频、视频,升级Assistant为Agent,Workflow支持拆解并实现复杂任务,详见[Release-v0.2.0](https://github.com/shibing624/agentica/releases/tag/0.2.0)
@@ -101,10 +86,10 @@
 `Agentica`是一个用于构建Agent的工具,具有以下功能:

 - **Agent编排**:通过简单代码快速编排Agent,支持 Reflection(反思)、Plan and Solve(计划并执行)、RAG、Agent、Multi-Agent、Team、Workflow等功能
--
+- **工具调用**:支持自定义工具OpenAI的function call,支持MCP Server的工具调用
 - **LLM集成**:支持OpenAI、Azure、Deepseek、Moonshot、Anthropic、ZhipuAI、Ollama、Together等多方大模型厂商的API
--
-- **Multi-Agent协作**:支持多Agent和任务委托(Team
+- **记忆功能**:支持短期记忆和长期记忆功能
+- **Multi-Agent协作**:支持多Agent和任务委托(Team)的团队协作
 - **Workflow工作流**:拆解复杂任务为多个Agent,基于工作流自动化串行逐步完成任务,如投资研究、新闻文章撰写和技术教程创建
 - **自我进化Agent**:具有反思和增强记忆能力的自我进化Agent
 - **Web UI**:兼容ChatPilot,可以基于Web页面交互,支持主流的open-webui、streamlit、gradio等前端交互框架
@@ -134,10 +119,10 @@ cd examples
 python 12_web_search_moonshot_demo.py
 ```

-1. 复制[.env.example](https://github.com/shibing624/agentica/blob/main/.env.example)文件为`~/.agentica/.env`,并填写LLM api key(选填
+1. 复制[.env.example](https://github.com/shibing624/agentica/blob/main/.env.example)文件为`~/.agentica/.env`,并填写LLM api key(选填OPENAI_API_KEY、ZHIPUAI_API_KEY 等任一个)。或者使用`export`命令设置环境变量:

 ```shell
-export
+export ZHIPUAI_API_KEY=your_api_key
 export SERPER_API_KEY=your_serper_api_key
 ```

@@ -228,6 +213,8 @@ bash start.sh
 | [examples/38_workflow_write_tutorial_demo.py](https://github.com/shibing624/agentica/blob/main/examples/38_workflow_write_tutorial_demo.py) | 实现了写技术教程的工作流:定教程目录 - 反思目录内容 - 撰写教程内容 - 保存为md文件 |
 | [examples/39_audio_multi_turn_demo.py](https://github.com/shibing624/agentica/blob/main/examples/39_audio_multi_turn_demo.py) | 基于openai的语音api做多轮音频对话的Demo |
 | [examples/40_weather_zhipuai_demo.py](https://github.com/shibing624/agentica/blob/main/examples/40_web_search_zhipuai_demo.py) | 基于智谱AI的api做天气查询的Demo |
+| [examples/41_mcp_stdio_demo.py](https://github.com/shibing624/agentica/blob/main/examples/41_mcp_stdio_demo.py) | Stdio的MCP Server调用的Demo |
+| [examples/42_mcp_sse_client.py](https://github.com/shibing624/agentica/blob/main/examples/42_mcp_sse_client.py) | SSE的MCP Server调用的Demo |


 ### Self-evolving Agent
{agentica-0.2.5 → agentica-1.0.1}/README.md

@@ -25,26 +25,9 @@

 **Agentica** 可以构建AI Agent,包括规划、记忆和工具使用、执行等组件。

-#### Agent Components
-<img src="https://github.com/shibing624/agentica/blob/main/docs/llm_agentv2.png" width="800" />
-
-- **规划(Planning)**:任务拆解、生成计划、反思
-- **记忆(Memory)**:短期记忆(prompt实现)、长期记忆(RAG实现)
-- **工具使用(Tool use)**:function call能力,调用外部API,以获取外部信息,包括当前日期、日历、代码执行能力、对专用信息源的访问等
-
-#### Agentica Workflow
-
-**Agentica** can also build multi-agent systems and workflows.
-
-**Agentica** 还可以构建多Agent系统和工作流。
-
-<img src="https://github.com/shibing624/agentica/blob/main/docs/agent_arch.png" width="800" />
-
-- **Planner**:负责让LLM生成一个多步计划来完成复杂任务,生成相互依赖的“链式计划”,定义每一步所依赖的上一步的输出
-- **Worker**:接受“链式计划”,循环遍历计划中的每个子任务,并调用工具完成任务,可以自动反思纠错以完成任务
-- **Solver**:求解器将所有这些输出整合为最终答案
-
 ## 🔥 News
+[2025/04/21] v1.0.0版本:支持了`MCP`的工具调用,兼容SSE和Stdio两种MCP Server,详见[Release-v1.0.0](https://github.com/shibing624/agentica/releases/tag/1.0.0)
+
 [2024/12/29] v0.2.3版本: 支持了`ZhipuAI`的api调用,包括免费模型和工具使用,详见[Release-v0.2.3](https://github.com/shibing624/agentica/releases/tag/0.2.3)

 [2024/12/25] v0.2.0版本: 支持了多模态模型,输入可以是文本、图片、音频、视频,升级Assistant为Agent,Workflow支持拆解并实现复杂任务,详见[Release-v0.2.0](https://github.com/shibing624/agentica/releases/tag/0.2.0)
@@ -56,10 +39,10 @@
 `Agentica`是一个用于构建Agent的工具,具有以下功能:

 - **Agent编排**:通过简单代码快速编排Agent,支持 Reflection(反思)、Plan and Solve(计划并执行)、RAG、Agent、Multi-Agent、Team、Workflow等功能
--
+- **工具调用**:支持自定义工具OpenAI的function call,支持MCP Server的工具调用
 - **LLM集成**:支持OpenAI、Azure、Deepseek、Moonshot、Anthropic、ZhipuAI、Ollama、Together等多方大模型厂商的API
--
-- **Multi-Agent协作**:支持多Agent和任务委托(Team
+- **记忆功能**:支持短期记忆和长期记忆功能
+- **Multi-Agent协作**:支持多Agent和任务委托(Team)的团队协作
 - **Workflow工作流**:拆解复杂任务为多个Agent,基于工作流自动化串行逐步完成任务,如投资研究、新闻文章撰写和技术教程创建
 - **自我进化Agent**:具有反思和增强记忆能力的自我进化Agent
 - **Web UI**:兼容ChatPilot,可以基于Web页面交互,支持主流的open-webui、streamlit、gradio等前端交互框架
@@ -89,10 +72,10 @@ cd examples
 python 12_web_search_moonshot_demo.py
 ```

-1. 复制[.env.example](https://github.com/shibing624/agentica/blob/main/.env.example)文件为`~/.agentica/.env`,并填写LLM api key(选填
+1. 复制[.env.example](https://github.com/shibing624/agentica/blob/main/.env.example)文件为`~/.agentica/.env`,并填写LLM api key(选填OPENAI_API_KEY、ZHIPUAI_API_KEY 等任一个)。或者使用`export`命令设置环境变量:

 ```shell
-export
+export ZHIPUAI_API_KEY=your_api_key
 export SERPER_API_KEY=your_serper_api_key
 ```

@@ -183,6 +166,8 @@ bash start.sh
 | [examples/38_workflow_write_tutorial_demo.py](https://github.com/shibing624/agentica/blob/main/examples/38_workflow_write_tutorial_demo.py) | 实现了写技术教程的工作流:定教程目录 - 反思目录内容 - 撰写教程内容 - 保存为md文件 |
 | [examples/39_audio_multi_turn_demo.py](https://github.com/shibing624/agentica/blob/main/examples/39_audio_multi_turn_demo.py) | 基于openai的语音api做多轮音频对话的Demo |
 | [examples/40_weather_zhipuai_demo.py](https://github.com/shibing624/agentica/blob/main/examples/40_web_search_zhipuai_demo.py) | 基于智谱AI的api做天气查询的Demo |
+| [examples/41_mcp_stdio_demo.py](https://github.com/shibing624/agentica/blob/main/examples/41_mcp_stdio_demo.py) | Stdio的MCP Server调用的Demo |
+| [examples/42_mcp_sse_client.py](https://github.com/shibing624/agentica/blob/main/examples/42_mcp_sse_client.py) | SSE的MCP Server调用的Demo |


 ### Self-evolving Agent
{agentica-0.2.5 → agentica-1.0.1}/agentica/agent.py

@@ -2055,7 +2055,7 @@ class Agent(BaseModel):
         self.run_id = str(uuid4())
         self.run_response = RunResponse(run_id=self.run_id, session_id=self.session_id, agent_id=self.agent_id)

-        logger.debug(f"***********
+        logger.debug(f"*********** Agent Run Start: {self.run_response.run_id} ***********")

         # 1. Update the Model (set defaults, add tools, etc.)
         self.update_model()
@@ -2227,10 +2227,7 @@ class Agent(BaseModel):
         elif messages is not None:
             self.run_input = [m.to_dict() if isinstance(m, Message) else m for m in messages]

-
-        await self.alog_agent_run()
-
-        logger.debug(f"*********** Async Agent Run End: {self.run_response.run_id} ***********")
+        logger.debug(f"*********** Agent Run End: {self.run_response.run_id} ***********")
         if self.stream_intermediate_steps:
             yield self.generic_run_response(
                 content=self.run_response.content,
{agentica-0.2.5 → agentica-1.0.1}/agentica/config.py

@@ -9,6 +9,7 @@ from dotenv import load_dotenv # noqa
 from loguru import logger # noqa, need to import logger here to avoid circular import

 AGENTICA_HOME = os.getenv("AGENTICA_HOME", os.path.expanduser("~/.agentica"))
+os.makedirs(AGENTICA_HOME, exist_ok=True)

 # Load environment variables from .env file
 AGENTICA_DOTENV_PATH = os.getenv("AGENTICA_DOTENV_PATH", f"{AGENTICA_HOME}/.env")
agentica-1.0.1/agentica/mcp/__init__.py

@@ -0,0 +1,20 @@
+try:
+    from .server import (
+        MCPServer,
+        MCPServerSse,
+        MCPServerSseParams,
+        MCPServerStdio,
+        MCPServerStdioParams,
+    )
+    from .client import MCPClient
+except ImportError:
+    pass
+
+__all__ = [
+    "MCPServer",
+    "MCPServerSse",
+    "MCPServerSseParams",
+    "MCPServerStdio",
+    "MCPServerStdioParams",
+    "MCPClient",
+]
agentica-1.0.1/agentica/mcp/client.py

@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+"""
+@author:XuMing(xuming624@qq.com)
+@description:
+"""
+
+from typing import Any, Dict, List, Optional
+from mcp import Tool as MCPTool
+from mcp.types import CallToolResult, EmbeddedResource, ImageContent, TextContent
+from agentica.mcp.server import MCPServer
+from agentica.utils.log import logger
+
+__all__ = ["MCPClient"]
+
+
+class MCPClient:
+    """A client for interacting with MCP servers.
+
+    This client can be used as an async context manager to automatically manage connections
+    and resource cleanup.
+
+    Example:
+        ```python
+        # For stdio-based server
+        params = MCPServerStdioParams(command="python", args=["server.py"])
+        async with MCPClient(server=MCPServerStdio(params)) as client:
+            result = await client.call_tool("add", {"a": 1, "b": 2})
+            print(result)
+
+        # For SSE-based server
+        params = MCPServerSseParams(url="http://localhost:8000/sse")
+        async with MCPClient(server=MCPServerSse(params)) as client:
+            result = await client.call_tool("get_weather", {"city": "Tokyo"})
+            print(result)
+        ```
+    """
+
+    def __init__(
+            self,
+            server: MCPServer,
+            include_tools: Optional[List[str]] = None,
+            exclude_tools: Optional[List[str]] = None
+    ):
+        """Initialize the MCP client.
+
+        Args:
+            server: The MCP server to connect to
+            include_tools: Optional list of tool names to include (if None, includes all)
+            exclude_tools: Optional list of tool names to exclude (if None, excludes none)
+        """
+        self.server = server
+        self.tools_list: List[MCPTool] = []
+        self.tools_by_name: Dict[str, MCPTool] = {}
+        self.include_tools = include_tools
+        self.exclude_tools = exclude_tools or []
+
+    async def __aenter__(self) -> 'MCPClient':
+        """Enter the async context manager."""
+        await self.connect()
+        return self
+
+    async def __aexit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
+        """Exit the async context manager."""
+        await self.cleanup()
+
+    async def connect(self) -> None:
+        """Connect to the MCP server and retrieve the list of available tools."""
+        logger.info(f"Connecting to MCP server: {self.server.name}")
+
+        try:
+            await self.server.connect()
+
+            # Get the list of tools from the MCP server
+            all_tools = await self.server.list_tools()
+
+            # Filter tools based on include/exclude lists
+            self.tools_list = []
+            for tool in all_tools:
+                if tool.name in self.exclude_tools:
+                    continue
+                if self.include_tools is None or tool.name in self.include_tools:
+                    self.tools_list.append(tool)
+
+            # Create a mapping of tool names to tools
+            self.tools_by_name = {tool.name: tool for tool in self.tools_list}
+
+            logger.info(f"Connected to {self.server.name} with {len(self.tools_list)} tools available")
+            for tool in self.tools_list:
+                logger.debug(f" - {tool.name}: {tool.description or 'No description'}")
+
+        except Exception as e:
+            logger.error(f"Error connecting to MCP server: {e}")
+            await self.cleanup()
+            raise
+
+    async def call_tool(self, tool_name: str, arguments: Dict[str, Any] = None) -> Any:
+        """Call a tool on the MCP server.
+
+        Args:
+            tool_name: The name of the tool to call
+            arguments: The arguments to pass to the tool
+
+        Returns:
+            The result of the tool call
+
+        Raises:
+            ValueError: If the tool is not available
+        """
+        if tool_name not in self.tools_by_name:
+            available_tools = ", ".join(self.tools_by_name.keys())
+            raise ValueError(f"Tool '{tool_name}' not available. Available tools: {available_tools}")
+        logger.debug(f"Calling tool '{tool_name}' with arguments: {arguments}")
+        try:
+            result = await self.server.call_tool(tool_name, arguments)
+            return result
+        except Exception as e:
+            msg = f"Error calling tool '{tool_name}': {e}"
+            logger.error(msg)
+            return msg
+
+    def extract_result_text(self, result: CallToolResult) -> str:
+        """Extract text content from a tool call result.
+
+        Args:
+            result: The result from a tool call
+
+        Returns:
+            The extracted text content
+        """
+        if result.isError:
+            return f"Error: {result.content}"
+
+        text_parts = []
+        for content_item in result.content:
+            if isinstance(content_item, TextContent):
+                text_parts.append(content_item.text)
+            elif isinstance(content_item, ImageContent):
+                text_parts.append(f"[Image content: {content_item.data}]")
+            elif isinstance(content_item, EmbeddedResource):
+                text_parts.append(f"[Embedded resource: {content_item.resource.model_dump_json()}]")
+            else:
+                text_parts.append(f"[Unsupported content type: {content_item.type}]")
+
+        return "\n".join(text_parts)
+
+    async def list_tools(self) -> List[MCPTool]:
+        """List the available tools.
+
+        Returns:
+            List of available tools
+        """
+        return self.tools_list
+
+    async def cleanup(self) -> None:
+        """Clean up resources and close the connection."""
+        try:
+            await self.server.cleanup()
+            logger.debug(f"Disconnected from MCP server: {self.server.name}")
+        except Exception as e:
+            logger.error(f"Error cleaning up MCP client: {e}")
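The `MCPClient` docstring above already shows the intended call pattern; as a minimal, self-contained sketch of the stdio flow, something like the following could be run against the new module (the `server.py` script and its `add` tool are illustrative assumptions, not files shipped in this release):

```python
import asyncio

from agentica.mcp.client import MCPClient
from agentica.mcp.server import MCPServerStdio, MCPServerStdioParams


async def main():
    # Hypothetical stdio MCP server; substitute any command that speaks MCP over stdio.
    params = MCPServerStdioParams(command="python", args=["server.py"])
    async with MCPClient(server=MCPServerStdio(params)) as client:
        tools = await client.list_tools()
        print("tools:", [t.name for t in tools])
        # "add" is an assumed tool name exposed by the hypothetical server.
        result = await client.call_tool("add", {"a": 1, "b": 2})
        print(client.extract_result_text(result))


asyncio.run(main())
```

The async context manager handles `connect()` and `cleanup()` automatically, which is the usage path the examples/41_mcp_stdio_demo.py entry above refers to.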
agentica-1.0.1/agentica/mcp/server.py

@@ -0,0 +1,293 @@
+# -*- coding: utf-8 -*-
+"""
+@author:XuMing(xuming624@qq.com)
+@description: Model Context Protocol (MCP) client implementations supporting both stdio and SSE transports
+"""
+from __future__ import annotations
+
+import abc
+import asyncio
+from contextlib import AbstractAsyncContextManager, AsyncExitStack
+from pathlib import Path
+from typing import Any, Dict, List, Literal, Optional, Tuple, TypedDict, Union
+from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
+from mcp import ClientSession, StdioServerParameters, Tool as MCPTool, stdio_client
+from mcp.client.sse import sse_client
+from mcp.types import CallToolResult, JSONRPCMessage
+
+from agentica.utils.log import logger
+
+
+class MCPServer(abc.ABC):
+    """Base class for Model Context Protocol servers."""
+
+    @abc.abstractmethod
+    async def connect(self):
+        """Connect to the server. For example, this might mean spawning a subprocess or
+        opening a network connection. The server is expected to remain connected until
+        `cleanup()` is called.
+        """
+        pass
+
+    @property
+    @abc.abstractmethod
+    def name(self) -> str:
+        """A readable name for the server."""
+        pass
+
+    @abc.abstractmethod
+    async def cleanup(self):
+        """Cleanup the server. For example, this might mean closing a subprocess or
+        closing a network connection.
+        """
+        pass
+
+    @abc.abstractmethod
+    async def list_tools(self) -> List[MCPTool]:
+        """List the tools available on the server."""
+        pass
+
+    @abc.abstractmethod
+    async def call_tool(self, tool_name: str, arguments: Dict[str, Any] | None) -> CallToolResult:
+        """Invoke a tool on the server."""
+        pass
+
+
+class _MCPServerWithClientSession(MCPServer, abc.ABC):
+    """Base class for MCP servers that use a `ClientSession` to communicate with the server."""
+
+    def __init__(self, cache_tools_list: bool = True):
+        """
+        Args:
+            cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
+            cached and only fetched from the server once. If `False`, the tools list will be
+            fetched from the server on each call to `list_tools()`. The cache can be invalidated
+            by calling `invalidate_tools_cache()`. You should set this to `True` if you know the
+            server will not change its tools list, because it can drastically improve latency
+            (by avoiding a round-trip to the server every time).
+        """
+        self.session: Optional[ClientSession] = None
+        self.exit_stack: AsyncExitStack = AsyncExitStack()
+        self._cleanup_lock: asyncio.Lock = asyncio.Lock()
+        self.cache_tools_list = cache_tools_list
+
+        # The cache is always dirty at startup, so that we fetch tools at least once
+        self._cache_dirty = True
+        self._tools_list: Optional[List[MCPTool]] = None
+
+    @abc.abstractmethod
+    def create_streams(
+            self,
+    ) -> AbstractAsyncContextManager[
+        Tuple[
+            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+            MemoryObjectSendStream[JSONRPCMessage],
+        ]
+    ]:
+        """Create the streams for the server."""
+        pass
+
+    async def __aenter__(self):
+        await self.connect()
+        return self
+
+    async def __aexit__(self, exc_type, exc_value, traceback):
+        await self.cleanup()
+
+    def invalidate_tools_cache(self):
+        """Invalidate the tools cache."""
+        self._cache_dirty = True
+
+    async def connect(self):
+        """Connect to the server."""
+        try:
+            transport = await self.exit_stack.enter_async_context(self.create_streams())
+            read, write = transport
+            session = await self.exit_stack.enter_async_context(ClientSession(read, write))
+            await session.initialize()
+            self.session = session
+        except Exception as e:
+            logger.error(f"Error initializing MCP server: {e}")
+            await self.cleanup()
+            raise
+
+    async def list_tools(self) -> List[MCPTool]:
+        """List the tools available on the server."""
+        if not self.session:
+            raise ValueError("Server not initialized. Make sure you call `connect()` first.")
+
+        # Return from cache if caching is enabled, we have tools, and the cache is not dirty
+        if self.cache_tools_list and not self._cache_dirty and self._tools_list:
+            return self._tools_list
+
+        # Reset the cache dirty to False
+        self._cache_dirty = False
+
+        # Fetch the tools from the server
+        self._tools_list = (await self.session.list_tools()).tools
+        return self._tools_list
+
+    async def call_tool(self, tool_name: str, arguments: Dict[str, Any] | None) -> CallToolResult:
+        """Invoke a tool on the server."""
+        if not self.session:
+            raise ValueError("Server not initialized. Make sure you call `connect()` first.")
+
+        return await self.session.call_tool(tool_name, arguments)
+
+    async def cleanup(self):
+        """Cleanup the server."""
+        async with self._cleanup_lock:
+            try:
+                await self.exit_stack.aclose()
+                self.session = None
+            except Exception as e:
+                logger.error(f"Error cleaning up server: {e}")
+
+
+class MCPServerStdioParams(TypedDict):
+    """Parameters for the MCPServerStdio class."""
+    command: str
+    """The executable to run to start the server. For example, `python` or `node`."""
+
+    args: Optional[List[str]]
+    """Command line args to pass to the `command` executable. For example, `['foo.py']` or
+    `['server.js', '--port', '8080']`."""
+
+    env: Optional[Dict[str, str]]
+    """The environment variables to set for the server."""
+
+    cwd: Optional[Union[str, Path]]
+    """The working directory to use when spawning the process."""
+
+    encoding: Optional[str]
+    """The text encoding used when sending/receiving messages to the server. Defaults to `utf-8`."""
+
+    encoding_error_handler: Optional[Literal["strict", "ignore", "replace"]]
+    """The text encoding error handler. Defaults to `strict`.
+    See https://docs.python.org/3/library/codecs.html#codec-base-classes for
+    explanations of possible values.
+    """
+
+
+class MCPServerStdio(_MCPServerWithClientSession):
+    """MCP server implementation that uses the stdio transport.
+
+    See the specification for details: https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#stdio
+    """
+
+    def __init__(
+            self,
+            params: MCPServerStdioParams,
+            cache_tools_list: bool = True,
+            name: Optional[str] = None,
+    ):
+        """Create a new MCP server based on the stdio transport.
+
+        Args:
+            params: The params that configure the server. This includes the command to run to
+                start the server, the args to pass to the command, the environment variables to
+                set for the server, the working directory to use when spawning the process, and
+                the text encoding used when sending/receiving messages to the server.
+            cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
+                cached and only fetched from the server once. If `False`, the tools list will be
+                fetched from the server on each call to `list_tools()`. The cache can be
+                invalidated by calling `invalidate_tools_cache()`. You should set this to `True`
+                if you know the server will not change its tools list, because it can drastically
+                improve latency (by avoiding a round-trip to the server every time).
+            name: A readable name for the server. If not provided, we'll create one from the
+                command.
+        """
+        super().__init__(cache_tools_list)
+        self.params = StdioServerParameters(
+            command=params["command"],
+            args=params.get("args", []),
+            env=params.get("env"),
+            cwd=params.get("cwd"),
+            encoding=params.get("encoding", "utf-8"),
+            encoding_error_handler=params.get("encoding_error_handler", "strict"),
+        )
+        self._name = name or f"stdio: {self.params.command}"
+
+    def create_streams(
+            self,
+    ) -> AbstractAsyncContextManager[
+        Tuple[
+            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+            MemoryObjectSendStream[JSONRPCMessage],
+        ]
+    ]:
+        """Create the streams for the server."""
+        return stdio_client(self.params)
+
+    @property
+    def name(self) -> str:
+        """A readable name for the server."""
+        return self._name
+
+
+class MCPServerSseParams(TypedDict):
+    """Parameters for the MCPServerSse class."""
+    url: str
+    """The URL of the server."""
+
+    headers: Optional[Dict[str, str]]
+    """The headers to send to the server."""
+
+    timeout: Optional[float]
+    """The timeout for the HTTP request. Defaults to 5 seconds."""
+
+    sse_read_timeout: Optional[float]
+    """The timeout for the SSE connection, in seconds. Defaults to 5 minutes."""
+
+
+class MCPServerSse(_MCPServerWithClientSession):
+    """MCP server implementation that uses the HTTP with SSE transport.
+
+    See the specification for details: https://spec.modelcontextprotocol.io/specification/2024-11-05/basic/transports/#http-with-sse
+    """
+
+    def __init__(
+            self,
+            params: MCPServerSseParams,
+            cache_tools_list: bool = True,
+            name: Optional[str] = None,
+    ):
+        """Create a new MCP server based on the HTTP with SSE transport.
+
+        Args:
+            params: The params that configure the server. This includes the URL of the server,
+                the headers to send to the server, the timeout for the HTTP request, and the
+                timeout for the SSE connection.
+            cache_tools_list: Whether to cache the tools list. If `True`, the tools list will be
+                cached and only fetched from the server once. If `False`, the tools list will be
+                fetched from the server on each call to `list_tools()`. The cache can be
+                invalidated by calling `invalidate_tools_cache()`. You should set this to `True`
+                if you know the server will not change its tools list, because it can drastically
+                improve latency (by avoiding a round-trip to the server every time).
+            name: A readable name for the server. If not provided, we'll create one from the
+                URL.
+        """
+        super().__init__(cache_tools_list)
+        self.params = params
+        self._name = name or f"sse: {self.params['url']}"
+
+    def create_streams(
+            self,
+    ) -> AbstractAsyncContextManager[
+        Tuple[
+            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
+            MemoryObjectSendStream[JSONRPCMessage],
+        ]
+    ]:
+        """Create the streams for the server."""
+        return sse_client(
+            url=self.params["url"],
+            headers=self.params.get("headers"),
+            timeout=self.params.get("timeout", 5),
+            sse_read_timeout=self.params.get("sse_read_timeout", 60 * 5),
+        )
+
+    @property
+    def name(self) -> str:
+        """A readable name for the server."""
+        return self._name
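The SSE transport follows the same pattern as the stdio sketch above; a hedged example, assuming an MCP server is already serving SSE at `http://localhost:8000/sse` and exposes a `get_weather` tool (both assumptions mirror the `MCPClient` docstring example and are not part of this package):

```python
import asyncio

from agentica.mcp.client import MCPClient
from agentica.mcp.server import MCPServerSse, MCPServerSseParams


async def main():
    # Assumed endpoint and tool name, taken from the MCPClient docstring example.
    params = MCPServerSseParams(url="http://localhost:8000/sse")
    async with MCPClient(server=MCPServerSse(params)) as client:
        result = await client.call_tool("get_weather", {"city": "Tokyo"})
        print(client.extract_result_text(result))


asyncio.run(main())
```

Because `MCPServerSse` defaults `cache_tools_list` to `True`, repeated `list_tools()` calls within one session reuse the first response unless `invalidate_tools_cache()` is called.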